+#include <klibc/string.h>
#include <lunaix/common.h>
#include <lunaix/elf.h>
+#include <lunaix/exec.h>
#include <lunaix/fs.h>
-#include <lunaix/ld.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
-int
-__elf_populate_mapped(struct mm_region* region, void* pg, off_t segfoff)
+static inline int
+elf32_read(struct v_file* elf, void* data, size_t off, size_t len)
{
- size_t segsz = region->flen;
- size_t segmoff = segfoff - region->foff;
-
- if (segmoff >= segsz) {
- return 0;
- }
-
- struct v_file* file = region->mfile;
- size_t rdlen = MIN(segsz - segmoff, PG_SIZE);
-
- if (rdlen == PG_SIZE) {
- // This is because we want to exploit any optimization on read_page
- return file->ops->read_page(file->inode, pg, PG_SIZE, segfoff);
- } else {
- // we don't want to over-read the segment!
- return file->ops->read(file->inode, pg, rdlen, segfoff);
- }
+ // go through the page cache so repeated reads of the same headers stay cheap
+ return pcache_read(elf->inode, data, len, off);
}
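+// Map one PT_LOAD segment into the process address space, translating the
+// ELF segment flags (PF_R/W/X) into page protections and updating the load
+// context bookkeeping.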
int
-elf_map_segment(struct ld_param* ldparam,
- struct v_file* elfile,
- struct elf32_phdr* phdr)
+elf32_map_segment(struct load_context* ldctx,
+ const struct elf32* elf,
+ struct elf32_phdr* phdre)
{
+ struct v_file* elfile = (struct v_file*)elf->elf_file;
+
+ assert(PG_ALIGNED(phdre->p_offset));
+
int proct = 0;
- if ((phdr->p_flags & PF_R)) {
+ if ((phdre->p_flags & PF_R)) {
proct |= PROT_READ;
}
- if ((phdr->p_flags & PF_W)) {
+ if ((phdre->p_flags & PF_W)) {
proct |= PROT_WRITE;
}
- if ((phdr->p_flags & PF_X)) {
+ if ((phdre->p_flags & PF_X)) {
proct |= PROT_EXEC;
}
- struct mm_region* seg_reg;
- struct mmap_param param = { .vms_mnt = ldparam->vms_mnt,
- .pvms = &ldparam->proc->mm,
+ struct exec_container* container = ldctx->container;
+ struct mmap_param param = { .vms_mnt = container->vms_mnt,
+ .pvms = &container->proc->mm,
.proct = proct,
- .offset = phdr->p_offset,
- .mlen = ROUNDUP(phdr->p_memsz, PG_SIZE),
- .flen = phdr->p_filesz,
+ .offset = PG_ALIGN(phdre->p_offset),
+ .mlen = ROUNDUP(phdre->p_memsz, PG_SIZE),
+ .flen = phdre->p_filesz + PG_MOD(phdre->p_va),
.flags = MAP_FIXED | MAP_PRIVATE,
.type = REGION_TYPE_CODE };
- int status = mem_map(NULL, &seg_reg, PG_ALIGN(phdr->p_va), elfile, &param);
+ struct mm_region* seg_reg;
+ int status = mem_map(NULL, &seg_reg, PG_ALIGN(phdre->p_va), elfile, &param);
if (!status) {
- seg_reg->init_page = __elf_populate_mapped;
-
- size_t next_addr = phdr->p_memsz + phdr->p_va;
- ldparam->info.end = MAX(ldparam->info.end, ROUNDUP(next_addr, PG_SIZE));
- ldparam->info.mem_sz += phdr->p_memsz;
+ size_t next_addr = phdre->p_memsz + phdre->p_va;
+ ldctx->end = MAX(ldctx->end, ROUNDUP(next_addr, PG_SIZE));
+ ldctx->mem_sz += phdre->p_memsz;
+ } else {
+ // the mapping failed part-way through, leaving the address space inconsistent
+ terminate_proc(-1);
}
return status;
}
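+// Resolve `path` through the VFS, open the file and hand it to
+// elf32_openat for parsing.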
int
-elf_setup_mapping(struct ld_param* ldparam,
- struct v_file* elfile,
- struct elf32_ehdr* ehdr)
+elf32_open(struct elf32* elf, const char* path)
+{
+ struct v_dnode* elfdn;
+ struct v_file* elffile;
+ int error = 0;
+
+ if ((error = vfs_walk_proc(path, &elfdn, NULL, 0))) {
+ return error;
+ }
+
+ if ((error = vfs_open(elfdn, &elffile))) {
+ return error;
+ }
+
+ return elf32_openat(elf, elffile);
+}
+
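+// Parse an already-opened ELF file: read the ELF header and the program
+// header table. The descriptor is closed again if either read fails.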
+int
+elf32_openat(struct elf32* elf, void* elf_vfile)
{
int status = 0;
- size_t tbl_sz = ehdr->e_phnum * SIZE_PHDR;
- struct elf32_phdr* phdrs = valloc(tbl_sz);
+ elf->pheaders = NULL;
+ elf->elf_file = elf_vfile;
- if (!phdrs) {
- status = ENOMEM;
- goto done;
+ if ((status = elf32_read_ehdr(elf)) < 0) {
+ elf32_close(elf);
+ return status;
}
- tbl_sz = 1 << ILOG2(tbl_sz);
- status = elfile->ops->read(elfile->inode, phdrs, tbl_sz, ehdr->e_phoff);
+ if ((status = elf32_read_phdr(elf)) < 0) {
+ elf32_close(elf);
+ return status;
+ }
- if (status < 0) {
- goto done;
+ return status;
+}
+
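+// Release the program header table and the underlying file, then reset
+// the descriptor.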
+int
+elf32_close(struct elf32* elf)
+{
+ if (elf->pheaders) {
+ vfree(elf->pheaders);
}
- if (PG_ALIGN(phdrs[0].p_va) != USER_START) {
- status = ENOEXEC;
- goto done;
+ if (elf->elf_file) {
+ vfs_close((struct v_file*)elf->elf_file);
}
- size_t entries = tbl_sz / SIZE_PHDR;
- for (size_t i = 0; i < entries; i++) {
- struct elf32_phdr* phdr = &phdrs[i];
+ memset(elf, 0, sizeof(*elf));
+
+ return 0;
+}
- if (phdr->p_type == PT_LOAD) {
- if (phdr->p_align == PG_SIZE) {
- status = elf_map_segment(ldparam, elfile, phdr);
- } else {
- // surprising alignment!
- status = ENOEXEC;
- }
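+// An executable is statically linked iff it carries no PT_INTERP segment.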
+int
+elf32_static_linked(const struct elf32* elf)
+{
+ for (size_t i = 0; i < elf->eheader.e_phnum; i++) {
+ struct elf32_phdr* phdre = &elf->pheaders[i];
+ if (phdre->p_type == PT_INTERP) {
+ return 0;
}
- // TODO process other types of segments
+ }
+ return 1;
+}
- if (status) {
- // errno in the middle of mapping restructuring, it is impossible
- // to recover!
- ldparam->status |= LD_STAT_FKUP;
- goto done;
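+// Total in-memory size (p_memsz) summed over all PT_LOAD segments.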
+size_t
+elf32_loadable_memsz(const struct elf32* elf)
+{
+ // XXX: Not sure we actually need this. It would be handy if we decided to
+ // map the heap region before transferring control to the loader. Currently
+ // we push *everything* to the user-space loader, so the brk syscall is
+ // modified to do the initial heap mapping instead.
+
+ size_t sz = 0;
+ for (size_t i = 0; i < elf->eheader.e_phnum; i++) {
+ struct elf32_phdr* phdre = &elf->pheaders[i];
+ if (phdre->p_type == PT_LOAD) {
+ sz += phdre->p_memsz;
}
}
-done:
- vfree(phdrs);
- return status;
+ return sz;
+}
+
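+// Copy the PT_INTERP path (the dynamic loader) into path_out.
+// Returns NO_LOADER when the executable carries no PT_INTERP segment.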
+int
+elf32_find_loader(const struct elf32* elf, char* path_out, size_t len)
+{
+ int retval = NO_LOADER;
+
+ assert_msg(len >= sizeof(DEFAULT_LOADER), "path_out: too small");
+
+ struct v_file* elfile = (struct v_file*)elf->elf_file;
+
+ for (size_t i = 0; i < elf->eheader.e_phnum; i++) {
+ struct elf32_phdr* phdre = &elf->pheaders[i];
+ if (phdre->p_type == PT_INTERP) {
+ assert_msg(len >= phdre->p_filesz, "path_out: too small");
+ retval =
+ elf32_read(elfile, path_out, phdre->p_offset, phdre->p_filesz);
+
+ if (retval < 0) {
+ return retval;
+ }
+
+ break;
+ }
+ }
+
+ return retval;
}
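+// Read the ELF file header into elf->eheader.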
int
-elf_load(struct ld_param* ldparam, struct v_file* elfile)
+elf32_read_ehdr(struct elf32* elf)
{
- struct elf32_ehdr* ehdr = valloc(SIZE_EHDR);
- int status = elfile->ops->read(elfile->inode, ehdr, SIZE_EHDR, 0);
+ struct v_file* elfile = (struct v_file*)elf->elf_file;
+ int status = elf32_read(elfile, (void*)&elf->eheader, 0, SIZE_EHDR);
if (status < 0) {
- goto done;
+ return status;
}
+
+ return status;
+}
+
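+// Read the program header table into elf->pheaders; returns the number of
+// entries on success.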
+int
+elf32_read_phdr(struct elf32* elf)
+{
+ int status = 0;
- if (!elf_check_exec(ehdr)) {
- status = ENOEXEC;
- goto done;
+ struct v_file* elfile = (struct v_file*)elf->elf_file;
+
+ size_t entries = elf->eheader.e_phnum;
+ size_t tbl_sz = entries * SIZE_PHDR;
+
+ struct elf32_phdr* phdrs = valloc(tbl_sz);
+
+ if (!phdrs) {
+ return -ENOMEM;
}
- if ((status = elf_setup_mapping(ldparam, elfile, ehdr))) {
- goto done;
+ status = elf_read(elfile, phdrs, elf->eheader.e_phoff, tbl_sz);
+
+ if (status < 0) {
+ vfree(phdrs);
+ return status;
}
- ldparam->info.ehdr_out = *ehdr;
+ elf->pheaders = phdrs;
+ return entries;
+}
+
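+// Sanity-check the ELF header: magic, 32-bit class, little-endian,
+// executable type and i386 machine.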
+int
+elf32_check_exec(const struct elf32* elf)
+{
+ const struct elf32_ehdr* ehdr = &elf->eheader;
+
+ return *(u32_t*)(ehdr->e_ident) == ELFMAGIC &&
+ ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
+ ehdr->e_ident[EI_DATA] == ELFDATA2LSB && ehdr->e_type == ET_EXEC &&
+ ehdr->e_machine == EM_386;
+}
+
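+// Map every PT_LOAD segment of the executable into the address space
+// described by the load context.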
+int
+elf32_load(struct load_context* ldctx, const struct elf32* elf)
+{
+ int err = 0;
+
+ for (size_t i = 0; i < elf->eheader.e_phnum && !err; i++) {
+ struct elf32_phdr* phdr = &elf->pheaders[i];
+
+ if (phdr->p_type == PT_LOAD) {
+ if (phdr->p_align != PG_SIZE) {
+ // we only support page-aligned loadable segments
+ err = ENOEXEC;
+ continue;
+ }
+
+ err = elf32_map_segment(ldctx, elf, phdr);
+ }
+ // TODO Handle relocation
+ }
-done:
- vfree(ehdr);
- return status;
+ return err;
}
\ No newline at end of file