X-Git-Url: https://scm.lunaixsky.com/lunaix-os.git/blobdiff_plain/74926d2db1d9f3228acdfca03013a8ba0ac1d8c0..986ce23ace2f7875a1a561bd947f435a7594146c:/lunaix-os/kernel/loader/elf.c?ds=sidebyside

diff --git a/lunaix-os/kernel/loader/elf.c b/lunaix-os/kernel/loader/elf.c
index 7880fe6..fa6b576 100644
--- a/lunaix-os/kernel/loader/elf.c
+++ b/lunaix-os/kernel/loader/elf.c
@@ -1,138 +1,247 @@
+#include
 #include
 #include
+#include
 #include
-#include
 #include
 #include
 #include
 #include
+static inline int
+elf32_read(struct v_file* elf, void* data, size_t off, size_t len)
+{
+    // it is wise to do cached read
+    return pcache_read(elf->inode, data, len, off);
+}
+
 int
-__elf_populate_mapped(struct mm_region* region, void* pg, off_t offset)
+elf32_map_segment(struct load_context* ldctx,
+                  const struct elf32* elf,
+                  struct elf32_phdr* phdre)
 {
-    struct elf32_phdr* phdr = (struct elf32_phdr*)region->data;
-    size_t segsz = phdr->p_filesz;
-    size_t segoff = offset - phdr->p_offset;
+    struct v_file* elfile = (struct v_file*)elf->elf_file;
 
-    if (segoff >= segsz) {
-        return 0;
+    assert(PG_ALIGNED(phdre->p_offset));
+
+    int proct = 0;
+    if ((phdre->p_flags & PF_R)) {
+        proct |= PROT_READ;
+    }
+    if ((phdre->p_flags & PF_W)) {
+        proct |= PROT_WRITE;
+    }
+    if ((phdre->p_flags & PF_X)) {
+        proct |= PROT_EXEC;
     }
 
-    struct v_file* file = region->mfile;
-    size_t rdlen = MIN(segsz - segoff, PG_SIZE);
+    struct exec_container* container = ldctx->container;
+    struct mmap_param param = { .vms_mnt = container->vms_mnt,
+                                .pvms = &container->proc->mm,
+                                .proct = proct,
+                                .offset = PG_ALIGN(phdre->p_offset),
+                                .mlen = ROUNDUP(phdre->p_memsz, PG_SIZE),
+                                .flen = phdre->p_filesz + PG_MOD(phdre->p_va),
+                                .flags = MAP_FIXED | MAP_PRIVATE,
+                                .type = REGION_TYPE_CODE };
 
-    if (rdlen == PG_SIZE) {
-        // This is because we want to exploit any optimization on read_page
-        return file->ops->read_page(file->inode, pg, PG_SIZE, offset);
+    struct mm_region* seg_reg;
+    int status = mem_map(NULL, &seg_reg, PG_ALIGN(phdre->p_va), elfile, &param);
+
+    if (!status) {
+        size_t next_addr = phdre->p_memsz + phdre->p_va;
+        ldctx->end = MAX(ldctx->end, ROUNDUP(next_addr, PG_SIZE));
+        ldctx->mem_sz += phdre->p_memsz;
     } else {
-        return file->ops->read(file->inode, pg, rdlen, offset);
+        // we probably fucked up our process
+        terminate_proc(-1);
     }
-}
 
-void
-__elf_destruct_mapped(struct mm_region* region)
-{
-    vfree(region->data);
+    return status;
 }
 
 int
-elf_map_segment(struct ld_param* ldparam,
-                struct v_file* elfile,
-                struct elf32_phdr* phdr)
+elf32_open(struct elf32* elf, const char* path)
 {
-    int proct = 0;
-    if ((phdr->p_flags & PF_R)) {
-        proct |= PROT_READ;
-    }
-    if ((phdr->p_flags & PF_W)) {
-        proct |= PROT_WRITE;
+    struct v_dnode* elfdn;
+    struct v_inode* elfin;
+    struct v_file* elffile;
+    int error = 0;
+
+    if ((error = vfs_walk_proc(path, &elfdn, NULL, 0))) {
+        return error;
     }
-    if ((phdr->p_flags & PF_X)) {
-        proct |= PROT_EXEC;
+
+    if ((error = vfs_open(elfdn, &elffile))) {
+        return error;
    }
 
-    struct mm_region* seg_reg;
-    struct mmap_param param = { .vms_mnt = ldparam->vms_mnt,
-                                .regions = &ldparam->proc->mm.regions,
-                                .proct = proct,
-                                .offset = phdr->p_offset,
-                                .length = ROUNDUP(phdr->p_memsz, PG_SIZE),
-                                .flags =
-                                  MAP_FIXED | MAP_PRIVATE | REGION_TYPE_CODE };
+    return elf32_openat(elf, elffile);
+}
 
-    int status = mem_map(NULL, &seg_reg, PG_ALIGN(phdr->p_va), elfile, &param);
+int
+elf32_openat(struct elf32* elf, void* elf_vfile)
+{
+    int status = 0;
+    elf->pheaders = NULL;
+    elf->elf_file = elf_vfile;
 
-    if (!status) {
-        struct elf32_phdr* phdr_ = valloc(sizeof(SIZE_PHDR));
-        *phdr_ = *phdr;
-        seg_reg->data = phdr;
+    if ((status = elf32_read_ehdr(elf)) < 0) {
+        elf32_close(elf);
+        return status;
+    }
 
-        seg_reg->init_page = __elf_populate_mapped;
-        seg_reg->destruct_region = __elf_destruct_mapped;
+    if ((status = elf32_read_phdr(elf)) < 0) {
+        elf32_close(elf);
+        return status;
     }
 
     return status;
 }
 
 int
-elf_setup_mapping(struct ld_param* ldparam,
-                  struct v_file* elfile,
-                  struct elf32_ehdr* ehdr)
+elf32_close(struct elf32* elf)
 {
-    int status = 0;
-    size_t tbl_sz = ehdr->e_phnum * SIZE_PHDR;
-    struct elf32_phdr* phdrs = valloc(tbl_sz);
+    if (elf->pheaders) {
+        vfree(elf->pheaders);
+    }
 
-    if (!phdrs) {
-        status = ENOMEM;
-        goto done;
+    if (elf->elf_file) {
+        vfs_close((struct v_file*)elf->elf_file);
    }
 
-    tbl_sz = 1 << ILOG2(tbl_sz);
-    phdrs = elfile->ops->read(elfile->inode, phdrs, tbl_sz, ehdr->e_phoff);
+    memset(elf, 0, sizeof(*elf));
+
+    return 0;
+}
 
-    size_t entries = tbl_sz / SIZE_PHDR;
-    for (size_t i = 0; i < entries; i++) {
-        struct elf32_phdr* phdr = &phdrs[i];
+int
+elf32_static_linked(const struct elf32* elf)
+{
+    for (size_t i = 0; i < elf->eheader.e_phnum; i++) {
+        struct elf32_phdr* phdre = &elf->pheaders[i];
+        if (phdre->p_type == PT_INTERP) {
+            return 0;
+        }
+    }
+    return 1;
+}
 
-        if (phdr->p_type == PT_LOAD) {
-            status = elf_map_segment(ldparam, elfile, phdr);
+size_t
+elf32_loadable_memsz(const struct elf32* elf)
+{
+    // XXX: Hmmmm, I am not sure if we need this. It is designed to be handy
+    // if we decide to map the heap region before transferring to the loader.
+    // Currently we push *everything* to the user-space loader, so we modify
+    // the brk syscall to do the initial heap mapping instead.
+
+    size_t sz = 0;
+    for (size_t i = 0; i < elf->eheader.e_phnum; i++) {
+        struct elf32_phdr* phdre = &elf->pheaders[i];
+        if (phdre->p_type == PT_LOAD) {
+            sz += phdre->p_memsz;
        }
-        // TODO process other types of segments
+    }
+
+    return sz;
+}
+
+int
+elf32_find_loader(const struct elf32* elf, char* path_out, size_t len)
+{
+    int retval = NO_LOADER;
+
+    assert_msg(len >= sizeof(DEFAULT_LOADER), "path_out: too small");
+
+    struct v_file* elfile = (struct v_file*)elf->elf_file;
+
+    for (size_t i = 0; i < elf->eheader.e_phnum; i++) {
+        struct elf32_phdr* phdre = &elf->pheaders[i];
+        if (phdre->p_type == PT_INTERP) {
+            assert_msg(len >= phdre->p_filesz, "path_out: too small");
+            retval =
+              elf_read(elfile, path_out, phdre->p_offset, phdre->p_filesz);
 
-        if (status) {
-            ldparam->status |= LD_STAT_FKUP;
-            goto done;
+            if (retval < 0) {
+                return retval;
+            }
+
+            break;
        }
    }
 
-done:
-    vfree(phdrs);
-    return status;
+    return retval;
 }
 
 int
-elf_load(struct ld_param* ldparam, struct v_file* elfile)
+elf32_read_ehdr(struct elf32* elf)
 {
-    struct elf32_ehdr* ehdr = valloc(SIZE_EHDR);
-    int status = elfile->ops->read(elfile->inode, ehdr, SIZE_EHDR, 0);
+    struct v_file* elfile = (struct v_file*)elf->elf_file;
+    int status = elf_read(elfile, (void*)&elf->eheader, 0, SIZE_EHDR);
 
-    if (status) {
-        goto done;
+    if (status < 0) {
+        return status;
    }
+
+    return 0;
+}
+
+int
+elf32_read_phdr(struct elf32* elf)
+{
+    int status = 0;
+
+    struct v_file* elfile = (struct v_file*)elf->elf_file;
+
+    size_t entries = elf->eheader.e_phnum;
+    size_t tbl_sz = entries * SIZE_PHDR;
 
-    if (!elf_check_exec(ehdr)) {
-        status = ENOEXEC;
-        goto done;
+    struct elf32_phdr* phdrs = valloc(tbl_sz);
+
+    if (!phdrs) {
+        return ENOMEM;
    }
 
-    if ((status = elf_setup_mapping(ldparam, elfile, ehdr))) {
-        goto done;
+    status = elf_read(elfile, phdrs, elf->eheader.e_phoff, tbl_sz);
+
+    if (status < 0) {
+        vfree(phdrs);
+        return status;
    }
 
-    ldparam->ehdr_out = *ehdr;
+    elf->pheaders = phdrs;
+    return entries;
+}
+
+int
+elf32_check_exec(const struct elf32* elf)
+{
+    const struct elf32_ehdr* ehdr = &elf->eheader;
+
+    return *(u32_t*)(ehdr->e_ident) == ELFMAGIC &&
+           ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
+           ehdr->e_ident[EI_DATA] == ELFDATA2LSB && ehdr->e_type == ET_EXEC &&
+           ehdr->e_machine == EM_386;
+}
+
+int
+elf32_load(struct load_context* ldctx, const struct elf32* elf)
+{
+    int err = 0;
+
+    struct v_file* elfile = (struct v_file*)elf->elf_file;
+
+    for (size_t i = 0; i < elf->eheader.e_phnum && !err; i++) {
+        struct elf32_phdr* phdr = &elf->pheaders[i];
+
+        if (phdr->p_type == PT_LOAD) {
+            if (phdr->p_align != PG_SIZE) {
+                // surprising alignment!
+                err = ENOEXEC;
+                continue;
+            }
+
+            err = elf_map_segment(ldctx, elf, phdr);
+        }
+        // TODO Handle relocation
+    }
 
 done:
-    vfree(ehdr);
-    return status;
+    return err;
 }
\ No newline at end of file
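
Note: the commit replaces the monolithic elf_load() path with a small elf32_* API (open and parse, executable sanity check, PT_INTERP loader discovery, PT_LOAD mapping) that the caller drives step by step. The sketch below illustrates how an exec path might use that API end to end. It is a minimal sketch, not code from this commit: the function name exec_load_elf32, the ldpath buffer size, and the "negative return means failure" convention are assumptions made for illustration, and the declarations are assumed to come from the same kernel headers included at the top of the file.

// Hypothetical caller of the elf32_* API above; for illustration only.
static int
exec_load_elf32(struct load_context* ldctx, const char* path)
{
    struct elf32 elf;
    int err;

    // open the image and parse its ELF header and program header table
    if ((err = elf32_open(&elf, path)) < 0) {
        return err;
    }

    // accept only 32-bit, little-endian, i386 ET_EXEC images
    if (!elf32_check_exec(&elf)) {
        elf32_close(&elf);
        return ENOEXEC;
    }

    // a PT_INTERP segment means a dynamic loader must be located first
    if (!elf32_static_linked(&elf)) {
        char ldpath[64];    // assumed large enough for DEFAULT_LOADER
        err = elf32_find_loader(&elf, ldpath, sizeof(ldpath));
        if (err < 0) {
            elf32_close(&elf);
            return err;
        }
        // ...open ldpath with elf32_open() and map it the same way...
    }

    // map every PT_LOAD segment into the target address space
    err = elf32_load(ldctx, &elf);

    elf32_close(&elf);
    return err;
}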