--- /dev/null
+#ifndef __LUNAIX_ABI_H
+#define __LUNAIX_ABI_H
+
+/* clang-format off */
+
+#if 0
+// templates, new arch should implements these templates
+#define store_retval(retval)
+#endif
+
+#ifdef __ARCH_IA32
+ #include "x86/i386_asm.h"
+ #ifndef __ASM__
+ #include "x86/i386_abi.h"
+ #endif
+#endif
+
+/* clang-format on */
+
+#endif /* __LUNAIX_ABI_H */
--- /dev/null
+#ifndef __LUNAIX_I386ABI_H
+#define __LUNAIX_I386ABI_H
+
+#define store_retval(retval) __current->intr_ctx.registers.eax = (retval)
+
+#define store_retval_to(proc, retval) (proc)->intr_ctx.registers.eax = (retval)
+
+#define j_usr(sp, pc) \
+ asm volatile("movw %0, %%ax\n" \
+ "movw %%ax, %%es\n" \
+ "movw %%ax, %%ds\n" \
+ "movw %%ax, %%fs\n" \
+ "movw %%ax, %%gs\n" \
+ "pushl %0\n" \
+ "pushl %1\n" \
+ "pushl %2\n" \
+ "pushl %3\n" \
+ "retf" ::"i"(UDATA_SEG), \
+ "r"(sp), \
+ "i"(UCODE_SEG), \
+ "r"(pc) \
+ : "eax", "memory");
+
+#endif /* __LUNAIX_I386ABI_H */
--- /dev/null
+#ifndef __LUNAIX_I386_ASM_H
+#define __LUNAIX_I386_ASM_H
+
+#define KCODE_SEG 0x08
+#define KDATA_SEG 0x10
+#define UCODE_SEG 0x1B
+#define UDATA_SEG 0x23
+#define TSS_SEG 0x28
+
+#endif /* __LUNAIX_I386_ASM_H */
#define KCODE_SEG 0x08
#define KDATA_SEG 0x10
-#define UCODE_SEG 0x1B
-#define UDATA_SEG 0x23
-#define TSS_SEG 0x28
#define USTACK_SIZE MEM_4MB
#define USTACK_TOP 0x9ffffff0
#define ET_EXEC 2
#define PT_LOAD 1
+#define PT_INTERP 3
#define PF_X 0x1
#define PF_W 0x2
elf32_wrd_t p_align;
};
+struct elf32
+{
+ void* elf_file;
+ struct elf32_ehdr eheader;
+ struct elf32_phdr* pheaders;
+};
+
+#define declare_elf32(elf, elf_vfile) \
+ struct elf32 elf = { .elf_file = elf_vfile, .pheaders = (void*)0 }
+
+int
+elf32_open(struct elf32* elf, const char* path);
+
+int
+elf32_openat(struct elf32* elf, void* elf_vfile);
+
+int
+elf32_static_linked(const struct elf32* elf);
+
+int
+elf32_close(struct elf32* elf);
+
+/**
+ * @brief Try to find the PT_INTERP program header. If found, copy its
+ * content (the interpreter path) to path_out
+ *
+ *
+ * @param elf Opened elf32 descriptor
+ * @param path_out
+ * @param len size of path_out buffer
+ * @return int
+ */
+int
+elf32_find_loader(const struct elf32* elf, char* path_out, size_t len);
+
+int
+elf32_read_ehdr(struct elf32* elf);
+
+int
+elf32_read_phdr(struct elf32* elf);
+
+/**
+ * @brief Estimate how much memory will be acquired if we load all loadable
+ * segments.
+ *
+ * @param elf
+ * @return size_t
+ */
+size_t
+elf32_loadable_memsz(const struct elf32* elf);
+
+int
+elf32_load(struct load_context* ldctx, const struct elf32* elf);
+
#define SIZE_EHDR sizeof(struct elf32_ehdr)
#define SIZE_PHDR sizeof(struct elf32_phdr)
-static inline int
-elf_check_exec(struct elf32_ehdr* ehdr)
-{
- return *(u32_t*)(ehdr->e_ident) == ELFMAGIC &&
- ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
- ehdr->e_ident[EI_DATA] == ELFDATA2LSB && ehdr->e_type == ET_EXEC &&
- ehdr->e_machine == EM_386;
-}
#endif /* __LUNAIX_ELF_H */
-#ifndef __LUNAIX_LOADER_H
-#define __LUNAIX_LOADER_H
+#ifndef __LUNAIX_EXEC_H
+#define __LUNAIX_EXEC_H
#include <lunaix/elf.h>
+#include <lunaix/fs.h>
#include <lunaix/process.h>
#include <lunaix/types.h>
-#define LD_STAT_FKUP 0x1U
+#define NO_LOADER 0
+#define DEFAULT_LOADER "usr/ld"
#define MAX_VAR_PAGES 8
#define DEFAULT_HEAP_PAGES 16
-struct ld_info
+struct exec_context;
+
+struct load_context
{
- struct elf32_ehdr ehdr_out;
+ struct exec_container* container;
ptr_t base;
ptr_t end;
ptr_t mem_sz;
- ptr_t stack_top;
ptr_t entry;
};
-struct ld_param
+struct exec_container
{
struct proc_info* proc;
ptr_t vms_mnt;
- struct ld_info info;
+ struct load_context executable;
+
+ ptr_t stack_top;
+ ptr_t entry; // mapped to one of {executable|loader}.entry
+
int status;
};
-struct usr_exec_param
+struct uexec_param
{
int argc;
char** argv;
int envc;
char** envp;
- struct ld_info info;
} PACKED;
#ifndef __USR_WRAPPER__
-int
-elf_load(struct ld_param* ldparam, struct v_file* elfile);
int
-exec_load_byname(struct ld_param* param,
+exec_load_byname(struct exec_container* container,
const char* filename,
const char** argv,
const char** envp);
int
-exec_load(struct ld_param* param,
+exec_load(struct exec_container* container,
struct v_file* executable,
const char** argv,
const char** envp);
-void
-ld_create_param(struct ld_param* param, struct proc_info* proc, ptr_t vms);
+int
+exec_kexecve(const char* filename, const char* argv[], const char* envp);
+
#endif
#endif /* __LUNAIX_LOADER_H */
#define VFS_IFVOLDEV 0x8
#define VFS_IFSYMLINK 0x10
+// Walk, mkdir if component encountered is non-exists.
#define VFS_WALK_MKPARENT 0x1
+
+// Walk, relative to current FS.
#define VFS_WALK_FSRELATIVE 0x2
+
+/*
+ Terminate the walk on the immediate parent,
+ name of child (last component) is returned through `component`
+*/
#define VFS_WALK_PARENT 0x4
+
+// Do not follow the symbolic link
#define VFS_WALK_NOFOLLOW 0x8
#define VFS_HASHTABLE_BITS 10
#define PG_MOUNT_3 (PG_MOUNT_BASE + 0x2000)
#define PG_MOUNT_4 (PG_MOUNT_BASE + 0x3000)
+/*
+ 当前进程内存空间挂载点
+*/
#define VMS_SELF L2_BASE_VADDR
#define CURPROC_PTE(vpn) \
void
vfree(void* ptr);
+void
+vfree_safe(void* ptr);
+
void*
valloc_dma(unsigned int size);
#ifndef __LUNAIX_SPIKE_H
#define __LUNAIX_SPIKE_H
-// Some helper functions. As helpful as Spike the Dragon! :)
+/** Some helper functions. As helpful as Spike the Dragon! :) **/
// 除法 v/(2^k) 向上取整
#define CEIL(v, k) (((v) + (1 << (k)) - 1) >> (k))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#define likely(x) __builtin_expect((x), 1)
+
/**
* @brief Fast log base 2 for integer, utilizing constant unfolding.
* Adopted from
#define assert(cond) (void)(cond); // assert nothing
#define assert_msg(cond, msg) (void)(cond); // assert nothing
-#endif // __LUNAIXOS_NASSERT__
+#endif // __LUNAIXOS_NASSERT__
void
panick(const char* msg);
#define ENOTBLK -26
#define ENOEXEC -27
#define E2BIG -28
+#define ELIBBAD -29
#endif /* __LUNAIX_CODE_H */
#define __ASM__
#include <arch/x86/interrupts.h>
-#include <lunaix/common.h>
+#include <arch/x86/i386_asm.h>
#include <lunaix/syscall.h>
#define __ASM_INTR_DIAGNOSIS
/* 高半核入口点 - 0xC0000000 */
#define __ASM__
-#include <lunaix/common.h>
+#include <arch/x86/i386_asm.h>
.section .text
.global hhk_entry_
goto error;
}
- // reposition the resolved subtree pointed by symlink
- // vfs_dcache_rehash(current_level->parent, dnode);
current_level = dnode;
current_inode = dnode->inode;
}
if (inode->open_count) {
errno = EBUSY;
} else if (!(inode->itype & VFS_IFDIR)) {
- // The underlying unlink implementation should handle
- // symlink case
errno = inode->ops->unlink(inode);
if (!errno) {
vfs_d_free(dnode);
errno = __vfs_try_locate_file(oldpath, &dentry, &to_link, 0);
if (!errno) {
errno = __vfs_try_locate_file(
- newpath, &name_dentry, &name_file, FLOCATE_CREATE_EMPTY);
+ newpath, &name_dentry, &name_file, FLOCATE_CREATE_ONLY);
if (!errno) {
- errno = EEXIST;
- } else if (name_file) {
errno = vfs_link(to_link, name_file);
}
}
+#include <klibc/string.h>
#include <lunaix/common.h>
#include <lunaix/elf.h>
+#include <lunaix/exec.h>
#include <lunaix/fs.h>
-#include <lunaix/ld.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
+/**
+ * @brief Read `len` bytes at file offset `off` into `data`, going through the
+ * page cache. Named `elf_read` because every caller in this file
+ * (elf32_read_ehdr, elf32_read_phdr, elf32_find_loader) invokes it under that
+ * name — the original definition said `elf32_read` and would not link.
+ */
+static inline int
+elf_read(struct v_file* elf, void* data, size_t off, size_t len)
+{
+    // it is wise to do cached read; note pcache_read takes (inode, buf, len, off)
+    return pcache_read(elf->inode, data, len, off);
+}
+
int
-elf_map_segment(struct ld_param* ldparam,
- struct v_file* elfile,
- struct elf32_phdr* phdr)
+elf32_map_segment(struct load_context* ldctx,
+ const struct elf32* elf,
+ struct elf32_phdr* phdre)
{
- assert(PG_ALIGNED(phdr->p_offset));
+ struct v_file* elfile = (struct v_file*)elf->elf_file;
+
+ assert(PG_ALIGNED(phdre->p_offset));
int proct = 0;
- if ((phdr->p_flags & PF_R)) {
+ if ((phdre->p_flags & PF_R)) {
proct |= PROT_READ;
}
- if ((phdr->p_flags & PF_W)) {
+ if ((phdre->p_flags & PF_W)) {
proct |= PROT_WRITE;
}
- if ((phdr->p_flags & PF_X)) {
+ if ((phdre->p_flags & PF_X)) {
proct |= PROT_EXEC;
}
- struct mmap_param param = { .vms_mnt = ldparam->vms_mnt,
- .pvms = &ldparam->proc->mm,
+ struct exec_container* container = ldctx->container;
+ struct mmap_param param = { .vms_mnt = container->vms_mnt,
+ .pvms = &container->proc->mm,
.proct = proct,
- .offset = PG_ALIGN(phdr->p_offset),
- .mlen = ROUNDUP(phdr->p_memsz, PG_SIZE),
- .flen = phdr->p_filesz + PG_MOD(phdr->p_va),
+ .offset = PG_ALIGN(phdre->p_offset),
+ .mlen = ROUNDUP(phdre->p_memsz, PG_SIZE),
+ .flen = phdre->p_filesz + PG_MOD(phdre->p_va),
.flags = MAP_FIXED | MAP_PRIVATE,
.type = REGION_TYPE_CODE };
struct mm_region* seg_reg;
- int status = mem_map(NULL, &seg_reg, PG_ALIGN(phdr->p_va), elfile, ¶m);
+ int status = mem_map(NULL, &seg_reg, PG_ALIGN(phdre->p_va), elfile, ¶m);
if (!status) {
- size_t next_addr = phdr->p_memsz + phdr->p_va;
- ldparam->info.end = MAX(ldparam->info.end, ROUNDUP(next_addr, PG_SIZE));
- ldparam->info.mem_sz += phdr->p_memsz;
+ size_t next_addr = phdre->p_memsz + phdre->p_va;
+ ldctx->end = MAX(ldctx->end, ROUNDUP(next_addr, PG_SIZE));
+ ldctx->mem_sz += phdre->p_memsz;
+ } else {
+ // we probably fucked up our process
+ terminate_proc(-1);
}
return status;
}
int
-elf_setup_mapping(struct ld_param* ldparam,
- struct v_file* elfile,
- struct elf32_ehdr* ehdr)
+elf32_open(struct elf32* elf, const char* path)
+{
+ struct v_dnode* elfdn;
+ struct v_inode* elfin;
+ struct v_file* elffile;
+ int error = 0;
+
+ if ((error = vfs_walk_proc(path, &elfdn, NULL, 0))) {
+ return error;
+ }
+
+ if ((error = vfs_open(elfdn, &elffile))) {
+ return error;
+ }
+
+ return elf32_openat(elf, elffile);
+}
+
+int
+elf32_openat(struct elf32* elf, void* elf_vfile)
{
    int status = 0;
-    size_t tbl_sz = ehdr->e_phnum * SIZE_PHDR;
-    struct elf32_phdr* phdrs = valloc(tbl_sz);
+    elf->pheaders = NULL;
+    elf->elf_file = elf_vfile;
-    if (!phdrs) {
-        status = ENOMEM;
-        goto done;
+    if ((status = elf32_read_ehdr(elf)) < 0) {
+        elf32_close(elf);
+        return status;
    }
-    tbl_sz = 1 << ILOG2(tbl_sz);
-    status = elfile->ops->read(elfile->inode, phdrs, tbl_sz, ehdr->e_phoff);
+    if ((status = elf32_read_phdr(elf)) < 0) {
+        elf32_close(elf);
+        return status;
+    }
-    if (status < 0) {
-        goto done;
+    // read_ehdr/read_phdr return positive counts on success, but callers of
+    // elf32_openat (e.g. exec_load) treat ANY non-zero value as an error —
+    // normalize the success path to 0.
+    return 0;
+}
+
+int
+elf32_close(struct elf32* elf)
+{
+    if (elf->pheaders) {
+        vfree(elf->pheaders);
}
-    if (PG_ALIGN(phdrs[0].p_va) != USER_START) {
-        status = ENOEXEC;
-        goto done;
+    if (elf->elf_file) {
+        vfs_close((struct v_file*)elf->elf_file);
}
-    size_t entries = tbl_sz / SIZE_PHDR;
-    for (size_t i = 0; i < entries; i++) {
-        struct elf32_phdr* phdr = &phdrs[i];
+    memset(elf, 0, sizeof(*elf));
+
+    // declared int — return success explicitly instead of falling off the
+    // end, which is UB when the caller reads the value (exec_load does:
+    // `if ((errno = elf32_close(&elf)))`).
+    return 0;
+}
- if (phdr->p_type == PT_LOAD) {
- if (phdr->p_align == PG_SIZE) {
- status = elf_map_segment(ldparam, elfile, phdr);
- } else {
- // surprising alignment!
- status = ENOEXEC;
- }
+int
+elf32_static_linked(const struct elf32* elf)
+{
+ for (size_t i = 0; i < elf->eheader.e_phnum; i++) {
+ struct elf32_phdr* phdre = &elf->pheaders[i];
+ if (phdre->p_type == PT_INTERP) {
+ return 0;
}
- // TODO process other types of segments
+ }
+ return 1;
+}
- if (status) {
- // errno in the middle of mapping restructuring, it is impossible
- // to recover!
- ldparam->status |= LD_STAT_FKUP;
- goto done;
+size_t
+elf32_loadable_memsz(const struct elf32* elf)
+{
+ // XXX: Hmmmm, I am not sure if we need this. This is designed to be handy
+ // if we decided to map the heap region before transfer to loader. As
+ // currently, we push *everything* to user-space loader, thus we modify the
+ // brk syscall to do the initial heap mapping.
+
+ size_t sz = 0;
+ for (size_t i = 0; i < elf->eheader.e_phnum; i++) {
+ struct elf32_phdr* phdre = &elf->pheaders[i];
+ if (phdre->p_type == PT_LOAD) {
+ sz += phdre->p_memsz;
}
}
-done:
- vfree(phdrs);
- return status;
+ return sz;
}
int
-elf_load(struct ld_param* ldparam, struct v_file* elfile)
+elf32_find_loader(const struct elf32* elf, char* path_out, size_t len)
{
- struct elf32_ehdr* ehdr = valloc(SIZE_EHDR);
- int status = elfile->ops->read(elfile->inode, ehdr, SIZE_EHDR, 0);
+ int retval = NO_LOADER;
+
+ assert_msg(len >= sizeof(DEFAULT_LOADER), "path_out: too small");
+
+ struct v_file* elfile = (struct v_file*)elf->elf_file;
+
+ for (size_t i = 0; i < elf->eheader.e_phnum; i++) {
+ struct elf32_phdr* phdre = &elf->pheaders[i];
+ if (phdre->p_type == PT_INTERP) {
+ assert_msg(len >= phdre->p_filesz, "path_out: too small");
+ retval =
+ elf_read(elfile, path_out, phdre->p_offset, phdre->p_filesz);
+
+ if (retval < 0) {
+ return retval;
+ }
+
+ break;
+ }
+ }
+
+ return retval;
+}
+
+int
+elf32_read_ehdr(struct elf32* elf)
+{
+    struct v_file* elfile = (struct v_file*)elf->elf_file;
+    int status = elf_read(elfile, (void*)&elf->eheader, 0, SIZE_EHDR);
if (status < 0) {
-        goto done;
+        return status;
}
+
+    // success path must return a value: the function is declared int and
+    // elf32_openat tests its result with `< 0`.
+    return status;
+}
- if (!elf_check_exec(ehdr)) {
- status = ENOEXEC;
- goto done;
+int
+elf32_read_phdr(struct elf32* elf)
+{
+ int status = 0;
+
+ struct v_file* elfile = (struct v_file*)elf->elf_file;
+
+ size_t entries = elf->eheader.e_phnum;
+ size_t tbl_sz = entries * SIZE_PHDR;
+
+ struct elf32_phdr* phdrs = valloc(tbl_sz);
+
+ if (!phdrs) {
+ return ENOMEM;
}
- if ((status = elf_setup_mapping(ldparam, elfile, ehdr))) {
- goto done;
+ status = elf_read(elfile, phdrs, elf->eheader.e_phoff, tbl_sz);
+
+ if (status < 0) {
+ vfree(phdrs);
+ return status;
}
- ldparam->info.ehdr_out = *ehdr;
+ elf->pheaders = phdrs;
+ return entries;
+}
+
+int
+elf32_check_exec(const struct elf32* elf)
+{
+    // The ELF header is stored inline in elf->eheader; `pheaders` is the
+    // *program header* table (struct elf32_phdr[]) — dereferencing it as an
+    // ehdr read garbage and was also a pointer-type mismatch.
+    const struct elf32_ehdr* ehdr = &elf->eheader;
+
+    return *(u32_t*)(ehdr->e_ident) == ELFMAGIC &&
+           ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
+           ehdr->e_ident[EI_DATA] == ELFDATA2LSB && ehdr->e_type == ET_EXEC &&
+           ehdr->e_machine == EM_386;
+}
+
+/**
+ * @brief Map every PT_LOAD segment of an opened elf32 descriptor into the
+ * address space described by ldctx. Stops at the first error.
+ */
+int
+elf32_load(struct load_context* ldctx, const struct elf32* elf)
+{
+    int err = 0;
+
+    for (size_t i = 0; i < elf->eheader.e_phnum && !err; i++) {
+        struct elf32_phdr* phdr = &elf->pheaders[i];
+
+        if (phdr->p_type == PT_LOAD) {
+            if (phdr->p_align != PG_SIZE) {
+                // surprising alignment!
+                err = ENOEXEC;
+                continue;
+            }
+
+            // was `elf_map_segment`, which no longer exists after the
+            // elf32_* rename of this module
+            err = elf32_map_segment(ldctx, elf, phdr);
+        }
+        // TODO Handle relocation
+    }
-done:
-    vfree(ehdr);
-    return status;
+    return err;
}
\ No newline at end of file
+#include <arch/abi.h>
#include <lunaix/elf.h>
+#include <lunaix/exec.h>
#include <lunaix/fs.h>
-#include <lunaix/ld.h>
#include <lunaix/mm/mmap.h>
+#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/process.h>
#include <lunaix/spike.h>
#include <klibc/string.h>
+void
+exec_container(struct exec_container* param, struct proc_info* proc, ptr_t vms)
+{
+ *param = (struct exec_container){ .proc = proc,
+ .vms_mnt = vms,
+ .executable = { .container = param } };
+}
+
size_t
exec_str_size(const char** str_arr, size_t* length)
{
return sz + sizeof(char*);
}
-void
-__heap_copied(struct mm_region* region)
-{
- mm_index((void**)®ion->proc_vms->heap, region);
-}
-
-int
-__exec_remap_heap(struct ld_param* param, struct proc_mm* pvms)
-{
- if (pvms->heap) {
- mem_unmap_region(param->vms_mnt, pvms->heap);
- }
-
- struct mmap_param map_param = { .pvms = pvms,
- .vms_mnt = param->vms_mnt,
- .flags = MAP_ANON | MAP_PRIVATE,
- .type = REGION_TYPE_HEAP,
- .proct = PROT_READ | PROT_WRITE,
- .mlen = PG_SIZE };
- int status = 0;
- struct mm_region* heap;
- if ((status = mem_map(NULL, &heap, param->info.end, NULL, &map_param))) {
- param->status |= LD_STAT_FKUP;
- return status;
- }
-
- heap->region_copied = __heap_copied;
- mm_index((void**)&pvms->heap, heap);
-
- return status;
-}
+// externed from mm/dmm.c
+extern int
+create_heap(struct proc_mm* pvms, ptr_t addr);
int
-exec_load(struct ld_param* param,
+exec_load(struct exec_container* container,
struct v_file* executable,
const char** argv,
const char** envp)
{
int errno = 0;
+ char* ldpath = NULL;
size_t argv_len, envp_len;
size_t sz_argv = exec_str_size(argv, &argv_len);
size_t sz_envp = exec_str_size(envp, &envp_len);
- size_t total_sz = ROUNDUP(sz_argv + sz_envp, PG_SIZE);
+ size_t var_sz = ROUNDUP(sz_envp, PG_SIZE);
- if (total_sz / PG_SIZE > MAX_VAR_PAGES) {
+ char* argv_extra[2] = { executable->dnode->name.value, 0 };
+
+ if (var_sz / PG_SIZE > MAX_VAR_PAGES) {
errno = E2BIG;
goto done;
}
- if ((errno = elf_load(param, executable))) {
+ struct elf32 elf;
+
+ if ((errno = elf32_openat(&elf, executable))) {
+ goto done;
+ }
+
+ if (!elf32_check_exec(&elf)) {
+ errno = ENOEXEC;
+ goto done;
+ }
+
+    ldpath = valloc(512);
+    if (!ldpath) {
+        errno = ENOMEM;
+        goto done_close_elf32;
+    }
+
+    errno = elf32_find_loader(&elf, ldpath, 512);
+
+    if (errno < 0) {
+        // `done:` already vfree_safe()s ldpath — freeing it here as well
+        // was a double free. This path must also close the opened elf
+        // (pheaders + file ref), so unwind via done_close_elf32.
+        goto done_close_elf32;
+    }
- struct proc_mm* pvms = ¶m->proc->mm;
+ if (errno != NO_LOADER) {
+ // TODO load loader
+ argv_extra[1] = ldpath;
+
+ // close old elf
+ if ((errno = elf32_close(&elf))) {
+ goto done;
+ }
+
+ // open the loader instead
+ if ((errno = elf32_open(&elf, ldpath))) {
+ goto done;
+ }
+
+ // Is this the valid loader?
+ if (!elf32_static_linked(&elf) || !elf32_check_exec(&elf)) {
+ errno = ELIBBAD;
+ goto done_close_elf32;
+ }
+
+ // TODO: relocate loader
+ }
+
+ if ((errno = elf32_load(&container->executable, &elf))) {
+ goto done_close_elf32;
+ }
+
+ struct proc_mm* pvms = &container->proc->mm;
+
+ // A dedicated place for process variables (e.g. envp)
struct mmap_param map_vars = { .pvms = pvms,
- .vms_mnt = param->vms_mnt,
+ .vms_mnt = container->vms_mnt,
.flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
.type = REGION_TYPE_VARS,
.proct = PROT_READ,
void* mapped;
- if ((errno = __exec_remap_heap(param, pvms))) {
- goto done;
+ if (pvms->heap) {
+ mem_unmap_region(container->vms_mnt, pvms->heap);
+ pvms->heap = NULL;
+ }
+
+    if (!argv_extra[1]) {
+        // If loading a statically linked file, then heap remapping we can do,
+        // otherwise delayed.
+        // create_heap (mm/dmm.c) takes the process memory descriptor, not a
+        // vms mount point: create_heap(struct proc_mm*, ptr_t).
+        create_heap(pvms, PG_ALIGN(container->executable.end));
}
if ((errno = mem_map(&mapped, NULL, UMMAP_END, NULL, &map_vars))) {
- goto done;
+ goto done_close_elf32;
}
- if (param->vms_mnt == VMS_SELF) {
+ if (container->vms_mnt == VMS_SELF) {
// we are loading executable into current addr space
// make some handy infos available to user space
- ptr_t arg_start = mapped + sizeof(struct usr_exec_param);
- if (argv)
- memcpy(arg_start, (void*)argv, sz_argv);
if (envp)
- memcpy(arg_start + sz_argv, (void*)envp, sz_envp);
+ memcpy(mapped, (void*)envp, sz_envp);
- ptr_t* ustack = (ptr_t*)USTACK_TOP;
- struct usr_exec_param* exec_param = (struct usr_exec_param*)mapped;
+ void* ustack = (void*)USTACK_TOP;
- ustack[-1] = (ptr_t)mapped;
- param->info.stack_top = &ustack[-1];
+ if (argv) {
+ ustack = (void*)((ptr_t)ustack - sz_argv);
+ memcpy(ustack, (void*)argv, sz_argv);
+ }
+
+ for (size_t i = 0; i < 2 && argv_extra[i]; i++, argv_len++) {
+ char* extra_arg = argv_extra[i];
+ size_t str_len = strlen(extra_arg);
+
+ ustack = (void*)((ptr_t)ustack - str_len);
+ memcpy(ustack, (void*)extra_arg, str_len);
+ }
+
+ // four args (arg{c|v}, env{c|p}) for main
+ struct uexec_param* exec_param = &((struct uexec_param*)ustack)[-1];
+
+ container->stack_top = (ptr_t)exec_param;
+
+ *exec_param = (struct uexec_param){
+ .argc = argv_len, .argv = ustack, .envc = envp_len, .envp = mapped
+ };
- *exec_param = (struct usr_exec_param){ .argc = argv_len,
- .argv = arg_start,
- .envc = envp_len,
- .envp = arg_start + sz_argv,
- .info = param->info };
} else {
- // TODO need to find a way to inject argv and envp remotely
- // this is for the support of kernel level implementation of
- // posix_spawn
+ /*
+ TODO need to find a way to inject argv and envp remotely
+ this is for the support of kernel level implementation of
+ posix_spawn
+
+ IDEA
+ 1. Allocate a orphaned physical page (i.e., do not belong to any
+ VMA)
+ 2. Mounted to a temporary mount point in current VMA, (i.e.,
+ PG_MOUNT_*)
+ 3. Do setup there.
+ 4. Unmount then mounted to the foreign VMA as the first stack
+ page.
+ */
fail("not implemented");
}
- param->info.entry = param->info.ehdr_out.e_entry;
+done_close_elf32:
+ elf32_close(&elf);
done:
+ vfree_safe(ldpath);
return errno;
}
int
-exec_load_byname(struct ld_param* param,
+exec_load_byname(struct exec_container* container,
const char* filename,
const char** argv,
const char** envp)
goto done;
}
- if ((errno = exec_load(param, file, argv, envp))) {
- vfs_pclose(file, __current->pid);
- }
+ errno = exec_load(container, file, argv, envp);
done:
return errno;
}
+/**
+ * @brief Kernel-side execve: load `filename` into the current address space
+ * and jump straight to user mode. Does not return on success.
+ */
+int
+exec_kexecve(const char* filename, const char* argv[], const char* envp)
+{
+    int errno = 0;
+    struct exec_container container;
+    exec_container(&container, __current, VMS_SELF);
+
+    // NOTE(review): `envp` is really an array of strings; the signature (and
+    // the matching declaration in exec.h) should probably be
+    // `const char* envp[]`. Cast for now so the call is well-typed.
+    errno = exec_load_byname(&container, filename, argv, (const char**)envp);
+
+    if (errno) {
+        return errno;
+    }
+
+    j_usr(container.stack_top, container.entry);
+
+    // j_usr transfers control to user space and never returns
+    return -1;
+}
+
__DEFINE_LXSYSCALL3(int,
execve,
const char*,
envp[])
{
int errno = 0;
- struct ld_param ldparam;
- ld_create_param(&ldparam, __current, VMS_SELF);
-
- if ((errno = exec_load_byname(&ldparam, filename, argv, envp))) {
- if ((ldparam.status & LD_STAT_FKUP)) {
- // we fucked up our address space.
- terminate_proc(11451);
- schedule();
- fail("should not reach");
- }
+    struct exec_container container;
+    exec_container(&container, __current, VMS_SELF);
+
+    // Jump to `done` only on FAILURE. The original `if (!(errno = ...))`
+    // was inverted: it skipped the execp->esp/eip setup on success and fell
+    // through with a half-loaded address space on failure.
+    if ((errno = exec_load_byname(&container, filename, argv, envp))) {
goto done;
}
- volatile struct exec_param* execp = __current->intr_ctx.execp;
- execp->esp = ldparam.info.stack_top;
- execp->eip = ldparam.info.entry;
-
// we will jump to new entry point (_u_start) upon syscall's
// return so execve 'will not return' from the perspective of it's invoker
+ volatile struct exec_param* execp = __current->intr_ctx.execp;
+ execp->esp = container.stack_top;
+ execp->eip = container.entry;
done:
- return DO_STATUS(errno);
+ // set return value
+ store_retval(DO_STATUS(errno));
+
+ // Always yield the process that want execve!
+ schedule();
+
+ // this will never get executed!
+ return -1;
}
\ No newline at end of file
+++ /dev/null
-#include <lunaix/ld.h>
-
-void
-ld_create_param(struct ld_param* param, struct proc_info* proc, ptr_t vms)
-{
- *param = (struct ld_param){ .proc = proc, .vms_mnt = vms };
-}
\ No newline at end of file
#include <lunaix/mm/mmap.h>
+#include <lunaix/mm/vmm.h>
#include <lunaix/process.h>
#include <lunaix/status.h>
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>
+// region_copied hook installed by create_heap: re-points the owning
+// proc_mm's heap index at the duplicated region — presumably invoked when a
+// VM space is copied (e.g. fork); confirm against the mm_region copy path.
+void
+__heap_copied(struct mm_region* region)
+{
+    mm_index((void**)&region->proc_vms->heap, region);
+}
+
+/**
+ * @brief Create the initial (one page, anonymous, RW) heap region for `pvms`
+ * at `addr` in the current address space.
+ *
+ * @return 0 on success, or the error from mem_map.
+ */
+int
+create_heap(struct proc_mm* pvms, ptr_t addr)
+{
+    struct mmap_param map_param = { .pvms = pvms,
+                                    .vms_mnt = VMS_SELF,
+                                    .flags = MAP_ANON | MAP_PRIVATE,
+                                    .type = REGION_TYPE_HEAP,
+                                    .proct = PROT_READ | PROT_WRITE,
+                                    .mlen = PG_SIZE };
+    int status = 0;
+    struct mm_region* heap;
+    if ((status = mem_map(NULL, &heap, addr, NULL, &map_param))) {
+        return status;
+    }
+
+    heap->region_copied = __heap_copied;
+    mm_index((void**)&pvms->heap, heap);
+
+    // declared int — the success path must return a value (sbrk's sibling
+    // syscall does `DO_STATUS(create_heap(...))` with the result).
+    return 0;
+}
+
__DEFINE_LXSYSCALL1(void*, sbrk, ssize_t, incr)
{
struct proc_mm* pvms = &__current->mm;
struct proc_mm* pvms = &__current->mm;
struct mm_region* heap = pvms->heap;
+ if (!heap) {
+ return DO_STATUS(create_heap(pvms, addr));
+ }
+
assert(heap);
int err = mem_adjust_inplace(&pvms->regions, heap, (ptr_t)addr);
return DO_STATUS(err);
}
}
- struct v_fd* vfd;
- if ((errno = vfs_getfd(fd, &vfd))) {
- goto done;
- }
-
- struct v_file* file = vfd->file;
+ struct v_file* file = NULL;
if (!(options & MAP_ANON)) {
+ struct v_fd* vfd;
+ if ((errno = vfs_getfd(fd, &vfd))) {
+ goto done;
+ }
+
+ file = vfd->file;
if (!file->ops->read_page) {
errno = ENODEV;
goto done;
}
- } else {
- file = NULL;
}
struct mmap_param param = { .flags = options,
__vfree(ptr, piles, CLASS_LEN(piles_names));
}
+void
+vfree_safe(void* ptr)
+{
+ if (!ptr) {
+ return;
+ }
+
+ __vfree(ptr, piles, CLASS_LEN(piles_names));
+}
+
void*
valloc_dma(unsigned int size)
{
#include <lunaix/block.h>
#include <lunaix/common.h>
+#include <lunaix/exec.h>
#include <lunaix/foptions.h>
#include <lunaix/fs.h>
#include <lunaix/fs/probe_boot.h>
#include <lunaix/fs/twifs.h>
-#include <lunaix/ld.h>
#include <lunaix/lxconsole.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/pmm.h>
exec_initd()
{
int errno = 0;
- struct ld_param param;
- char filename[] = "/mnt/lunaix-os/usr/init";
- ld_create_param(¶m, __current, VMS_SELF);
-
- if ((errno = exec_load_byname(¶m, filename, NULL, NULL))) {
+ if (exec_kexecve("/mnt/lunaix-os/usr/init", NULL, NULL)) {
goto fail;
}
- // user space
- asm volatile("movw %0, %%ax\n"
- "movw %%ax, %%es\n"
- "movw %%ax, %%ds\n"
- "movw %%ax, %%fs\n"
- "movw %%ax, %%gs\n"
- "pushl %0\n"
- "pushl %1\n"
- "pushl %2\n"
- "pushl %3\n"
- "retf" ::"i"(UDATA_SEG),
- "r"(param.info.stack_top),
- "i"(UCODE_SEG),
- "r"(param.info.entry)
- : "eax", "memory");
-
fail("should not reach");
fail:
+#include <arch/abi.h>
#include <klibc/string.h>
#include <lunaix/clock.h>
#include <lunaix/common.h>
vmm_unmount_pd(VMS_MOUNT_1);
// 正如同fork,返回两次。
- pcb->intr_ctx.registers.eax = 0;
+ store_retval_to(pcb, 0);
commit_process(pcb);
void* default_handlers[_SIG_NUM] = {
// TODO: 添加默认handler
- [_SIGINT] = default_sighandler_term, [_SIGTERM] = default_sighandler_term,
- [_SIGKILL] = default_sighandler_term, [_SIGSEGV] = default_sighandler_term,
[_SIGINT] = default_sighandler_term,
+ [_SIGTERM] = default_sighandler_term,
+ [_SIGKILL] = default_sighandler_term,
+ [_SIGSEGV] = default_sighandler_term,
};
// Referenced in kernel/asm/x86/interrupt.S
return 0;
}
+ // TODO: SIG{INT|TERM|KILL|SEGV} should have highest priority.
+ // Terminate the process right here if any of unmaskable signal is
+ // set.
+
if (!__current->sig_handler[sig_selected] &&
!default_handlers[sig_selected]) {
// 如果该信号没有handler,则忽略
+++ /dev/null
-#define __USR_WRAPPER__
-#include <lunaix/ld.h>
-
-int
-usr_pre_init(struct usr_exec_param* param)
-{
- // TODO some inits before executing user program
-
- extern ptr_t environ;
- environ = (ptr_t)param->envp;
-
- return 0;
-}
\ No newline at end of file
.section .text
.global _u_start
- _u_start:
- movl (%esp), %eax
- pushl %eax
- call usr_pre_init
- jnz 1f
-
- popl %eax
-
- pushl 4(%eax) // argv
- pushl (%eax) // argc
-
+ _u_start:
xorl %eax, %eax
call main