X-Git-Url: https://scm.lunaixsky.com/lunaix-os.git/blobdiff_plain/9b8e0c494de6b447b44454112748f702dffec90d..0fd474df7001837bde53da0e42e83081827c9641:/lunaix-os/kernel/process/process.c?ds=sidebyside

diff --git a/lunaix-os/kernel/process/process.c b/lunaix-os/kernel/process/process.c
index 1647e10..2a24f50 100644
--- a/lunaix-os/kernel/process/process.c
+++ b/lunaix-os/kernel/process/process.c
@@ -1,97 +1,95 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
+#include
+#include
+
+#include
+#include
 
 LOG_MODULE("PROC")
 
-void*
-__dup_pagetable(pid_t pid, uintptr_t mount_point)
+int
+spawn_process(struct thread** created, ptr_t entry, bool with_ustack)
 {
-    void* ptd_pp = pmm_alloc_page(pid, PP_FGPERSIST);
-    vmm_set_mapping(VMS_SELF, PG_MOUNT_1, ptd_pp, PG_PREM_RW, VMAP_NULL);
+    struct proc_info* kproc = alloc_process();
+    struct proc_mm* mm = vmspace(kproc);
 
-    x86_page_table* ptd = PG_MOUNT_1;
-    x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
+    procvm_initvms_mount(mm);
 
-    size_t kspace_l1inx = L1_INDEX(KERNEL_MM_BASE);
+    struct thread* kthread = create_thread(kproc, with_ustack);
 
-    for (size_t i = 0; i < PG_MAX_ENTRIES - 1; i++) {
+    if (!kthread) {
+        procvm_unmount(mm);
+        delete_process(kproc);
+        return -1;
+    }
 
-        x86_pte_t ptde = pptd->entry[i];
-        // Empty or not-present L1 entries are copied over as-is.
-        // The kernel address space is shared directly.
-        if (!ptde || i >= kspace_l1inx || !(ptde & PG_PRESENT)) {
-            ptd->entry[i] = ptde;
-            continue;
-        }
+    commit_process(kproc);
+    start_thread(kthread, entry);
 
-        // copy the L2 page table
-        void* pt_pp = pmm_alloc_page(pid, PP_FGPERSIST);
-        vmm_set_mapping(VMS_SELF, PG_MOUNT_2, pt_pp, PG_PREM_RW, VMAP_NULL);
+    procvm_unmount(mm);
 
-        x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
-        x86_page_table* pt = PG_MOUNT_2;
+    if (created) {
+        *created = kthread;
+    }
 
-        for (size_t j = 0; j < PG_MAX_ENTRIES; j++) {
-            x86_pte_t pte = ppt->entry[j];
-            pmm_ref_page(pid, PG_ENTRY_ADDR(pte));
-            pt->entry[j] = pte;
-        }
+    return 0;
+}
 
-        ptd->entry[i] = (uintptr_t)pt_pp | PG_ENTRY_FLAGS(ptde);
+int
+spawn_process_usr(struct thread** created, char* path,
+                  const char** argv, const char** envp)
+{
+    // FIXME remote injection of user stack not yet implemented
+
+    struct proc_info* proc = alloc_process();
+    struct proc_mm* mm = vmspace(proc);
+
+    assert(!kernel_process(proc));
+
+    procvm_initvms_mount(mm);
+
+    int errno = 0;
+    struct thread* main_thread;
+    if (!(main_thread = create_thread(proc, true))) {
+        errno = ENOMEM;
+        goto fail;
     }
 
-    ptd->entry[PG_MAX_ENTRIES - 1] = NEW_L1_ENTRY(T_SELF_REF_PERM, ptd_pp);
+    struct exec_host container;
+    exec_init_container(&container, main_thread, VMS_MOUNT_1, argv, envp);
+    if ((errno = exec_load_byname(&container, path))) {
+        goto fail;
+    }
 
-    return ptd_pp;
-}
+    commit_process(proc);
+    start_thread(main_thread, container.exe.entry);
 
-void
-__del_pagetable(pid_t pid, uintptr_t mount_point)
-{
-    x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
-
-    // only remove user address space
-    for (size_t i = 0; i < L1_INDEX(KERNEL_MM_BASE); i++) {
-        x86_pte_t ptde = pptd->entry[i];
-        if (!ptde || !(ptde & PG_PRESENT)) {
-            continue;
-        }
-
-        x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
-
-        for (size_t j = 0; j < PG_MAX_ENTRIES; j++) {
-            x86_pte_t pte = ppt->entry[j];
-            // free the 4KB data page
-            if ((pte & PG_PRESENT)) {
-                pmm_free_page(pid, PG_ENTRY_ADDR(pte));
-            }
-        }
-        // free the L2 page table
-        pmm_free_page(pid, PG_ENTRY_ADDR(ptde));
+    if (created) {
+        *created = main_thread;
     }
-    // free the L1 directory
-    pmm_free_page(pid, PG_ENTRY_ADDR(pptd->entry[PG_MAX_ENTRIES - 1]));
-}
 
-void*
-vmm_dup_vmspace(pid_t pid)
-{
-    return __dup_pagetable(pid, VMS_SELF);
+    procvm_unmount(mm);
+    return 0;
+
+fail:
+    procvm_unmount(mm);
+    delete_process(proc);
+    return errno;
 }
 
-__DEFINE_LXSYSCALL(pid_t, fork)
-{
-    return dup_proc();
+
+ptr_t proc_vmroot() {
+    return __current->mm->vmroot;
 }
 
 __DEFINE_LXSYSCALL(pid_t, getpid)
@@ -114,7 +112,7 @@ __DEFINE_LXSYSCALL2(int, setpgid, pid_t, pid, pid_t, pgid)
     struct proc_info* proc = pid ? get_process(pid) : __current;
 
     if (!proc) {
-        __current->k_status = EINVAL;
+        syscall_result(EINVAL);
         return -1;
     }
 
@@ -123,7 +121,7 @@ __DEFINE_LXSYSCALL2(int, setpgid, pid_t, pid, pid_t, pgid)
     struct proc_info* gruppenfuhrer = get_process(pgid);
 
     if (!gruppenfuhrer || proc->pgid == gruppenfuhrer->pid) {
-        __current->k_status = EINVAL;
+        syscall_result(EINVAL);
         return -1;
     }
 
@@ -134,141 +132,121 @@ __DEFINE_LXSYSCALL2(int, setpgid, pid_t, pid, pid_t, pgid)
     return 0;
 }
 
-void
-init_proc_user_space(struct proc_info* pcb)
-{
-    vmm_mount_pd(VMS_MOUNT_1, pcb->page_table);
+static inline bool
+__can_change_real_id(const struct user_scope* procu, caps_t id_cap) {
+    if (uscope_with_capability(procu, id_cap)) {
+        return true;
+    }
 
-    /*--- allocate the user stack ---*/
+    if (check_current_acl(0, 0) != ACL_NO_MATCH) {
+        return true;
+    }
+
+    return false;
+}
 
-    struct mm_region* stack_vm;
+__DEFINE_LXSYSCALL1(int, setuid, uid_t, uid)
+{
+    struct user_scope* procu;
 
-    stack_vm = region_create_range(
-      USTACK_END, USTACK_SIZE, REGION_RW | REGION_RSHARED | REGION_ANON);
-    // register the user stack region
-    region_add(&pcb->mm.regions, stack_vm);
+    procu = current_user_scope();
 
-    // Reserve the address space; physical pages are allocated on demand by the page fault handler.
-    for (uintptr_t i = PG_ALIGN(USTACK_END); i < USTACK_TOP; i += PG_SIZE) {
-        vmm_set_mapping(VMS_MOUNT_1, i, 0, PG_ALLOW_USER | PG_WRITE, VMAP_NULL);
+    if (__can_change_real_id(procu, CAP_SETUID))
+    {
+        procu->ruid = uid;
     }
 
-    // TODO other uspace initialization stuff
+    __current->suid = uid;
+    __current->euid = uid;
 
-    vmm_unmount_pd(VMS_MOUNT_1);
+    return 0;
 }
 
-void
-__mark_region(uintptr_t start_vpn, uintptr_t end_vpn, int attr)
+__DEFINE_LXSYSCALL1(int, setgid, gid_t, gid)
 {
-    for (size_t i = start_vpn; i <= end_vpn; i++) {
-        x86_pte_t* curproc = &PTE_MOUNTED(VMS_SELF, i);
-        x86_pte_t* newproc = &PTE_MOUNTED(VMS_MOUNT_1, i);
-        cpu_invplg(newproc);
-
-        if ((attr & REGION_MODE_MASK) == REGION_RSHARED) {
-            // Read-shared: mark both entries read-only, so any write triggers the COW policy.
-            cpu_invplg(curproc);
-            cpu_invplg(i << 12);
-            *curproc = *curproc & ~PG_WRITE;
-            *newproc = *newproc & ~PG_WRITE;
-        } else {
-            // Private page: remove it from the new process.
-            *newproc = 0;
-        }
+    struct user_scope* procu;
+
+    procu = current_user_scope();
+
+    if (__can_change_real_id(procu, CAP_SETGID))
+    {
+        procu->rgid = gid;
     }
+
+    __current->sgid = gid;
+    __current->egid = gid;
+
+    return 0;
 }
 
-void
-__copy_fdtable(struct proc_info* pcb)
+__DEFINE_LXSYSCALL1(int, seteuid, uid_t, euid)
 {
-    for (size_t i = 0; i < VFS_MAX_FD; i++) {
-        struct v_fd* fd = __current->fdtable->fds[i];
-        if (!fd)
-            continue;
-        vfs_dup_fd(fd, &pcb->fdtable->fds[i]);
-    }
+    __current->euid = euid;
+
+    return 0;
 }
 
-pid_t
-dup_proc()
+__DEFINE_LXSYSCALL1(int, setegid, gid_t, egid)
 {
-    struct proc_info* pcb = alloc_process();
-    pcb->mm.u_heap = __current->mm.u_heap;
-    pcb->intr_ctx = __current->intr_ctx;
-    pcb->parent = __current;
+    __current->egid = egid;
 
-    memcpy(pcb->fxstate, __current->fxstate, 512);
-
-    if (__current->cwd) {
-        pcb->cwd = __current->cwd;
-        vfs_ref_dnode(pcb->cwd);
-    }
+    return 0;
+}
 
-    __copy_fdtable(pcb);
-    region_copy(&__current->mm.regions, &pcb->mm.regions);
+__DEFINE_LXSYSCALL2(int, setgroups, const gid_t*, gids, unsigned int, len)
+{
+    struct user_scope* procu;
 
-    setup_proc_mem(pcb, VMS_SELF);
+    procu = current_user_scope();
 
-    // further configure the page table according to each mm_region
+    if (check_current_acl(0, 0) == ACL_NO_MATCH) {
+        return EPERM;
+    }
 
-    struct mm_region *pos, *n;
-    llist_for_each(pos, n, &pcb->mm.regions, head)
-    {
-        // Write-shared regions need no treatment.
-        if ((pos->attr & REGION_WSHARED)) {
-            continue;
-        }
-
-        uintptr_t start_vpn = pos->start >> 12;
-        uintptr_t end_vpn = pos->end >> 12;
-        __mark_region(start_vpn, end_vpn, pos->attr);
+    if (uscope_with_capability(procu, CAP_SETGID)) {
+        return EPERM;
    }
 
-    vmm_unmount_pd(VMS_MOUNT_1);
+    return uscope_setgroups(procu, gids, len);
+}
 
-    // just like fork, this returns twice.
-    pcb->intr_ctx.registers.eax = 0;
-    commit_process(pcb);
+__DEFINE_LXSYSCALL(uid_t, getuid)
+{
+    return current_user_scope()->ruid;
+}
 
-    return pcb->pid;
+__DEFINE_LXSYSCALL(gid_t, getgid)
+{
+    return current_user_scope()->rgid;
 }
 
-extern void __kernel_end;
+__DEFINE_LXSYSCALL(uid_t, geteuid)
+{
+    return __current->euid;
+}
 
-void
-setup_proc_mem(struct proc_info* proc, uintptr_t usedMnt)
+__DEFINE_LXSYSCALL(gid_t, getegid)
 {
-    // copy the entire kernel page table
-    pid_t pid = proc->pid;
-    void* pt_copy = __dup_pagetable(pid, usedMnt);
-
-    vmm_mount_pd(VMS_MOUNT_1, pt_copy); // mount the new process's page table at mount point #2
-
-    // copy the kernel stack
-    for (size_t i = KSTACK_START >> 12; i <= KSTACK_TOP >> 12; i++) {
-        volatile x86_pte_t* ppte = &PTE_MOUNTED(VMS_MOUNT_1, i);
-
-        /*
-            This is a fucking nightmare, the TLB caching keep the rewrite to PTE
-            from updating. Even the Nightmare Moon the Evil is far less nasty
-            than this. It took me hours of debugging to figure this out.
-
-            In the name of Celestia our glorious goddess, I will fucking HATE
-            the TLB for the rest of my LIFE!
-        */
-        cpu_invplg(ppte);
-
-        x86_pte_t p = *ppte;
-        void* ppa = vmm_dup_page(pid, PG_ENTRY_ADDR(p));
-        pmm_free_page(pid, PG_ENTRY_ADDR(p));
-        *ppte = (p & 0xfff) | (uintptr_t)ppa;
-    }
+    return __current->egid;
+}
+
+__DEFINE_LXSYSCALL2(int, getgroups, gid_t*, out_buf, unsigned int, len)
+{
+    struct user_scope* procu;
+    struct ugroup_obj* gobj;
+
+    procu = current_user_scope();
+    gobj = user_groups(procu);
 
-    // We don't need to allocate regions for the kernel, since kernel code and data segments
-    // can only be reached via syscalls; any illegal access lands eip outside the regions, hence a segmentation fault.
+    assert(gobj);
+    len = MIN(gobj->maxcap, len);
 
-    // As for the other regions, we cannot know them yet, since that requires information about the user program. Left for later.
-    proc->page_table = pt_copy;
-}
\ No newline at end of file
+    unsigned i = 0;
+    for (; i < len && gobj->list[i] != grp_list_end; i++)
+    {
+        out_buf[i] = gobj->list[i];
+    }
+
+    return i + 1;
+}
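
A minimal usage sketch (not part of the patch above): the hunks retire the old fork()/dup_proc()/__dup_pagetable() path in favour of explicit spawn primitives. Only the spawn_process() and spawn_process_usr() signatures are taken from the diff; the worker entry point, the "/init" path, the argv/envp contents and the error handling below are illustrative assumptions.

    /* Sketch only: bring up one kernel-side process and the first user process. */
    static void
    bringup_example(ptr_t worker_entry)
    {
        struct thread* kworker = NULL;
        struct thread* init_main = NULL;

        /* Kernel-side process: entry runs in kernel space, so no user stack is requested. */
        if (spawn_process(&kworker, worker_entry, false)) {
            return; /* hypothetical error handling */
        }

        /* First user process: the exec loader resolves the image by path. */
        char init_path[]   = "/init";            /* assumed location of the init program */
        const char* argv[] = { "/init", NULL };  /* assumed argument vector */
        const char* envp[] = { NULL };

        int err = spawn_process_usr(&init_main, init_path, argv, envp);
        if (err) {
            /* err carries the errno-style code propagated from the loader */
            return;
        }
    }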