X-Git-Url: https://scm.lunaixsky.com/lunaix-os.git/blobdiff_plain/61a1daa59589212608039e2734009870818bacd3..946c3fdd31300074cc78841795bd47af908ddddb:/lunaix-os/kernel/process/process.c?ds=sidebyside

diff --git a/lunaix-os/kernel/process/process.c b/lunaix-os/kernel/process/process.c
index e30e461..4e8fa0a 100644
--- a/lunaix-os/kernel/process/process.c
+++ b/lunaix-os/kernel/process/process.c
@@ -1,6 +1,8 @@
+#include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -13,13 +15,13 @@
 LOG_MODULE("PROC")
 
-void*
-__dup_pagetable(pid_t pid, uintptr_t mount_point)
+ptr_t
+__dup_pagetable(pid_t pid, ptr_t mount_point)
 {
-    void* ptd_pp = pmm_alloc_page(pid, PP_FGPERSIST);
-    vmm_set_mapping(PD_REFERENCED, PG_MOUNT_1, ptd_pp, PG_PREM_RW, VMAP_NULL);
+    ptr_t ptd_pp = pmm_alloc_page(pid, PP_FGPERSIST);
+    vmm_set_mapping(VMS_SELF, PG_MOUNT_1, ptd_pp, PG_PREM_RW, VMAP_NULL);
 
-    x86_page_table* ptd = PG_MOUNT_1;
+    x86_page_table* ptd = (x86_page_table*)PG_MOUNT_1;
     x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
 
     size_t kspace_l1inx = L1_INDEX(KERNEL_MM_BASE);
 
@@ -35,12 +37,11 @@ __dup_pagetable(pid_t pid, uintptr_t mount_point)
         }
 
         // copy the L2 page table
-        void* pt_pp = pmm_alloc_page(pid, PP_FGPERSIST);
-        vmm_set_mapping(
-          PD_REFERENCED, PG_MOUNT_2, pt_pp, PG_PREM_RW, VMAP_NULL);
+        ptr_t pt_pp = pmm_alloc_page(pid, PP_FGPERSIST);
+        vmm_set_mapping(VMS_SELF, PG_MOUNT_2, pt_pp, PG_PREM_RW, VMAP_NULL);
 
         x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
-        x86_page_table* pt = PG_MOUNT_2;
+        x86_page_table* pt = (x86_page_table*)PG_MOUNT_2;
 
         for (size_t j = 0; j < PG_MAX_ENTRIES; j++) {
             x86_pte_t pte = ppt->entry[j];
@@ -48,7 +49,7 @@ __dup_pagetable(pid_t pid, uintptr_t mount_point)
             pt->entry[j] = pte;
         }
 
-        ptd->entry[i] = (uintptr_t)pt_pp | PG_ENTRY_FLAGS(ptde);
+        ptd->entry[i] = (ptr_t)pt_pp | PG_ENTRY_FLAGS(ptde);
     }
 
     ptd->entry[PG_MAX_ENTRIES - 1] = NEW_L1_ENTRY(T_SELF_REF_PERM, ptd_pp);
@@ -57,7 +58,7 @@ __dup_pagetable(pid_t pid, uintptr_t mount_point)
 }
 
 void
-__del_pagetable(pid_t pid, uintptr_t mount_point)
+__del_pagetable(pid_t pid, ptr_t mount_point)
 {
     x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
 
@@ -84,10 +85,10 @@ __del_pagetable(pid_t pid, uintptr_t mount_point)
     pmm_free_page(pid, PG_ENTRY_ADDR(pptd->entry[PG_MAX_ENTRIES - 1]));
 }
 
-void*
+ptr_t
 vmm_dup_vmspace(pid_t pid)
 {
-    return __dup_pagetable(pid, PD_REFERENCED);
+    return __dup_pagetable(pid, VMS_SELF);
 }
 
 __DEFINE_LXSYSCALL(pid_t, fork)
@@ -123,7 +124,7 @@ __DEFINE_LXSYSCALL2(int, setpgid, pid_t, pid, pid_t, pgid)
 
     struct proc_info* gruppenfuhrer = get_process(pgid);
 
-    if (!gruppenfuhrer || proc->pgid == proc->pid) {
+    if (!gruppenfuhrer || proc->pgid == gruppenfuhrer->pid) {
         __current->k_status = EINVAL;
         return -1;
     }
@@ -135,39 +136,54 @@ __DEFINE_LXSYSCALL2(int, setpgid, pid_t, pid, pid_t, pgid)
     return 0;
 }
 
+void
+__stack_copied(struct mm_region* region)
+{
+    mm_index((void**)&region->proc_vms->stack, region);
+}
+
 void
 init_proc_user_space(struct proc_info* pcb)
 {
-    vmm_mount_pd(PD_MOUNT_1, pcb->page_table);
+    vmm_mount_pd(VMS_MOUNT_1, pcb->page_table);
 
     /*--- allocate the user stack ---*/
 
-    // register the user stack region
-    region_add(
-      &pcb->mm.regions, USTACK_END, USTACK_TOP, REGION_RW | REGION_RSHARED);
-
-    // reserve the address space; the actual physical pages will be allocated on demand by the page fault handler.
-    for (uintptr_t i = PG_ALIGN(USTACK_END); i < USTACK_TOP; i += PG_SIZE) {
-        vmm_set_mapping(PD_MOUNT_1, i, 0, PG_ALLOW_USER | PG_WRITE, VMAP_NULL);
+    struct mm_region* mapped;
+    struct mmap_param param = { .vms_mnt = VMS_MOUNT_1,
+                                .pvms = &pcb->mm,
+                                .mlen = USTACK_SIZE,
+                                .proct = PROT_READ | PROT_WRITE,
+                                .flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
+                                .type = REGION_TYPE_STACK };
+
+    int status = 0;
+    if ((status = mem_map(NULL, &mapped, USTACK_END, NULL, &param))) {
+        kprint_panic("fail to alloc user stack: %d", status);
     }
 
+    mapped->region_copied = __stack_copied;
+    mm_index((void**)&pcb->mm.stack, mapped);
+
     // TODO other uspace initialization stuff
 
-    vmm_unmount_pd(PD_MOUNT_1);
+    vmm_unmount_pd(VMS_MOUNT_1);
 }
 
 void
-__mark_region(uintptr_t start_vpn, uintptr_t end_vpn, int attr)
+__mark_region(ptr_t start_vpn, ptr_t end_vpn, int attr)
 {
     for (size_t i = start_vpn; i <= end_vpn; i++) {
-        x86_pte_t* curproc = &PTE_MOUNTED(PD_REFERENCED, i);
-        x86_pte_t* newproc = &PTE_MOUNTED(PD_MOUNT_1, i);
-        cpu_invplg(newproc);
+        x86_pte_t* curproc = &PTE_MOUNTED(VMS_SELF, i);
+        x86_pte_t* newproc = &PTE_MOUNTED(VMS_MOUNT_1, i);
+
+        cpu_invplg((ptr_t)newproc);
 
         if ((attr & REGION_MODE_MASK) == REGION_RSHARED) {
             // if read-shared, mark both as read-only, so that any write will trigger the COW policy.
-            cpu_invplg(curproc);
-            cpu_invplg(i << 12);
+            cpu_invplg((ptr_t)curproc);
+            cpu_invplg((ptr_t)(i << 12));
+
             *curproc = *curproc & ~PG_WRITE;
             *newproc = *newproc & ~PG_WRITE;
         } else {
@@ -192,34 +208,43 @@ pid_t
 dup_proc()
 {
     struct proc_info* pcb = alloc_process();
-    pcb->mm.u_heap = __current->mm.u_heap;
     pcb->intr_ctx = __current->intr_ctx;
     pcb->parent = __current;
 
+    memcpy(pcb->fxstate, __current->fxstate, 512);
+
+    if (__current->cwd) {
+        pcb->cwd = __current->cwd;
+        vfs_ref_dnode(pcb->cwd);
+    }
+
     __copy_fdtable(pcb);
 
-    region_copy(&__current->mm.regions, &pcb->mm.regions);
+    region_copy(&__current->mm, &pcb->mm);
 
-    setup_proc_mem(pcb, PD_REFERENCED);
+    /*
+     * store the return value for forked process.
+     * this will be implicit carried over after kernel stack is copied.
+     */
+    store_retval(0);
+
+    copy_kernel_stack(pcb, VMS_SELF);
 
     // further configure the page table according to the mm_regions
     struct mm_region *pos, *n;
-    llist_for_each(pos, n, &pcb->mm.regions.head, head)
+    llist_for_each(pos, n, &pcb->mm.regions, head)
     {
         // if write-shared, leave it as is.
        if ((pos->attr & REGION_WSHARED)) {
            continue;
        }
 
-        uintptr_t start_vpn = pos->start >> 12;
-        uintptr_t end_vpn = pos->end >> 12;
+        ptr_t start_vpn = pos->start >> 12;
+        ptr_t end_vpn = pos->end >> 12;
        __mark_region(start_vpn, end_vpn, pos->attr);
     }
 
-    vmm_unmount_pd(PD_MOUNT_1);
-
-    // just like fork, this returns twice.
-    pcb->intr_ctx.registers.eax = 0;
+    vmm_unmount_pd(VMS_MOUNT_1);
 
     commit_process(pcb);
 
@@ -229,17 +254,17 @@ dup_proc()
 extern void __kernel_end;
 
 void
-setup_proc_mem(struct proc_info* proc, uintptr_t usedMnt)
+copy_kernel_stack(struct proc_info* proc, ptr_t usedMnt)
 {
     // copy the entire kernel page table
     pid_t pid = proc->pid;
-    void* pt_copy = __dup_pagetable(pid, usedMnt);
+    ptr_t pt_copy = __dup_pagetable(pid, usedMnt);
 
-    vmm_mount_pd(PD_MOUNT_1, pt_copy); // mount the new process's page table at mount point #2
+    vmm_mount_pd(VMS_MOUNT_1, pt_copy); // mount the new process's page table at mount point #2
 
     // copy the kernel stack
     for (size_t i = KSTACK_START >> 12; i <= KSTACK_TOP >> 12; i++) {
-        volatile x86_pte_t* ppte = &PTE_MOUNTED(PD_MOUNT_1, i);
+        volatile x86_pte_t* ppte = &PTE_MOUNTED(VMS_MOUNT_1, i);
 
         /*
            This is a fucking nightmare, the TLB caching keep the rewrite to PTE
@@ -249,17 +274,13 @@ setup_proc_mem(struct proc_info* proc, uintptr_t usedMnt)
           In the name of Celestia our glorious goddess, I will fucking HATE the
           TLB for the rest of my LIFE!
         */
-        cpu_invplg(ppte);
+        cpu_invplg((ptr_t)ppte);
 
         x86_pte_t p = *ppte;
-        void* ppa = vmm_dup_page(pid, PG_ENTRY_ADDR(p));
+        ptr_t ppa = vmm_dup_page(pid, PG_ENTRY_ADDR(p));
         pmm_free_page(pid, PG_ENTRY_ADDR(p));
-        *ppte = (p & 0xfff) | (uintptr_t)ppa;
+        *ppte = (p & 0xfff) | ppa;
     }
 
-    // We do not need to allocate regions for the kernel, since all kernel code and data segments can only be reached through syscalls; any illegal access
-    // will land eip outside of any region, hence a segmentation fault.
-
-    // As for the other regions, we cannot know them yet, since that requires knowledge of the user program. We leave this for later.
     proc->page_table = pt_copy;
 }
\ No newline at end of file