#include <klibc/string.h>
#include <lunaix/clock.h>
#include <lunaix/common.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/process.h>
#include <lunaix/spike.h>
#include <lunaix/status.h>
#include <lunaix/syscall.h>
#include <lunaix/syslog.h>
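
/*
 * Duplicate the page directory currently mounted at `mount_point` on behalf
 * of process `pid`: empty, not-present and kernel-space L1 entries are shared
 * verbatim, user-space L2 tables are copied, every referenced data page gets
 * its reference count bumped, and a fresh self-reference entry is installed.
 */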
void*
__dup_pagetable(pid_t pid, uintptr_t mount_point)
{
    void* ptd_pp = pmm_alloc_page(pid, PP_FGPERSIST);
    vmm_set_mapping(VMS_SELF, PG_MOUNT_1, ptd_pp, PG_PREM_RW, VMAP_NULL);

    x86_page_table* ptd = (x86_page_table*)PG_MOUNT_1;
    x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));

    size_t kspace_l1inx = L1_INDEX(KERNEL_MM_BASE);

    for (size_t i = 0; i < PG_MAX_ENTRIES - 1; i++) {
        x86_pte_t ptde = pptd->entry[i];
        // Empty, not-present, or kernel-space L1 entries are copied over
        // verbatim, so the kernel address space stays shared.
        if (!ptde || i >= kspace_l1inx || !(ptde & PG_PRESENT)) {
            ptd->entry[i] = ptde;
            continue;
        }
        // Allocate and mount a fresh L2 table, then copy it entry by entry.
        void* pt_pp = pmm_alloc_page(pid, PP_FGPERSIST);
        vmm_set_mapping(VMS_SELF, PG_MOUNT_2, pt_pp, PG_PREM_RW, VMAP_NULL);

        x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
        x86_page_table* pt = (x86_page_table*)PG_MOUNT_2;

        for (size_t j = 0; j < PG_MAX_ENTRIES; j++) {
            x86_pte_t pte = ppt->entry[j];
            // the data page is now shared: bump its reference count
            pmm_ref_page(pid, PG_ENTRY_ADDR(pte));
            pt->entry[j] = pte;
        }

        ptd->entry[i] = (uintptr_t)pt_pp | PG_ENTRY_FLAGS(ptde);
    }

    // install the self-reference entry, then hand back the new directory
    ptd->entry[PG_MAX_ENTRIES - 1] = NEW_L1_ENTRY(T_SELF_REF_PERM, ptd_pp);

    return ptd_pp;
}
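
/*
 * Illustrative sketch (not used elsewhere in this file): because the last L1
 * entry of every directory points back at the directory itself, mounting a
 * directory at `mnt` exposes its i-th L2 table at mnt | (i << 12) and the
 * directory itself at mnt | (0x3FF << 12), which is exactly the arithmetic
 * __dup_pagetable and __del_pagetable rely on.
 */
static inline x86_page_table*
__mounted_table_of(uintptr_t mnt, size_t l1_index)
{
    return (x86_page_table*)(mnt | (l1_index << 12));
}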
void
__del_pagetable(pid_t pid, uintptr_t mount_point)
{
    x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));

    // only remove the user address space
    for (size_t i = 0; i < L1_INDEX(KERNEL_MM_BASE); i++) {
        x86_pte_t ptde = pptd->entry[i];
        if (!ptde || !(ptde & PG_PRESENT)) {
            continue;
        }

        x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));

        for (size_t j = 0; j < PG_MAX_ENTRIES; j++) {
            x86_pte_t pte = ppt->entry[j];
            // free the 4KB data page
            if ((pte & PG_PRESENT)) {
                pmm_free_page(pid, PG_ENTRY_ADDR(pte));
            }
        }

        // free the L2 page table
        pmm_free_page(pid, PG_ENTRY_ADDR(ptde));
    }

    // free the L1 directory itself
    pmm_free_page(pid, PG_ENTRY_ADDR(pptd->entry[PG_MAX_ENTRIES - 1]));
}
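
/*
 * Public wrapper: clone the virtual memory space of the currently running
 * process (the one mounted at VMS_SELF) on behalf of process `pid`.
 */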
void*
vmm_dup_vmspace(pid_t pid)
{
    return __dup_pagetable(pid, VMS_SELF);
}
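
/*
 * System call entry points. fork() duplicates the calling process: the parent
 * receives the child's pid, while the child observes a return value of 0 (its
 * saved eax is cleared in the duplication routine below). A user-space sketch
 * of the intended semantics, assuming the usual libc-style wrapper:
 *
 *     pid_t child = fork();
 *     if (child == 0) {
 *         // running inside the duplicated child
 *     } else {
 *         // parent: `child` holds the new process's pid
 *     }
 */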
__DEFINE_LXSYSCALL(pid_t, fork)
{
    return dup_proc();
}

__DEFINE_LXSYSCALL(pid_t, getpid)
{
    return __current->pid;
}

__DEFINE_LXSYSCALL(pid_t, getppid)
{
    return __current->parent->pid;
}

__DEFINE_LXSYSCALL(pid_t, getpgid)
{
    return __current->pgid;
}
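
/*
 * setpgid(pid, pgid): move process `pid` (0 meaning the caller) into process
 * group `pgid` (0 meaning a group named after the process itself). Fails with
 * EINVAL if the target process or the group leader cannot be resolved.
 */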
__DEFINE_LXSYSCALL2(int, setpgid, pid_t, pid, pid_t, pgid)
{
    struct proc_info* proc = pid ? get_process(pid) : __current;

    if (!proc) {
        __current->k_status = EINVAL;
        return -1;
    }

    // a pgid of 0 means "use the target process's own pid"
    pgid = pgid ? pgid : proc->pid;

    struct proc_info* gruppenfuhrer = get_process(pgid);

    if (!gruppenfuhrer || proc->pgid == gruppenfuhrer->pid) {
        __current->k_status = EINVAL;
        return -1;
    }

    // move the process onto the new group's member list
    llist_delete(&proc->grp_member);
    llist_append(&gruppenfuhrer->grp_member, &proc->grp_member);

    proc->pgid = pgid;

    return 0;
}
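
/*
 * Prepare the user-space side of a freshly created process: register an
 * anonymous, read-shared stack region and reserve its page-table slots so the
 * page fault handler can populate them on demand.
 */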
void
init_proc_user_space(struct proc_info* pcb)
{
    vmm_mount_pd(VMS_MOUNT_1, pcb->page_table);

    // create the region describing the user stack
    struct mm_region* stack_vm;
    stack_vm = region_create_range(
      USTACK_END, USTACK_SIZE, REGION_RW | REGION_RSHARED | REGION_ANON);

    region_add(&pcb->mm.regions, stack_vm);

    // Reserve the address range only; the actual physical pages will be
    // allocated on demand by the page fault handler.
    for (uintptr_t i = PG_ALIGN(USTACK_END); i < USTACK_TOP; i += PG_SIZE) {
        vmm_set_mapping(VMS_MOUNT_1, i, 0, PG_ALLOW_USER | PG_WRITE, VMAP_NULL);
    }

    // TODO other uspace initialization stuff

    vmm_unmount_pd(VMS_MOUNT_1);
}
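
/*
 * Walk the PTEs of [start_vpn, end_vpn] in both the current and the newly
 * mounted address space and apply the region's sharing policy: read-shared
 * pages become read-only on both sides (copy-on-write), private pages are
 * dropped from the new process.
 */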
void
__mark_region(uintptr_t start_vpn, uintptr_t end_vpn, int attr)
{
    for (size_t i = start_vpn; i <= end_vpn; i++) {
        x86_pte_t* curproc = &PTE_MOUNTED(VMS_SELF, i);
        x86_pte_t* newproc = &PTE_MOUNTED(VMS_MOUNT_1, i);

        if ((attr & REGION_MODE_MASK) == REGION_RSHARED) {
            // Read-shared: mark both mappings read-only, so that any write
            // faults and the copy-on-write policy takes over.
            *curproc = *curproc & ~PG_WRITE;
            *newproc = *newproc & ~PG_WRITE;
        } else {
            // Private page: remove it from the new process altogether.
            *newproc = 0;
        }
    }
}
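
/*
 * Duplicate every open file descriptor of the current process into the new
 * PCB's descriptor table.
 */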
void
__copy_fdtable(struct proc_info* pcb)
{
    for (size_t i = 0; i < VFS_MAX_FD; i++) {
        struct v_fd* fd = __current->fdtable->fds[i];
        if (!fd)
            continue;
        vfs_dup_fd(fd, &pcb->fdtable->fds[i]);
    }
}
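
/*
 * Duplicate the current process on behalf of fork(): clone the PCB fields,
 * FPU state, cwd and descriptor table, copy the memory regions, rebuild the
 * page table via setup_proc_mem(), then apply per-region copy-on-write
 * marking. (The `dup_proc` name and signature used below are assumed.)
 */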
pid_t
dup_proc()
{
    struct proc_info* pcb = alloc_process();
    pcb->mm.u_heap = __current->mm.u_heap;
    pcb->intr_ctx = __current->intr_ctx;
    pcb->parent = __current;

    memcpy(pcb->fxstate, __current->fxstate, 512);

    if (__current->cwd) {
        pcb->cwd = __current->cwd;
        vfs_ref_dnode(pcb->cwd);
    }

    __copy_fdtable(pcb);
    region_copy(&__current->mm.regions, &pcb->mm.regions);
    setup_proc_mem(pcb, VMS_SELF);

    // Walk the copied mm_regions and adjust the new page table accordingly.
    struct mm_region *pos, *n;
    llist_for_each(pos, n, &pcb->mm.regions, head)
    {
        // write-shared regions keep their writable mappings as-is
        if ((pos->attr & REGION_WSHARED)) {
            continue;
        }
        uintptr_t start_vpn = pos->start >> 12;
        uintptr_t end_vpn = pos->end >> 12;
        __mark_region(start_vpn, end_vpn, pos->attr);
    }

    vmm_unmount_pd(VMS_MOUNT_1);

    // the child must observe fork() returning 0
    pcb->intr_ctx.registers.eax = 0;

    /* ... */

    return pcb->pid;
}
extern void __kernel_end;
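
/*
 * Build the memory image of a new process: duplicate the full page table
 * (kernel space shared, user space copied), give the process a private copy
 * of its kernel stack pages, and record the new page directory in the PCB.
 */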
void
setup_proc_mem(struct proc_info* proc, uintptr_t usedMnt)
{
    // duplicate the page table in full, kernel space included
    pid_t pid = proc->pid;
    void* pt_copy = __dup_pagetable(pid, usedMnt);

    vmm_mount_pd(VMS_MOUNT_1, pt_copy); // mount the new page table for editing

    // give the new process a private copy of the kernel stack
    for (size_t i = KSTACK_START >> 12; i <= KSTACK_TOP >> 12; i++) {
        volatile x86_pte_t* ppte = &PTE_MOUNTED(VMS_MOUNT_1, i);
        /*
         * The TLB kept serving the stale PTE even after the entry was
         * rewritten, which took hours of debugging to track down. The
         * mapping has to be invalidated explicitly before it is read
         * back and modified below.
         */
        cpu_invplg(ppte);

        x86_pte_t p = *ppte;
        void* ppa = vmm_dup_page(pid, PG_ENTRY_ADDR(p));
        pmm_free_page(pid, PG_ENTRY_ADDR(p));
        *ppte = (p & 0xfff) | (uintptr_t)ppa;
    }
    // No mm_region needs to be created for the kernel: kernel code and data
    // are reachable only through system calls, and any illegal access leaves
    // eip outside every region, which is reported as a segmentation fault.
    // The remaining regions cannot be known yet, since they depend on the
    // user program; they are handled later on.
    proc->page_table = pt_copy;
}