#include <lunaix/mm/procvm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/region.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/process.h>

#include <sys/mm/mm_defs.h>

#include <klibc/string.h>
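
/*
 * Create an empty process memory descriptor: no regions, no vmroot.
 * The address space itself is populated later by one of the
 * *vms_mount routines below.
 */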
struct proc_mm*
procvm_create(struct proc_info* proc) {
    struct proc_mm* mm = vzalloc(sizeof(struct proc_mm));

    mm->proc = proc;
    llist_init_head(&mm->regions);

    return mm;
}
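
/*
 * vmscpy: clone the virtual memory space mounted at src_mnt into the
 * one mounted at dest_mnt by walking both page-table trees in lockstep.
 * User-half leaf mappings are shared and their frames pinned via
 * pmm_ref_page(); intermediate tables are re-allocated on the dest
 * side. Kernel-resident L0 entries are carried over verbatim, except
 * the last two (mount slot and self-reference). When only_kernel is
 * set, the user half is skipped entirely. Returns the physical address
 * of the new root table, read back through the self-reference entry.
 */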
static ptr_t
vmscpy(ptr_t dest_mnt, ptr_t src_mnt, bool only_kernel)
{
    pte_t* ptep_dest = mkl0tep(mkptep_va(dest_mnt, 0));
    pte_t* ptep = mkl0tep(mkptep_va(src_mnt, 0));
    pte_t* ptepd_kernel = mkl0tep(mkptep_va(dest_mnt, KERNEL_RESIDENT));
    pte_t* ptep_kernel = mkl0tep(mkptep_va(src_mnt, KERNEL_RESIDENT));

    // Build the self-reference on dest vms
    pte_t* ptep_sms = mkptep_va(VMS_SELF, (ptr_t)ptep_dest);
    pte_t* ptep_ssm = mkptep_va(VMS_SELF, (ptr_t)ptep_sms);
    pte_t pte_sms = mkpte_prot(KERNEL_DATA);

    pte_sms = vmm_alloc_page(ptep_ssm, pte_sms);
    set_pte(ptep_sms, pte_sms);

    cpu_flush_page((ptr_t)dest_mnt);

    if (only_kernel) {
        // fast-forward both cursors past the user half
        ptep += ptep_vfn(ptep_kernel);
        ptep_dest += ptep_vfn(ptep_kernel);
    }

    int level = 0;
    while (ptep < ptep_kernel)
    {
        pte_t pte = pte_at(ptep);
        ptr_t pa = pte_paddr(pte);

        if (pte_isnull(pte)) {
            goto cont;
        }

        if (pt_last_level(level) || pte_huge(pte)) {
            set_pte(ptep_dest, pte);
            if (pte_isloaded(pte))
                pmm_ref_page(pa);
        }
        else if (!pt_last_level(level)) {
            vmm_alloc_page(ptep_dest, pte);
            ptep = ptep_step_into(ptep);
            ptep_dest = ptep_step_into(ptep_dest);
            level++;
            continue;
        }

    cont:
        while (ptep_vfn(ptep) == MAX_PTEN - 1) {
            ptep = ptep_step_out(ptep);
            ptep_dest = ptep_step_out(ptep_dest);
            level--;
        }
        ptep++;
        ptep_dest++;
    }

    // Ensure we step back to L0T
    assert(ptep_dest == ptepd_kernel);

    // Carry over the kernel (exclude the last two entries: the mount
    // slot and the self-reference)
    while (ptep_vfn(ptep) < MAX_PTEN - 2) {
        pte_t pte = pte_at(ptep);
        assert(!pte_isnull(pte));

        set_pte(ptep_dest, pte);
        pmm_ref_page(pte_paddr(pte));

        ptep++;
        ptep_dest++;
    }

    return pte_paddr(*(ptep_dest + 1));
}
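
/*
 * vmsfree: release every loaded frame in the user half of the vms
 * mounted at vm_mnt, free each page-table page on the way back out,
 * then free the root table through its self-reference entry. The
 * volatile cursor and the optimize("O0") attribute keep the compiler
 * from caching PTE reads done through the recursive mapping.
 */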
static void optimize("O0")
vmsfree(ptr_t vm_mnt)
{
    pte_t* ptep_head = mkl0tep(mkptep_va(vm_mnt, 0));
    pte_t* ptep_kernel = mkl0tep(mkptep_va(vm_mnt, KERNEL_RESIDENT));

    int level = 0;
    volatile pte_t* ptep = ptep_head;
    while (ptep < ptep_kernel)
    {
        pte_t pte = pte_at(ptep);
        ptr_t pa = pte_paddr(pte);

        if (pte_isnull(pte)) {
            goto cont;
        }
        if (!pt_last_level(level) && !pte_huge(pte)) {
            ptep = ptep_step_into(ptep);
            level++;
            continue;
        }
        if (pte_isloaded(pte))
            pmm_free_any(pa);

    cont:
        while (ptep_vfn(ptep) == MAX_PTEN - 1) {
            ptep = ptep_step_out(ptep);
            // the table just walked is now unreferenced, free it too
            pmm_free_any(pte_paddr(pte_at(ptep)));
            level--;
        }
        ptep++;
    }

    ptr_t self_pa = pte_paddr(ptep_head[MAX_PTEN - 1]);
    pmm_free_any(self_pa);
}
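
/*
 * At most one foreign (guest) vms may be attached to the current
 * process at any time; these helpers maintain that invariant.
 */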
static void
__attach_to_current_vms(struct proc_mm* guest_mm)
{
    struct proc_mm* mm_current = vmspace(__current);

    assert(!mm_current->guest_mm);
    mm_current->guest_mm = guest_mm;
}

static void
__detach_from_current_vms(struct proc_mm* guest_mm)
{
    struct proc_mm* mm_current = vmspace(__current);

    assert(mm_current->guest_mm == guest_mm);
    mm_current->guest_mm = NULL;
}
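
/*
 * fork path: duplicate the current vms (user mappings included) into
 * mm, mount it at VMS_MOUNT_1, and copy heap state plus all region
 * descriptors.
 */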
void
procvm_dupvms_mount(struct proc_mm* mm) {
    struct proc_mm* mm_current = vmspace(__current);

    __attach_to_current_vms(mm);

    mm->heap = mm_current->heap;
    mm->vm_mnt = VMS_MOUNT_1;
    mm->vmroot = vmscpy(VMS_MOUNT_1, VMS_SELF, false);

    region_copy_mm(mm_current, mm);
}
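
/*
 * Mount an existing vms at VMS_MOUNT_1 so the current process can
 * manipulate it; procvm_unmount reverses this.
 */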
void
procvm_mount(struct proc_mm* mm)
{
    vms_mount(VMS_MOUNT_1, mm->vmroot);

    __attach_to_current_vms(mm);

    mm->vm_mnt = VMS_MOUNT_1;
}

void
procvm_unmount(struct proc_mm* mm)
{
    vms_unmount(VMS_MOUNT_1);

    struct proc_mm* mm_current = vmspace(__current);
    mm_current->guest_mm = NULL;

    mm->vm_mnt = 0;
}
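
/*
 * exec path: build a fresh vms containing only the kernel mappings
 * (only_kernel=true) and mount it at VMS_MOUNT_1.
 */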
void
procvm_initvms_mount(struct proc_mm* mm)
{
    __attach_to_current_vms(mm);

    mm->vm_mnt = VMS_MOUNT_1;
    mm->vmroot = vmscpy(VMS_MOUNT_1, VMS_SELF, true);
}
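
/*
 * Tear down a mounted guest vms for good: sync every region back to
 * its backing store, then release the address space and detach.
 */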
void
procvm_unmount_release(struct proc_mm* mm) {
    ptr_t vm_mnt = mm->vm_mnt;

    struct mm_region *pos, *n;
    llist_for_each(pos, n, &mm->regions, head)
    {
        mem_sync_pages(vm_mnt, pos, pos->start, pos->end - pos->start, 0);
    }

    // assumed order: sync regions, free user page tables, drop the mount
    vmsfree(vm_mnt);
    vms_unmount(VMS_MOUNT_1);
    __detach_from_current_vms(mm);
}
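
/*
 * Operating on the current process's own vms needs no real mount:
 * vm_mnt simply aliases VMS_SELF.
 */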
void
procvm_mount_self(struct proc_mm* mm)
{
    assert(!mm->guest_mm);

    mm->vm_mnt = VMS_SELF;
}

void
procvm_unmount_self(struct proc_mm* mm)
{
    assert(mm->vm_mnt == VMS_SELF);

    mm->vm_mnt = 0;
}
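
/*
 * Map a window of another process's address space into the local
 * kernel VA range starting at PG_MOUNT_4_END + 1, so the kernel can
 * write into it directly (for instance, to prepare the user stack of
 * a process being spawned). Remote pages not yet backed by a frame
 * are allocated here and mapped on both sides.
 */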
int
procvm_enter_remote(struct remote_vmctx* rvmctx, struct proc_mm* mm,
                    ptr_t remote_base, size_t size)
{
    ptr_t vm_mnt = mm->vm_mnt;

    pfn_t size_pn = pfn(size + MEM_PAGE);
    assert(size_pn < REMOTEVM_MAX_PAGES);

    struct mm_region* region = region_get(&mm->regions, remote_base);
    assert(region && region_contains(region, remote_base + size));

    rvmctx->vms_mnt = vm_mnt;
    rvmctx->page_cnt = size_pn;

    remote_base = va_align(remote_base);
    rvmctx->remote = remote_base;
    rvmctx->local_mnt = PG_MOUNT_4_END + 1;

    pte_t* rptep = mkptep_va(vm_mnt, remote_base);
    pte_t* lptep = mkptep_va(VMS_SELF, rvmctx->local_mnt);
    unsigned int pattr = region_pteprot(region);

    for (size_t i = 0; i < size_pn; i++)
    {
        pte_t pte = vmm_tryptep(rptep, PAGE_SIZE);
        if (pte_isloaded(pte)) {
            // already backed: alias the same frame into the local window
            set_pte(lptep, mkpte(pte_paddr(pte), KERNEL_DATA));
        }
        else {
            // not yet backed: allocate a frame visible from both sides
            ptr_t pa = pmm_alloc_page(0);
            set_pte(lptep, mkpte(pa, KERNEL_DATA));
            set_pte(rptep, mkpte(pa, pattr));
        }
        lptep++;
        rptep++;
    }

    return 0;
}
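
/*
 * Copy a local buffer into the remote window established by
 * procvm_enter_remote, bounds-checking the destination first.
 */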
int
procvm_copy_remote_transaction(struct remote_vmctx* rvmctx,
                               ptr_t remote_dest, void* local_src, size_t sz)
{
    if (remote_dest < rvmctx->remote) {
        return -1;
    }

    ptr_t offset = remote_dest - rvmctx->remote;
    if (pfn(offset + sz) >= rvmctx->page_cnt) {
        return -1;
    }

    memcpy((void*)(rvmctx->local_mnt + offset), local_src, sz);
    return sz;
}
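
/*
 * Drop the local aliases created by procvm_enter_remote; the remote
 * side keeps its mappings.
 */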
void
procvm_exit_remote(struct remote_vmctx* rvmctx)
{
    pte_t* lptep = mkptep_va(VMS_SELF, rvmctx->local_mnt);
    vmm_unset_ptes(lptep, rvmctx->page_cnt);
}