+ src++;
+ }
+}
+
+static void
+vmscpy(struct proc_mm* dest_mm, struct proc_mm* src_mm)
+{
+ // Build the self-reference on dest vms
+
+ /*
+ * -- What the heck are ptep_ssm and ptep_sms? --
+ *
+ * (The names spell the leading hops of each recursion chain:
+ * s = self, m = mnt.)
+ *
+ * ptep_dest points to the page table itself, mounted
+ * at dest_mnt (or simply mnt):
+ * mnt -> self -> self -> self -> L0TE@offset
+ *
+ * ptep_sms pushes mnt one hop down the recursion chain:
+ * self -> mnt -> self -> self -> L0TE@self
+ *
+ * ptep_ssm pushes it two hops down:
+ * self -> self -> mnt -> self -> L0TE@self
+ *
+ * Now, here is the problem: back on x86_32, the translation
+ * is a depth-3 recursion:
+ * L0T -> LFT -> Page
+ *
+ * so ptep_ssm terminates at mnt and gives us a leaf
+ * slot in which to allocate a fresh page table for mnt:
+ * self -> self -> L0TE@mnt
+ *
+ * but on x86_64 the translation takes two extra steps:
+ * L0T -> L1T -> L2T -> LFT -> Page
+ *
+ * so we must keep pushing down...
+ * ptep_sssms pushes mnt three hops down:
+ * self -> self -> self -> mnt -> L0TE@self
+ *
+ * ptep_ssssm pushes it four hops down:
+ * self -> self -> self -> self -> L0TE@mnt
+ *
+ * Note: PML4: 2 extra steps
+ * PML5: 3 extra steps
+ *
+ * (A worked composition of such a self-referencing address
+ * for the 4-level case follows this function.)
+ */
+
+ ptr_t dest_mnt, src_mnt;
+
+ dest_mnt = dest_mm->vm_mnt;
+ assert(dest_mnt);
+
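+ /*
+ * ptep_ssm resolves, through the self chain, to the L0TE slot that
+ * will hold the fresh root table for mnt; ptep_smx resolves one
+ * level deeper, giving a window onto that fresh table itself so
+ * its own self-reference can be planted below.
+ */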
+ pte_t* ptep_ssm = mkl0tep_va(VMS_SELF, dest_mnt);
+ pte_t* ptep_smx = mkl1tep_va(VMS_SELF, dest_mnt);
+ pte_t pte_sms = mkpte_prot(KERNEL_PGTAB);
+
+ pte_sms = alloc_pagetable_trace(ptep_ssm, pte_sms, 0, 0);
+ set_pte(&ptep_smx[VMS_SELF_L0TI], pte_sms);
+
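+ /* the mapping behind mnt just changed; drop stale translations */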
+ tlb_flush_kernel((ptr_t)dest_mnt);
+
+ if (!src_mm) {
+ goto done;
+ }
+
+ src_mnt = src_mm->vm_mnt;
+
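+ /* replicate every mapped region of src into the mounted dest */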
+ struct mm_region *pos, *n;
+ llist_for_each(pos, n, &src_mm->regions, head)
+ {
+ vmrcpy(dest_mnt, src_mnt, pos);
+ }
+
+done:;
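+ /* link the shared kernel space, then record the physical root */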
+ procvm_link_kernel(dest_mnt);
+
+ dest_mm->vmroot = pte_paddr(pte_sms);
+}
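+
+ /*
+ * Illustrative sketch only: how a self-referencing pte address can
+ * be composed on x86_64 (4-level paging). The helper name, the
+ * hard-coded shifts and the top-level index `self` are assumptions
+ * for demonstration; the real composition lives behind mkl0tep_va,
+ * mkl1tep_va and friends.
+ */
+ static inline ptr_t
+ demo_lftep_va(ptr_t self, ptr_t va)
+ {
+ /*
+ * Push every index of va one level down: L0TI(va) becomes the
+ * L1T index, and so on. The LFT index lands in the page offset,
+ * scaled by the 8-byte entry size (bits [38:3]).
+ */
+ ptr_t ptep = (va >> 9) & 0x0000007ffffffff8UL;
+
+ /* mount the self-reference as the new top-level index */
+ ptep |= self << 39;
+
+ /* sign-extend bit 47 so the address stays canonical */
+ if (ptep & (1UL << 47)) {
+ ptep |= 0xffff000000000000UL;
+ }
+
+ /*
+ * ptep now addresses the LFT entry mapping va; every extra
+ * self hop prefixed this way climbs one level, up to L0TE@va.
+ */
+ return ptep;
+ }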
+
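+ /*
+ * Depth-first sweep of the hierarchy mounted at mm->vm_mnt:
+ * release every page table still loaded, skipping slots that
+ * carry a VMS mount, as those borrow tables owned elsewhere.
+ */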
+static void
+__purge_vms_residual(struct proc_mm* mm, int level, ptr_t va)
+{
+ pte_t *ptep, pte;
+ ptr_t _va;
+
+ if (level >= MAX_LEVEL) {
+ return;
+ }
+
+ ptep = mklntep_va(level, mm->vm_mnt, va);
+
+ for (unsigned i = 0; i < LEVEL_SIZE; i++, ptep++)
+ {
+ pte = pte_at(ptep);
+ if (pte_isnull(pte) || !pte_isloaded(pte)) {
+ continue;
+ }
+
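+ /* entries that carry a VMS mount borrow foreign tables; skip */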
+ if (lntep_implie_vmnts(ptep, lnt_page_size(level))) {
+ continue;
+ }
+
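+ /* recurse into the child level before releasing this slot */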
+ _va = va + (i * lnt_page_size(level));
+ __purge_vms_residual(mm, level + 1, _va);
+
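+ /* unhook the entry and hand its backing leaflet back */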
+ set_pte(ptep, null_pte);
+ leaflet_return(pte_leaflet_aligned(pte));