+ // Ensure we have stepped back to L0T
+ assert(!level);
+ assert(ptep_dest == ptepd_kernel);
+
+ // Carry over the kernel mappings (excluding the last two entries)
+ while (ptep_vfn(ptep) < MAX_PTEN - 2) {
+ pte_t pte = *ptep;
+ assert(!pte_isnull(pte));
+
+ // Ensure it is a next-level pagetable;
+ // we MAY relax this later to allow the kernel
+ // to have huge leaflets mapped at L0T
+ leaflet = pte_leaflet_aligned(pte);
+ assert(leaflet_order(leaflet) == 0);
+
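+ // Share the kernel pagetable with the new space; borrowing the
+ // backing leaflet keeps it alive across all spaces that reference it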
+ set_pte(ptep_dest, pte);
+ leaflet_borrow(leaflet);
+
+ ptep++;
+ ptep_dest++;
+ }
+
+ return pte_paddr(*(ptep_dest + 1));
+}
+
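+ // Tear down a mounted virtual memory space: walk every mapping
+ // below KERNEL_RESIDENT and release what backs it. Kernel-resident
+ // mappings are shared and therefore left untouched here.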
+static void
+vmsfree(ptr_t vm_mnt)
+{
+ struct leaflet* leaflet;
+ pte_t* ptep_head = mkl0tep(mkptep_va(vm_mnt, 0));
+ pte_t* ptep_kernel = mkl0tep(mkptep_va(vm_mnt, KERNEL_RESIDENT));
+
+ int level = 0;
+ pte_t* ptep = ptep_head;
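+ // Depth-first walk over every mapping below the kernel region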
+ while (ptep < ptep_kernel)
+ {
+ pte_t pte = *ptep;
+ ptr_t pa = pte_paddr(pte);
+
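+ // Skip holes in the address space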
+ if (pte_isnull(pte)) {
+ goto cont;
+ }
+
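+ // Not a leaf entry: descend into the next-level pagetable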
+ if (!pt_last_level(level) && !pte_huge(pte)) {
+ ptep = ptep_step_into(ptep);
+ level++;