#include <lunaix/mm/page.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/process.h>
+#include <lunaix/syslog.h>
#include <asm/mm_defs.h>
#include <klibc/string.h>
+/*
+ * alloc_pagetable_trace: allocate a kernel page of the given order at
+ * *ptep via alloc_kpage_at. Uses a statement expression so the macro
+ * evaluates to alloc_kpage_at's result (callers assign it, see the
+ * pte_sms usage). The `level` argument is not used by the expansion —
+ * presumably a hook point for page-table tracing; confirm intent.
+ */
+#define alloc_pagetable_trace(ptep, pte, ord, level) \
+ ({ \
+ alloc_kpage_at(ptep, pte, ord); \
+ })
+
+/*
+ * free_pagetable_trace: return the leaflet backing `pte` and clear the
+ * entry at *ptep. Asserts the leaflet is order 0, i.e. exactly one
+ * page — page-table pages are expected to be single pages. `level` is
+ * not used by the expansion (same tracing-hook caveat as above).
+ */
+#define free_pagetable_trace(ptep, pte, level) \
+ ({ \
+ struct leaflet* leaflet = pte_leaflet_aligned(pte); \
+ assert(leaflet_order(leaflet) == 0); \
+ leaflet_return(leaflet); \
+ set_pte(ptep, null_pte); \
+ })
+
struct proc_mm*
procvm_create(struct proc_info* proc) {
struct proc_mm* mm = vzalloc(sizeof(struct proc_mm));
}
if (alloc && pte_isnull(pte_at(dest))) {
- alloc_kpage_at(dest, pte, 0);
+ alloc_pagetable_trace(dest, pte, 0, i);
}
i++;
+/*
+ * copy_root: allocate a fresh kernel page for the root-level table
+ * entry at `dest`. The change routes the allocation through
+ * alloc_pagetable_trace so the table `level` is passed along for
+ * tracing. NOTE(review): `src` is unused in the visible body —
+ * confirm against the full function if context lines were elided.
+ */
static inline void
copy_root(pte_t* dest, pte_t* src, pte_t pte, int level)
{
- alloc_kpage_at(dest, pte, 0);
+ alloc_pagetable_trace(dest, pte, 0, level);
}
static void
src = mkptep_va(vm_mnt, region->start);
end = mkptep_va(vm_mnt, region->end);
- level = __descend(0, vm_mnt, loc, false);
+ level = __descend(vm_mnt, vm_mnt, loc, false);
while (src < end)
{
continue;
}
+ set_pte(src, null_pte);
+
if (pte_isloaded(pte)) {
leaflet = pte_leaflet_aligned(pte);
leaflet_return(leaflet);
cont:
while (ptep_vfn(src) == MAX_PTEN - 1) {
src = ptep_step_out(src);
- leaflet = pte_leaflet_aligned(pte_at(src));
-
- assert(leaflet_order(leaflet) == 0);
- leaflet_return(leaflet);
+ free_pagetable_trace(src, pte_at(src), level);
level--;
}
pte_t* ptep_smx = mkl1tep_va(VMS_SELF, dest_mnt);
pte_t pte_sms = mkpte_prot(KERNEL_PGTAB);
- pte_sms = alloc_kpage_at(ptep_ssm, pte_sms, 0);
+ pte_sms = alloc_pagetable_trace(ptep_ssm, pte_sms, 0, 0);
set_pte(&ptep_smx[VMS_SELF_L0TI], pte_sms);
tlb_flush_kernel((ptr_t)dest_mnt);
dest_mm->vmroot = pte_paddr(pte_sms);
}
+/*
+ * __purge_vms_residual: recursively walk the page-table hierarchy of
+ * the mounted VM space and free every remaining intermediate table
+ * page. Entries that are null or not loaded are skipped, as are
+ * entries for which lntep_implie_vmnts() holds — presumably the
+ * self-map / mount-point slots that must survive; confirm semantics.
+ *
+ * Recursion stops at MAX_LEVEL, so leaf-level pages are never touched
+ * here (regions are expected to have been freed beforehand, see
+ * vmsfree). The free is post-order: children at level+1 are purged
+ * before the entry and its backing leaflet at this level are released.
+ */
+static void
+__purge_vms_residual(struct proc_mm* mm, int level, ptr_t va)
+{
+ pte_t *ptep, pte;
+ ptr_t _va;
+
+ /* leaf level reached: nothing below to purge */
+ if (level >= MAX_LEVEL) {
+ return;
+ }
+
+ ptep = mklntep_va(level, mm->vm_mnt, va);
+
+ for (unsigned i = 0; i < LEVEL_SIZE; i++, ptep++)
+ {
+ pte = pte_at(ptep);
+ if (pte_isnull(pte) || !pte_isloaded(pte)) {
+ continue;
+ }
+
+ /* skip entries implying VM mounts (kept intact) */
+ if (lntep_implie_vmnts(ptep, lnt_page_size(level))) {
+ continue;
+ }
+
+ /* descend first, then drop this table page (post-order) */
+ _va = va + (i * lnt_page_size(level));
+ __purge_vms_residual(mm, level + 1, _va);
+
+ set_pte(ptep, null_pte);
+ leaflet_return(pte_leaflet_aligned(pte));
+ }
+}
+
+/*
+ * vmsfree: tear down a process VM space. Order of operations:
+ *   1) free every region's mappings (vmrfree),
+ *   2) unlink the shared kernel portion (now takes vm_mnt explicitly),
+ *   3) purge all residual intermediate page-table pages,
+ *   4) release the root table itself via the self-referencing entry.
+ *
+ * NOTE(review): after this change the local `leaflet` (and, from what
+ * is visible, `vm_mnt` inside this function) may be unused — the old
+ * manual leaflet handling was folded into free_pagetable_trace.
+ * Consider dropping the dead declaration.
+ */
static void
vmsfree(struct proc_mm* mm)
{
struct leaflet* leaflet;
+ struct mm_region *pos, *n;
ptr_t vm_mnt;
pte_t* ptep_self;
vm_mnt = mm->vm_mnt;
- ptep_self = mkl0tep(mkptep_va(vm_mnt, VMS_SELF));
+ ptep_self = mkl0tep_va(vm_mnt, VMS_SELF);
- struct mm_region *pos, *n;
+ // first pass: free region mappings
llist_for_each(pos, n, &mm->regions, head)
{
vmrfree(vm_mnt, pos);
}
- procvm_unlink_kernel();
+ procvm_unlink_kernel(vm_mnt);
+
+ // free up all allocated tables on intermediate levels
+ __purge_vms_residual(mm, 0, 0);
- leaflet = pte_leaflet_aligned(pte_at(ptep_self));
- leaflet_return(leaflet);
+ free_pagetable_trace(ptep_self, pte_at(ptep_self), 0);
}
static inline void
}
}
+/*
+ * procvm_prune_vmr: public entry point to tear down the mappings of a
+ * single memory region under the given VM mount. Thin forwarder to
+ * the internal vmrfree; the region descriptor itself is not released
+ * here (callers keep ownership of `region`).
+ */
+void
+procvm_prune_vmr(ptr_t vm_mnt, struct mm_region* region)
+{
+ vmrfree(vm_mnt, region);
+}
void
procvm_dupvms_mount(struct proc_mm* mm) {
procvm_unmount_release(struct proc_mm* mm) {
ptr_t vm_mnt = mm->vm_mnt;
struct mm_region *pos, *n;
+
llist_for_each(pos, n, &mm->regions, head)
{
mem_sync_pages(vm_mnt, pos, pos->start, pos->end - pos->start, 0);
- region_release(pos);
}
vmsfree(mm);
+
+ llist_for_each(pos, n, &mm->regions, head)
+ {
+ region_release(pos);
+ }
+
vms_unmount(vm_mnt);
vfree(mm);