X-Git-Url: https://scm.lunaixsky.com/lunaix-os.git/blobdiff_plain/9440be3a5115a91dcdf8dff05a361cac4b6cea29..86a9cfeadbbe180abf6c8bd8c64c86957fdacfeb:/lunaix-os/kernel/mm/vmm.c

diff --git a/lunaix-os/kernel/mm/vmm.c b/lunaix-os/kernel/mm/vmm.c
index 0aa09b8..5e407ff 100644
--- a/lunaix-os/kernel/mm/vmm.c
+++ b/lunaix-os/kernel/mm/vmm.c
@@ -1,7 +1,5 @@
 #include
 #include
-#include
-#include
 #include
 #include
 
@@ -16,7 +14,7 @@ vmm_init()
 x86_page_table*
 vmm_init_pd()
 {
-    x86_page_table* dir = (x86_page_table*)pmm_alloc_page();
+    x86_page_table* dir = (x86_page_table*)pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
     for (size_t i = 0; i < PG_MAX_ENTRIES; i++) {
         dir->entry[i] = PTE_NULL;
     }
@@ -28,7 +26,8 @@ vmm_init_pd()
 }
 
 int
-__vmm_map_internal(uint32_t l1_inx,
+__vmm_map_internal(pid_t pid,
+                   uint32_t l1_inx,
                    uint32_t l2_inx,
                    uintptr_t pa,
                    pt_attr attr,
@@ -41,14 +40,15 @@ __vmm_map_internal(uint32_t l1_inx,
     assert(attr <= 128);
 
     if (!l1pt->entry[l1_inx]) {
-        x86_page_table* new_l1pt_pa = pmm_alloc_page();
+        x86_page_table* new_l1pt_pa = pmm_alloc_page(pid, PP_FGPERSIST);
 
         // Out of physical memory!
         if (!new_l1pt_pa) {
             return 0;
         }
 
-        l1pt->entry[l1_inx] = NEW_L1_ENTRY(attr, new_l1pt_pa);
+        // This must be writable
+        l1pt->entry[l1_inx] = NEW_L1_ENTRY(attr | PG_WRITE, new_l1pt_pa);
 
         memset((void*)L2_VADDR(l1_inx), 0, PG_SIZE);
     }
@@ -57,9 +57,11 @@ __vmm_map_internal(uint32_t l1_inx,
         if (!forced) {
             return 0;
         }
-        if (HAS_FLAGS(l2pte, PG_PRESENT)) {
-            assert_msg(pmm_free_page(GET_PG_ADDR(l2pte)), "fail to release physical page");
-        }
+    }
+
+    if ((HAS_FLAGS(attr, PG_PRESENT))) {
+        // add one on reference count, regardless of existence.
+        pmm_ref_page(pid, pa);
     }
 
     l2pt->entry[l2_inx] = NEW_L2_ENTRY(attr, pa);
@@ -68,7 +70,7 @@ __vmm_map_internal(uint32_t l1_inx,
 }
 
 void*
-vmm_map_page(void* va, void* pa, pt_attr tattr)
+vmm_map_page(pid_t pid, void* va, void* pa, pt_attr tattr)
 {
     // Obviously, mapping a null pointer is meaningless.
     if (!pa || !va) {
@@ -92,8 +94,7 @@ vmm_map_page(void* va, void* pa, pt_attr tattr)
             l2pt = (x86_page_table*)L2_VADDR(l1_index);
         }
         // The page table has a free slot; just set up a new PTE (Level 2).
-        if (l2pt && !l2pt->entry[l2_index]) {
-            l2pt->entry[l2_index] = NEW_L2_ENTRY(tattr, pa);
+        if (__vmm_map_internal(pid, l1_index, l2_index, pa, tattr, false)) {
             return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
         }
         l2_index++;
@@ -104,7 +105,7 @@ vmm_map_page(void* va, void* pa, pt_attr tattr)
         return NULL;
     }
 
-    if (!__vmm_map_internal(l1_index, l2_index, (uintptr_t)pa, tattr, false)) {
+    if (!__vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, tattr, false)) {
         return NULL;
     }
 
@@ -112,7 +113,7 @@ vmm_map_page(void* va, void* pa, pt_attr tattr)
 }
 
 void*
-vmm_fmap_page(void* va, void* pa, pt_attr tattr)
+vmm_fmap_page(pid_t pid, void* va, void* pa, pt_attr tattr)
 {
     if (!pa || !va) {
         return NULL;
@@ -123,42 +124,44 @@ vmm_fmap_page(void* va, void* pa, pt_attr tattr)
     uint32_t l1_index = L1_INDEX(va);
     uint32_t l2_index = L2_INDEX(va);
 
-    if (!__vmm_map_internal(l1_index, l2_index, (uintptr_t)pa, tattr, true)) {
+    if (!__vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, tattr, true)) {
         return NULL;
     }
 
     cpu_invplg(va);
-    return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
+    return va;
 }
 
 void*
-vmm_alloc_page(void* vpn, pt_attr tattr)
+vmm_alloc_page(pid_t pid, void* vpn, void** pa, pt_attr tattr, pp_attr_t pattr)
 {
-    void* pp = pmm_alloc_page();
-    void* result = vmm_map_page(vpn, pp, tattr);
+    void* pp = pmm_alloc_page(pid, pattr);
+    void* result = vmm_map_page(pid, vpn, pp, tattr);
     if (!result) {
-        pmm_free_page(pp);
+        pmm_free_page(pp, pid);
     }
+    pa ? (*pa = pp) : 0;
     return result;
 }
 
 int
-vmm_alloc_pages(void* va, size_t sz, pt_attr tattr)
+vmm_alloc_pages(pid_t pid, void* va, size_t sz, pt_attr tattr, pp_attr_t pattr)
 {
     assert((uintptr_t)va % PG_SIZE == 0);
     assert(sz % PG_SIZE == 0);
 
     void* va_ = va;
     for (size_t i = 0; i < (sz >> PG_SIZE_BITS); i++, va_ += PG_SIZE) {
-        void* pp = pmm_alloc_page();
+        void* pp = pmm_alloc_page(pid, pattr);
         uint32_t l1_index = L1_INDEX(va_);
         uint32_t l2_index = L2_INDEX(va_);
         if (!pp || !__vmm_map_internal(
+              pid,
               l1_index, l2_index, (uintptr_t)pp, tattr, false)) {
             // if one failed, release previous allocated pages.
             va_ = va;
             for (size_t j = 0; j < i; j++, va_ += PG_SIZE) {
-                vmm_unmap_page(va_);
+                vmm_unmap_page(pid, va_);
             }
 
             return false;
@@ -168,8 +171,8 @@ vmm_alloc_pages(void* va, size_t sz, pt_attr tattr)
     return true;
 }
 
-void
-vmm_set_mapping(void* va, void* pa, pt_attr attr) {
+int
+vmm_set_mapping(pid_t pid, void* va, void* pa, pt_attr attr) {
     assert(((uintptr_t)va & 0xFFFU) == 0);
 
     uint32_t l1_index = L1_INDEX(va);
@@ -177,15 +180,15 @@ vmm_set_mapping(void* va, void* pa, pt_attr attr) {
 
     // prevent map of recursive mapping region
     if (l1_index == 1023) {
-        return;
+        return 0;
     }
 
-    __vmm_map_internal(l1_index, l2_index, (uintptr_t)pa, attr, false);
+    __vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, attr, false);
+    return 1;
 }
 
 void
-vmm_unmap_page(void* va)
-{
+__vmm_unmap_internal(pid_t pid, void* va, int free_ppage) {
     assert(((uintptr_t)va & 0xFFFU) == 0);
 
     uint32_t l1_index = L1_INDEX(va);
@@ -203,14 +206,25 @@ vmm_unmap_page(void* va)
     if (l1pte) {
         x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_index);
         x86_pte_t l2pte = l2pt->entry[l2_index];
-        if (IS_CACHED(l2pte)) {
-            pmm_free_page((void*)l2pte);
+        if (IS_CACHED(l2pte) && free_ppage) {
+            pmm_free_page(pid, (void*)l2pte);
         }
         cpu_invplg(va);
         l2pt->entry[l2_index] = PTE_NULL;
     }
 }
 
+void
+vmm_unset_mapping(void* va) {
+    __vmm_unmap_internal(0, va, false);
+}
+
+void
+vmm_unmap_page(pid_t pid, void* va)
+{
+    __vmm_unmap_internal(pid, va, true);
+}
+
 v_mapping
 vmm_lookup(void* va)
 {
@@ -224,12 +238,13 @@ vmm_lookup(void* va)
     v_mapping mapping = { .flags = 0, .pa = 0, .pn = 0 };
 
     if (l1pte) {
-        x86_pte_t l2pte =
-          ((x86_page_table*)L2_VADDR(l1_index))->entry[l2_index];
+        x86_pte_t* l2pte =
+          &((x86_page_table*)L2_VADDR(l1_index))->entry[l2_index];
         if (l2pte) {
-            mapping.flags = PG_ENTRY_FLAGS(l2pte);
-            mapping.pa = PG_ENTRY_ADDR(l2pte);
+            mapping.flags = PG_ENTRY_FLAGS(*l2pte);
+            mapping.pa = PG_ENTRY_ADDR(*l2pte);
             mapping.pn = mapping.pa >> PG_SIZE_BITS;
+            mapping.pte = l2pte;
         }
     }
 
@@ -240,4 +255,19 @@ void*
 vmm_v2p(void* va)
 {
     return (void*)vmm_lookup(va).pa;
+}
+
+void*
+vmm_mount_pd(uintptr_t mnt, void* pde) {
+    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
+    l1pt->entry[(mnt >> 22)] = NEW_L1_ENTRY(T_SELF_REF_PERM, pde);
+    cpu_invplg(mnt);
+    return mnt;
+}
+
+void*
+vmm_unmount_pd(uintptr_t mnt) {
+    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
+    l1pt->entry[(mnt >> 22)] = 0;
+    cpu_invplg(mnt);
 }
\ No newline at end of file
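
Note: the sketch below is editorial and not part of the commit. It shows how a caller might use the reworked, pid-aware interface after this change; only the function signatures and flag names are taken from the diff above, while the header paths, the demo virtual address, and the demo PID are assumptions.

/* Editorial sketch -- not part of the commit. Header paths and demo
 * values are assumptions; signatures and flags follow the diff above. */
#include <lunaix/mm/vmm.h> /* assumed location of the vmm declarations */
#include <lunaix/mm/pmm.h> /* assumed location of pp_attr_t / PP_FGPERSIST */

void
demo_vmm_usage(pid_t pid)
{
    void* pa = NULL;

    /* Allocate one physical page owned by `pid`, map it at a demo
     * address, and receive the backing physical address through the
     * out-parameter introduced by this commit. */
    void* va = vmm_alloc_page(pid,
                              (void*)0x40000000, /* hypothetical vaddr */
                              &pa,
                              PG_PRESENT | PG_WRITE,
                              PP_FGPERSIST);
    if (!va) {
        return; /* out of physical memory, or the slot was taken */
    }

    /* vmm_lookup now also hands back a pointer to the live PTE. */
    v_mapping m = vmm_lookup(va);
    assert(m.pa == (uintptr_t)pa && m.pte != NULL);

    /* Per-pid unmap: releases the physical page owned by `pid`.
     * vmm_unset_mapping(va) would remove the mapping but keep the page. */
    vmm_unmap_page(pid, va);
}

The common thread of the commit is that physical pages now carry an owner pid and a reference count: __vmm_map_internal calls pmm_ref_page for every present mapping instead of force-freeing a conflicting page, vmm_unmap_page releases the page through pmm_free_page, and the new vmm_unset_mapping, vmm_mount_pd, and vmm_unmount_pd helpers manipulate mappings without touching page ownership.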