-void vmm_unmap_page(void* vpn) {
-    uintptr_t pd_offset = PD_INDEX(vpn);
-    uintptr_t pt_offset = PT_INDEX(vpn);
-    ptd_t* self_pde = PTD_BASE_VADDR;
-
-    ptd_t pde = self_pde[pd_offset];
-
-    if (pde) {
-        pt_t* pt = (pt_t*)PT_VADDR(pd_offset);
-        uint32_t pte = pt[pt_offset];
-        if (IS_CACHED(pte) && pmm_free_page(pte)) {
-            // flush the TLB
-            #ifdef __ARCH_IA32
-            __asm__("invlpg (%0)" :: "r"((uintptr_t)vpn) : "memory");
-            #endif
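+// Allocate `sz` bytes worth of physical pages for process `pid` and map them
+// contiguously starting at virtual address `va`; rolls back on any failure.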
+int
+vmm_alloc_pages(pid_t pid, void* va, size_t sz, pt_attr tattr, pp_attr_t pattr)
+{
+    assert((uintptr_t)va % PG_SIZE == 0);
+    assert(sz % PG_SIZE == 0);
+
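+    // map the region one page at a time, backing each virtual page with a
+    // freshly allocated physical page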
+    void* va_ = va;
+    for (size_t i = 0; i < (sz >> PG_SIZE_BITS); i++, va_ += PG_SIZE) {
+        void* pp = pmm_alloc_page(pid, pattr);
+        uint32_t l1_index = L1_INDEX(va_);
+        uint32_t l2_index = L2_INDEX(va_);
+        if (!pp || !__vmm_map_internal(
+                       pid,
+                       l1_index, l2_index, (uintptr_t)pp, tattr, false)) {
+            // if allocation or mapping fails, free the page just taken (if
+            // any), then roll back every page mapped so far.
+            if (pp) {
+                pmm_free_page(pid, pp);
+            }
+            va_ = va;
+            for (size_t j = 0; j < i; j++, va_ += PG_SIZE) {
+                vmm_unmap_page(pid, va_);
+            }
+
+            return false;
+        }
+    }
+
+    return true;
+}
+
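+// Map the single virtual page at `va` to the physical page at `pa` with the
+// given page-table attributes.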
+int
+vmm_set_mapping(pid_t pid, void* va, void* pa, pt_attr attr) {
+    assert(((uintptr_t)va & 0xFFFU) == 0);
+
+    uint32_t l1_index = L1_INDEX(va);
+    uint32_t l2_index = L2_INDEX(va);
+
+    // refuse to map into the recursive page-table mapping region
+    if (l1_index == 1023) {
+        return 0;
+    }
+
+    return __vmm_map_internal(
+        pid, l1_index, l2_index, (uintptr_t)pa, attr, false);
+}
+
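+// Undo the mapping of the virtual page at `va`, optionally releasing the
+// backing physical page.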
+void
+__vmm_unmap_internal(pid_t pid, void* va, int free_ppage) {
+    assert(((uintptr_t)va & 0xFFFU) == 0);
+
+    uint32_t l1_index = L1_INDEX(va);
+    uint32_t l2_index = L2_INDEX(va);
+
+    // refuse to unmap the recursive page-table mapping region
+    if (l1_index == 1023) {
+        return;
+    }
+
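+    // locate the page tables through the self-referencing (recursive) mapping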
+    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
+
+    x86_pte_t l1pte = l1pt->entry[l1_index];
+
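+    // nothing to unmap unless the L1 entry (page-directory entry) is present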
+    if (l1pte) {
+        x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_index);
+        x86_pte_t l2pte = l2pt->entry[l2_index];
+        if (IS_CACHED(l2pte) && free_ppage) {
+            // mask off the attribute bits to recover the physical page address
+            pmm_free_page(pid, (void*)(l2pte & ~0xFFFU));