-void* vmm_map_page(void* va, void* pa, pt_attr dattr, pt_attr tattr) {
+int
+__vmm_map_internal(uint32_t l1_inx,
+ uint32_t l2_inx,
+ uintptr_t pa,
+ pt_attr attr,
+ int forced)
+{
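+ // Page tables are reached through their recursive (self-referencing)
+ // mapping: L1_BASE_VADDR exposes the L1 table itself, and L2_VADDR(i)
+ // exposes the L2 table referenced by L1 entry i.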
+ x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
+ x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_inx);
+
+ // Check that attr makes sense.
+ assert(attr <= 128);
+
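+ // L1 entry not present yet: allocate a physical page to back a new L2 table.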
+ if (!l1pt->entry[l1_inx]) {
+ x86_page_table* new_l1pt_pa = pmm_alloc_page();
+
+ // Out of physical memory!
+ if (!new_l1pt_pa) {
+ return 0;
+ }
+
+ l1pt->entry[l1_inx] = NEW_L1_ENTRY(attr, new_l1pt_pa);
+ memset((void*)L2_VADDR(l1_inx), 0, PG_SIZE);
+ }
+
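+ // Slot already occupied: refuse to remap unless forced. A forced remap
+ // first releases the old backing page, if one was present.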
+ x86_pte_t l2pte = l2pt->entry[l2_inx];
+ if (l2pte) {
+ if (!forced) {
+ return 0;
+ }
+ if (HAS_FLAGS(l2pte, PG_PRESENT)) {
+ assert_msg(pmm_free_page(GET_PG_ADDR(l2pte)), "failed to release physical page");
+ }
+ }
+
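+ // Install the new mapping of pa with the requested attributes.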
+ l2pt->entry[l2_inx] = NEW_L2_ENTRY(attr, pa);
+
+ return 1;
+}
+
+void*
+vmm_map_page(void* va, void* pa, pt_attr tattr)
+{