+ // Locate the PTE for the faulting address via the self-referencing
+ // page-directory mount. `ptr >> 12` yields the virtual page number.
+ volatile x86_pte_t* pte = &PTE_MOUNTED(PD_REFERENCED, ptr >> 12);
+ if ((*pte & PG_PRESENT)) {
+ // Page is present, so this is a protection fault (e.g. write to a
+ // read-only page). Copy-on-write is the only legitimate cause here.
+ if ((hit_region->attr & COW_MASK) == COW_MASK) {
+ // Copy-on-write fault: duplicate the shared frame, release our
+ // reference on the old frame, then remap writable.
+ // NOTE(review): cpu_invplg is given the PTE's *mapped* address here;
+ // the faulting address itself is invalidated at `resolved:` below —
+ // confirm both invalidations are intended.
+ cpu_invplg(pte);
+ uintptr_t pa =
+ (uintptr_t)vmm_dup_page(__current->pid, PG_ENTRY_ADDR(*pte));
+ // Drop this process's reference to the original (shared) frame.
+ pmm_free_page(__current->pid, *pte & ~0xFFF);
+ // Keep the low 12 attribute bits, install the new frame, grant write.
+ *pte = (*pte & 0xFFF) | pa | PG_WRITE;
+ goto resolved;
+ }
+ // Present but not COW: impossible cases or accessing privileged page.
+ goto segv_term;
+ }
+
+ if (!(*pte)) {
+ // Entirely empty PTE: the address was never mapped — invalid location.
+ goto segv_term;
+ }
+
+ // Physical frame field of the (non-present) PTE.
+ uintptr_t loc = *pte & ~0xfff;
+
+ // A writable region, page not present, no frame cached, but PTE attribute
+ // bits are non-zero -> a new page needs to be allocated on demand.
+ if ((hit_region->attr & REGION_WRITE) && (*pte & 0xfff) && !loc) {
+ cpu_invplg(pte);
+ uintptr_t pa = pmm_alloc_page(__current->pid, 0);
+ // NOTE(review): PG_WRITE is not OR'ed in here — presumably the
+ // pre-seeded attribute bits in *pte already carry it; confirm.
+ *pte = *pte | pa | PG_PRESENT;
+ goto resolved;
+ }
+
+ // Page not present with a cached frame/backing reference: should be
+ // brought in from disk or elsewhere — not implemented yet, so panic.
+ __print_panic_msg("WIP page fault route", param);
+ while (1)
+ ;
+
+segv_term:
+ // Unresolvable fault: log context (faulting address, cs:eip) and deliver
+ // SIGSEGV to the current process, then yield — we never return here.
+ kprintf(KERROR "(pid: %d) Segmentation fault on %p (%p:%p)\n",
+ __current->pid,
+ ptr,
+ param->cs,
+ param->eip);
+ __SIGSET(__current->sig_pending, _SIGSEGV);
+ schedule();
+ // should not reach
+ while (1)
+ ;
+
+resolved:
+ // Flush the stale TLB entry for the faulting address before resuming.
+ cpu_invplg(ptr);
+ return;
+}
+
+/**
+ * Resolve a kernel-space page fault described by `mapping`.
+ *
+ * Only the kernel-heap branch is visible in this hunk: for addresses in
+ * [KHEAP_START, PROC_START) a fresh physical frame is allocated on demand
+ * and installed, preserving the PTE's low attribute bits.
+ *
+ * @param mapping  faulting virtual-address mapping (va + pointer to its PTE)
+ * @return         status code — NOTE(review): return semantics not visible
+ *                 in this hunk; confirm against the rest of the function.
+ */
+int
+do_kernel(v_mapping* mapping)
+{
+ uintptr_t addr = mapping->va;
+ if (addr >= KHEAP_START && addr < PROC_START) {
+ // This is a kernel heap page: demand-allocate a frame owned by the
+ // kernel and map it, keeping the existing attribute bits.
+ uintptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
+ *mapping->pte = (*mapping->pte & 0xfff) | pa | PG_PRESENT;
+ // Invalidate both the PTE's mapped address and the faulting address
+ // so the TLB picks up the new translation.
+ cpu_invplg(mapping->pte);
+ cpu_invplg(addr);
+ goto done;
+ }