+ // NOTE(review): this hunk is the interior of a page-fault (#PF) handler;
+ // the function signature and the segv_term / resolved / oom labels are
+ // defined outside this hunk.
+ uint32_t errcode = param->execp->err_code;
+ // CR2 holds the linear address that caused the fault.
+ ptr_t ptr = cpu_rcr2();
+ if (!ptr) {
+ // fault at address 0 is a null dereference -- never resolvable
+ goto segv_term;
+ }
+
+ // Fetch the va->pa mapping metadata for the faulting address.
+ v_mapping mapping;
+ if (!vmm_lookup(ptr, &mapping)) {
+ goto segv_term;
+ }
+
+ // RPL of the faulting CS == 0 means the fault happened in kernel mode.
+ if (!SEL_RPL(param->execp->cs)) {
+ // TODO if kernel pfault
+ }
+
+ vm_regions_t* vmr = (vm_regions_t*)&__current->mm.regions;
+ struct mm_region* hit_region = region_get(vmr, ptr);
+
+ if (!hit_region) {
+ // "When you gaze into the abyss..." -- the address falls outside
+ // every region of the current process; treat as a segfault.
+ goto segv_term;
+ }
+
+ // PTE of the faulting page, reached through the recursive/self page-table
+ // mapping (VMS_SELF); ptr >> 12 indexes by page number.
+ volatile x86_pte_t* pte = &PTE_MOUNTED(VMS_SELF, ptr >> 12);
+ if (PG_IS_PRESENT(*pte)) {
+ // Page is present, so this is a protection fault rather than a
+ // not-present fault. XOR of the error code with the mapping flags
+ // isolates a user/supervisor mismatch -- assumes the U/S bit occupies
+ // the same position (PG_ALLOW_USER) in both encodings; TODO confirm.
+ if (((errcode ^ mapping.flags) & PG_ALLOW_USER)) {
+ // invalid access
+ kprintf(KDEBUG "invalid user access. (%p->%p, attr:0x%x)\n",
+ mapping.va,
+ mapping.pa,
+ mapping.flags);
+ goto segv_term;
+ }
+ if ((hit_region->attr & COW_MASK) == COW_MASK) {
+ // normal page fault, do COW: duplicate the shared frame, drop our
+ // reference on the original, then remap writable.
+ cpu_invplg((ptr_t)pte);
+
+ ptr_t pa = (ptr_t)vmm_dup_page(__current->pid, PG_ENTRY_ADDR(*pte));
+
+ // release this process's reference to the old (shared) frame
+ pmm_free_page(__current->pid, *pte & ~0xFFF);
+ // keep low-12 flag bits except dirty, point at the copy, grant write
+ // (assumes PG_DIRTY lives within the low 12 bits -- TODO confirm)
+ *pte = (*pte & 0xFFF & ~PG_DIRTY) | pa | PG_WRITE;
+
+ goto resolved;
+ }
+ // impossible cases or accessing privileged page
+ goto segv_term;
+ }
+
+ // an anonymous page and not present
+ // -> a new page need to be alloc (demand paging: back the page with a
+ //    freshly allocated, zero-filled physical frame)
+ if ((hit_region->attr & REGION_ANON)) {
+ if (!PG_IS_PRESENT(*pte)) {
+ cpu_invplg((ptr_t)pte);
+
+ ptr_t pa = pmm_alloc_page(__current->pid, 0);
+ if (!pa) {
+ goto oom;
+ }
+
+ // install the new frame with the region's page attributes
+ *pte = *pte | pa | get_ptattr(hit_region);
+ // zero-fill the newly mapped page before returning to the faulter
+ memset((void*)PG_ALIGN(ptr), 0, PG_SIZE);
+ goto resolved;
+ }
+ // permission denied on anon page (e.g., write on readonly page)
+ goto segv_term;