#include <arch/x86/interrupts.h>
#include <lunaix/common.h>
#include <lunaix/mm/mm.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/sched.h>
#include <lunaix/status.h>
#include <lunaix/syslog.h>

#include <stdarg.h>

static void
kprintf(const char* fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    __kprintf("PFAULT", fmt, args);
    va_end(args);
}

#define COW_MASK (REGION_RSHARED | REGION_READ | REGION_WRITE)

void
__print_panic_msg(const char* msg, const isr_param* param);

static int
do_kernel(v_mapping* mapping);
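
// Page fault (#PF) handler: resolves copy-on-write writes and pages that are
// allocated on demand, lazily backs kernel heap pages, and terminates the
// offending process when the access cannot be resolved.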
void
intr_routine_page_fault(const isr_param* param)
{
    // the faulting linear address is reported in CR2
    uintptr_t ptr = cpu_rcr2();

    v_mapping mapping;
    if (!vmm_lookup(ptr, &mapping)) {
        goto segv_term;
    }

    if (!SEL_RPL(param->cs)) {
        // fault taken in ring 0, try to resolve it in kernel space
        if (do_kernel(&mapping)) {
            return;
        }
        goto segv_term;
    }

    struct mm_region* hit_region = region_get(&__current->mm.regions, ptr);

    if (!hit_region) {
        // the address is not covered by any region: illegal access
        goto segv_term;
    }
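
    // inspect the PTE of the faulting address through the self-referenced
    // page directory to decide how the fault should be handled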
    x86_pte_t* pte = &PTE_MOUNTED(PD_REFERENCED, ptr >> 12);
    if ((*pte & PG_PRESENT)) {
        if ((hit_region->attr & COW_MASK) == COW_MASK) {
            // normal page fault, do COW
            cpu_invplg(ptr);
            uintptr_t pa =
              (uintptr_t)vmm_dup_page(__current->pid, PG_ENTRY_ADDR(*pte));
            pmm_free_page(__current->pid, *pte & ~0xFFF);
            *pte = (*pte & 0xFFF) | pa | PG_WRITE;
            return;
        }
        // impossible cases or accessing privileged page
        goto segv_term;
    }
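
    // physical frame already recorded in the PTE, if any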
    uintptr_t loc = *pte & ~0xfff;

    // a writable page, not present, not cached, pte attr is not null
    // -> a new page needs to be allocated
    if ((hit_region->attr & REGION_WRITE) && (*pte & 0xfff) && !loc) {
        cpu_invplg(ptr);
        uintptr_t pa = pmm_alloc_page(__current->pid, 0);
        *pte = *pte | pa | PG_PRESENT;
        return;
    }

    // page not present, bring it from disk or somewhere else
    __print_panic_msg("WIP page fault route", param);
    while (1)
        ;

segv_term:
    kprintf(KERROR "(pid: %d) Segmentation fault on %p (%p:%p)\n",
            __current->pid,
            ptr,
            param->cs,
            param->eip);
    terminate_proc(LXSEGFAULT);
    // should not return; the terminated process is scheduled away
    schedule();
}
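
// Resolve a fault taken in kernel space: kernel heap pages are mapped lazily,
// so back the faulting page with a fresh physical frame on first touch.
// Returns non-zero when the fault has been resolved.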
static int
do_kernel(v_mapping* mapping)
{
    uintptr_t addr = mapping->va;
    if (addr >= KHEAP_START && addr < PROC_START) {
        // this is a kernel heap page, back it with a fresh frame
        uintptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
        *mapping->pte = (*mapping->pte & 0xfff) | pa | PG_PRESENT;
        cpu_invplg(mapping->pte);
        return 1;
    }
    return 0;
}