1 #include <arch/x86/interrupts.h>
2 #include <lunaix/common.h>
3 #include <lunaix/lxsignal.h>
4 #include <lunaix/mm/mm.h>
5 #include <lunaix/mm/pmm.h>
6 #include <lunaix/mm/region.h>
7 #include <lunaix/mm/vmm.h>
8 #include <lunaix/sched.h>
9 #include <lunaix/status.h>
10 #include <lunaix/syslog.h>
// Translation-unit logging shim: forwards printf-style arguments to the
// kernel logger __kprintf under the "PFAULT" tag.
// NOTE(review): this view of the file is non-contiguous — the return type,
// braces, and the va_list setup (va_start/va_end for `args`) are not visible
// here; confirm against the full source before editing.
13 kprintf(const char* fmt, ...)
17     __kprintf("PFAULT", fmt, args);
// A region whose attributes contain ALL of these bits (shared + readable +
// writable) is treated as copy-on-write eligible by the page-fault handler
// below (compared with `(attr & COW_MASK) == COW_MASK`).
21 #define COW_MASK (REGION_RSHARED | REGION_READ | REGION_WRITE)
// Forward declaration of the panic-banner helper used on unrecoverable
// faults. NOTE(review): return type is cut off in this view — presumably
// void; confirm against the definition elsewhere in this file.
24 __print_panic_msg(const char* msg, const isr_param* param);
// x86 page-fault (#PF) interrupt service routine.
// NOTE(review): this view of the file is non-contiguous (embedded line
// numbers jump); several statements, else-arms and closing braces are not
// visible. Comments below describe only what the visible lines establish.
27 intr_routine_page_fault(const isr_param* param)
// CR2 holds the linear address that triggered the fault.
29     uintptr_t ptr = cpu_rcr2();
// Resolve the faulting address to its page-table mapping; the not-found
// branch body is not visible here — presumably an early bail-out.
35     if (!vmm_lookup(ptr, &mapping)) {
// RPL of the saved CS distinguishes a kernel-mode fault (RPL == 0) from a
// user-mode fault.
39     if (!SEL_RPL(param->cs)) {
// Kernel-mode fault: let do_kernel() try to satisfy it (e.g. lazily mapped
// kernel heap). Success branch body not visible in this view.
41         if (do_kernel(&mapping)) {
// Find the VM region of the current process covering the faulting address.
// NOTE(review): no visible NULL-check on hit_region before it is
// dereferenced below — confirm one exists in the omitted lines.
47     struct mm_region* hit_region = region_get(&__current->mm.regions, ptr);
// Locate the PTE via the recursively mapped page directory window
// (PD_REFERENCED); ptr >> 12 is the page-frame index of the fault address.
54     x86_pte_t* pte = &PTE_MOUNTED(PD_REFERENCED, ptr >> 12);
55     if ((*pte & PG_PRESENT)) {
// Page is present and the region carries shared+read+write: the fault is a
// write to a copy-on-write page.
56         if ((hit_region->attr & COW_MASK) == COW_MASK) {
57             // normal page fault, do COW
// Duplicate the physical frame for this process, drop the reference to the
// old frame, then remap writable while preserving the low attribute bits.
60                 (uintptr_t)vmm_dup_page(__current->pid, PG_ENTRY_ADDR(*pte));
61             pmm_free_page(__current->pid, *pte & ~0xFFF);
62             *pte = (*pte & 0xFFF) | pa | PG_WRITE;
65         // impossible cases or accessing privileged page
// Physical frame address currently recorded in the (non-present) PTE, if any.
74     uintptr_t loc = *pte & ~0xfff;
76     // a writable page, not present, not cached, pte attr is not null
77     // -> a new page need to be alloc
78     if ((hit_region->attr & REGION_WRITE) && (*pte & 0xfff) && !loc) {
// Demand allocation: back the page with a fresh frame owned by the current
// process and mark it present (existing attribute bits are kept via OR).
80         uintptr_t pa = pmm_alloc_page(__current->pid, 0);
81         *pte = *pte | pa | PG_PRESENT;
85     // page not present, bring it from disk or somewhere else
// Swap-in / file-backed paging is not implemented yet — panic instead.
86     __print_panic_msg("WIP page fault route", param);
// Unrecoverable user-mode fault: log the segfault and queue SIGSEGV for the
// current process (delivery presumably happens on return to user mode —
// TODO confirm against the scheduler/signal code).
91     kprintf(KERROR "(pid: %d) Segmentation fault on %p (%p:%p)\n",
96     __SIGSET(__current->sig_pending, _SIGSEGV);
// Handle a fault taken in kernel mode: demand-allocates kernel heap pages.
// NOTE(review): the return type and the function's tail (return value on
// success/failure, closing brace) lie outside this view — confirm in the
// full source; the caller treats a non-zero result as "handled".
108 do_kernel(v_mapping* mapping)
110     uintptr_t addr = mapping->va;
// Faults in [KHEAP_START, PROC_START) fall on kernel-heap pages, which are
// mapped lazily on first touch.
111     if (addr >= KHEAP_START && addr < PROC_START) {
112         // This is kernel heap page
// Allocate a kernel-owned frame, install it while preserving the PTE's low
// attribute bits, then invalidate the stale TLB entry.
// NOTE(review): cpu_invplg is given the PTE's address rather than
// mapping->va — verify this is the intended invalidation target.
113         uintptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
114         *mapping->pte = (*mapping->pte & 0xfff) | pa | PG_PRESENT;
115         cpu_invplg(mapping->pte);