#include <lunaix/mm/fault.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/sched.h>
#include <lunaix/signal.h>
#include <lunaix/status.h>
#include <lunaix/syslog.h>
#include <lunaix/trace.h>
#include <lunaix/hart_state.h>
#include <lunaix/failsafe.h>

#include <sys/mm/mm_defs.h>

#include <klibc/string.h>
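
// Log module tag for the ERROR/FATAL macros below; the tag name here is an
// assumption, adjust it to whatever this file actually registers.
LOG_MODULE("pf")

/*
 * Page-fault handling flow implemented in this file:
 *   1. __prepare_fault_context() collects the faulting address, the pte
 *      being touched and whether the access came from kernel context.
 *   2. fault_prealloc_page() optionally grabs a leaflet up front while the
 *      faulting pte is still null.
 *   3. __try_resolve_fault() dispatches to the COW, anonymous, file-backed
 *      or kernel-page resolvers.
 *   4. Resolvers that did not consume the preallocated leaflet report
 *      NO_PREALLOC so intr_routine_page_fault() can return it; any
 *      unresolved fault ends up in __fail_to_resolve().
 */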
static void
__gather_memaccess_info(struct fault_context* context)
{
    pte_t* ptep = (pte_t*)context->fault_va;
    ptr_t mnt = ptep_vm_mnt(ptep);
    ptr_t refva = context->fault_va;

    context->mm = vmspace(__current);

    if (mnt < VMS_MOUNT_1) {
        // plain memory access: fault_va is already the referenced address
        goto done;
    }

    context->ptep_fault = true;
    context->remote_fault = (mnt != VMS_SELF);

    if (context->remote_fault && context->mm) {
        context->mm = context->mm->guest_mm;
    }

    // the faulting address lands inside a page-table mount: walk back
    // through the self-mapping one level at a time to recover the virtual
    // address this ptep ultimately translates
    ptep = (pte_t*)page_addr(ptep_pfn(ptep));
    mnt = ptep_vm_mnt(ptep);
    if (mnt < VMS_MOUNT_1) {
        refva = (ptr_t)ptep;
        goto done;
    }

    ptep = (pte_t*)page_addr(ptep_pfn(ptep));
    mnt = ptep_vm_mnt(ptep);
    if (mnt < VMS_MOUNT_1) {
        refva = (ptr_t)ptep;
        goto done;
    }

    ptep = (pte_t*)page_addr(ptep_pfn(ptep));
    mnt = ptep_vm_mnt(ptep);
    if (mnt < VMS_MOUNT_1) {
        refva = (ptr_t)ptep;
        goto done;
    }

    ptep = (pte_t*)page_addr(ptep_pfn(ptep));
    mnt = ptep_vm_mnt(ptep);

    assert(mnt < VMS_MOUNT_1);
    refva = (ptr_t)ptep;

done:
    context->fault_refva = refva;
}
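/*
 * Populate the fault_context: let the arch layer fill in the raw fault
 * information, classify the access (kernel vs user, plain memory vs page
 * table), and precompute the pte template used by the resolvers below.
 */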
static bool
__prepare_fault_context(struct fault_context* fault)
{
    if (!__arch_prepare_fault_context(fault)) {
        return false;
    }

    __gather_memaccess_info(fault);

    pte_t* fault_ptep = fault->fault_ptep;
    ptr_t fault_va = fault->fault_va;
    pte_t fault_pte = *fault_ptep;
    bool kernel_vmfault = kernel_addr(fault_va);
    bool kernel_refaddr = kernel_addr(fault->fault_refva);

    // for a ptep fault, the parent page tables should match the actual
    // accessor permission
    if (kernel_refaddr) {
        ptep_alloc_hierarchy(fault_ptep, fault_va, KERNEL_DATA);
    } else {
        ptep_alloc_hierarchy(fault_ptep, fault_va, USER_DATA);
    }

    fault->fault_pte = fault_pte;

    if (fault->ptep_fault && !kernel_refaddr) {
        fault->resolving = pte_setprot(fault_pte, USER_DATA);
    } else {
        fault->resolving = pte_setprot(fault_pte, KERNEL_DATA);
    }

    fault->resolving = pte_mkloaded(fault->resolving);
    fault->kernel_vmfault = kernel_vmfault;
    fault->kernel_access = kernel_context(fault->hstate);

    return true;
}
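/*
 * Invalidate any stale translation covering the page(s) just (re)mapped
 * for the faulting address space.
 */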
static void
__flush_staled_tlb(struct fault_context* fault, struct leaflet* leaflet)
{
    tlb_flush_mm_range(fault->mm, fault->fault_va, leaflet_nfold(leaflet));
}
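/*
 * The faulting pte is present yet the access still faulted: either a user
 * access to a privileged page (not resolvable) or a write to a
 * write-protected page, which is resolved as copy-on-write when the
 * region itself is writable.
 */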
static void
__handle_conflict_pte(struct fault_context* fault)
{
    pte_t pte;
    struct leaflet *fault_leaflet, *duped_leaflet;

    pte = fault->fault_pte;
    fault_leaflet = pte_leaflet(pte);

    if (!pte_allow_user(pte)) {
        // user access to a privileged mapping: leave the fault unresolved
        return;
    }

    assert(pte_iswprotect(pte));

    if (writable_region(fault->vmr)) {
        // normal page fault, do COW
        duped_leaflet = dup_leaflet(fault_leaflet);

        pte = pte_mkwritable(pte);
        pte = pte_mkuntouch(pte);
        pte = pte_mkclean(pte);

        ptep_map_leaflet(fault->fault_ptep, pte, duped_leaflet);
        __flush_staled_tlb(fault, duped_leaflet);

        leaflet_return(fault_leaflet);

        fault_resolved(fault, NO_PREALLOC);
    }
}
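/*
 * First touch of an anonymous mapping: back the faulting page with a fresh
 * leaflet and install it with the region's protection bits.
 */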
static void
__handle_anon_region(struct fault_context* fault)
{
    pte_t pte = fault->resolving;
    pte_attr_t prot = region_pteprot(fault->vmr);
    pte = pte_setprot(pte, prot);

    // TODO Potentially we could use a different leaflet order here
    struct leaflet* region_part = alloc_leaflet(0);

    ptep_map_leaflet(fault->fault_ptep, pte, region_part);
    __flush_staled_tlb(fault, region_part);

    fault_resolved(fault, NO_PREALLOC);
}
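/*
 * First touch of a file-backed (named) mapping: map a fresh leaflet, then
 * ask the backing file to populate it from the matching file offset. On a
 * read failure the mapping is torn down again and the fault stays
 * unresolved.
 */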
static void
__handle_named_region(struct fault_context* fault)
{
    struct mm_region* vmr = fault->vmr;
    struct v_file* file = vmr->mfile;

    pte_t pte = fault->resolving;
    ptr_t fault_va = page_aligned(fault->fault_va);

    u32_t mseg_off = (fault_va - vmr->start);
    u32_t mfile_off = mseg_off + vmr->foff;

    // TODO Potentially we could use a different leaflet order here
    struct leaflet* region_part = alloc_leaflet(0);

    pte = pte_setprot(pte, region_pteprot(vmr));
    ptep_map_leaflet(fault->fault_ptep, pte, region_part);

    int errno = file->ops->read_page(file->inode, (void*)fault_va, mfile_off);
    if (errno < 0) {
        ERROR("failed to populate page (%d)", errno);

        ptep_unmap_leaflet(fault->fault_ptep, region_part);
        leaflet_return(region_part);
        return;
    }

    __flush_staled_tlb(fault, region_part);

    fault_resolved(fault, NO_PREALLOC);
}
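/*
 * Kernel-space fault: only a fault on the page-table mount area (i.e. a
 * missing page-table page) is considered resolvable; the preallocated
 * leaflet is pinned, wiped and installed there.
 */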
static void
__handle_kernel_page(struct fault_context* fault)
{
    // we must ensure only a ptep fault is resolvable here
    if (fault->fault_va < VMS_MOUNT_1) {
        return;
    }

    struct leaflet* leaflet = fault->prealloc;

    pin_leaflet(leaflet);
    leaflet_wipe(leaflet);

    pte_t pte = fault->resolving;
    ptep_map_leaflet(fault->fault_ptep, pte, leaflet);

    tlb_flush_kernel_ranged(fault->fault_va, leaflet_nfold(leaflet));

    fault_resolved(fault, 0);
}
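/*
 * Reserve a leaflet before attempting resolution, but only when the
 * faulting pte is still empty (a non-null pte means an existing mapping is
 * being fixed up rather than populated).
 */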
static void
fault_prealloc_page(struct fault_context* fault)
{
    if (!pte_isnull(fault->fault_pte)) {
        return;
    }

    struct leaflet* leaflet = alloc_leaflet(0);
    fault->prealloc = leaflet;
}
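/*
 * Point of no return for unresolvable faults: release any preallocated
 * leaflet, log the access, then either panic (kernel access) or deliver
 * SIGSEGV to the offending thread.
 */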
static void
__fail_to_resolve(struct fault_context* fault)
{
    if (fault->prealloc) {
        leaflet_return(fault->prealloc);
    }

    ERROR("(pid: %d) Segmentation fault on %p (%p,e=0x%x)",
          __current->pid,
          fault->fault_va,
          fault->fault_refva,
          fault->hstate->execp->err_code);

    if (fault->kernel_access) {
        // if a page fault from kernel is not resolvable, then
        // something must have gone south
        FATAL("unresolvable page fault");
        failsafe_diagnostic();
    }

    trace_printstack_isr(fault->hstate);
    thread_setsignal(current_thread, _SIGSEGV);

    schedule();
    fail("Unexpected return from segfault");
}
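/*
 * Dispatch the fault to the appropriate resolver. Kernel-space faults from
 * kernel context are handled directly; user-space faults are matched
 * against the owning memory region first. Returns whether the fault was
 * resolved.
 */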
static bool
__try_resolve_fault(struct fault_context* fault)
{
    pte_t fault_pte = fault->fault_pte;
    if (pte_isguardian(fault_pte)) {
        ERROR("memory region over-running");
        return false;
    }

    if (fault->kernel_vmfault && fault->kernel_access) {
        __handle_kernel_page(fault);
        goto done;
    }

    vm_regions_t* vmr = &fault->mm->regions;
    fault->vmr = region_get(vmr, fault->fault_va);
    if (!fault->vmr) {
        return false;
    }

    if (pte_isloaded(fault_pte)) {
        __handle_conflict_pte(fault);
    }
    else if (anon_region(fault->vmr)) {
        __handle_anon_region(fault);
    }
    else if (fault->vmr->mfile) {
        __handle_named_region(fault);
    }
    else {
        // page not present; might be a chance to introduce a swap file?
        ERROR("WIP page fault route");
    }

done:
    return !!(fault->resolve_type & RESOLVE_OK);
}
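/*
 * Page-fault interrupt entry point: guard against runaway nested faults,
 * build the fault context, attempt resolution, and recycle the
 * preallocated leaflet when a resolver did not consume it.
 */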
void
intr_routine_page_fault(const struct hart_state* hstate)
{
    if (hstate->depth > 10) {
        // Too many nested faults! We must have messed something up
        // XXX should we fail silently?
        failsafe_diagnostic();
    }

    struct fault_context fault = { .hstate = hstate };

    if (!__prepare_fault_context(&fault)) {
        __fail_to_resolve(&fault);
    }

    fault_prealloc_page(&fault);

    if (!__try_resolve_fault(&fault)) {
        __fail_to_resolve(&fault);
    }

    if ((fault.resolve_type & NO_PREALLOC)) {
        if (fault.prealloc) {
            leaflet_return(fault.prealloc);