#include <lunaix/mm/fault.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/sched.h>
#include <lunaix/signal.h>
#include <lunaix/status.h>
#include <lunaix/syslog.h>
#include <lunaix/trace.h>
#include <lunaix/hart_state.h>
#include <lunaix/failsafe.h>

#include <asm/mm_defs.h>

#include <klibc/string.h>
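/*
 * __gather_memaccess_info() classifies the faulting access. Lunaix keeps
 * page tables reachable through self-referencing vm mounts, so fault_va may
 * itself point at a pte slot (a "ptep") rather than ordinary data. The
 * helper below appears to detect that case (vmnt_packed), note whether the
 * access went through a remote (guest) address space, and then repeatedly
 * unpack the ptep until the underlying referenced va is recovered and
 * recorded as fault_refva.
 */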
static void
__gather_memaccess_info(struct fault_context* context)
{
    pte_t* ptep = (pte_t*)context->fault_va;
    ptr_t mnt = ptep_vm_mnt(ptep);
    ptr_t refva;

    context->mm = vmspace(__current);

    if (!vmnt_packed(ptep)) {
        /* ... */
    }

    context->ptep_fault = true;
    context->remote_fault = !active_vms(mnt);

    if (context->remote_fault && context->mm) {
        context->mm = context->mm->guest_mm;
    }

    // unpack the ptep to reveal the one true va!
    ptep = (pte_t*)page_addr(ptep_pfn(ptep));
    mnt = ptep_vm_mnt(ptep);
    if (!vmnt_packed(ptep)) {
        /* ... */
    }

    ptep = (pte_t*)page_addr(ptep_pfn(ptep));
    mnt = ptep_vm_mnt(ptep);
    if (!vmnt_packed(ptep)) {
        /* ... */
    }

    ptep = (pte_t*)page_addr(ptep_pfn(ptep));
    mnt = ptep_vm_mnt(ptep);
    if (!vmnt_packed(ptep)) {
        /* ... */
    }

    ptep = (pte_t*)page_addr(ptep_pfn(ptep));
    mnt = ptep_vm_mnt(ptep);

    assert(!vmnt_packed(ptep));

    context->fault_refva = refva;
}
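/*
 * __prepare_fault_context() snapshots the faulting pte and derives the pte
 * that would resolve the fault. The kernel/user split of the page-table
 * hierarchy allocation below is inferred from the surrounding comment: the
 * parent tables are presumably allocated with permissions matching whether
 * the referenced address is a kernel or a user address.
 */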
static void
__prepare_fault_context(struct fault_context* fault)
{
    pte_t* fault_ptep = fault->fault_ptep;
    ptr_t fault_va = fault->fault_va;
    pte_t fault_pte = *fault_ptep;
    bool kernel_vmfault = kernel_addr(fault_va);
    bool kernel_refaddr = kernel_addr(fault->fault_refva);

    // for a ptep fault, the parent page tables should match the actual
    // accessor permission
    if (kernel_refaddr) {
        ptep_alloc_hierarchy(fault_ptep, fault_va, KERNEL_PGTAB);
    } else {
        ptep_alloc_hierarchy(fault_ptep, fault_va, USER_PGTAB);
    }

    fault->fault_pte = fault_pte;

    if (fault->ptep_fault) {
        // fault on intermediate levels.
        fault_pte = pte_setprot(fault_pte, KERNEL_PGTAB);

        if (!kernel_refaddr) {
            fault_pte = pte_mkuser(fault_pte);
        }
    }

    fault->resolving = pte_mkloaded(fault_pte);
    fault->kernel_vmfault = kernel_vmfault;
    fault->kernel_access = kernel_context(fault->hstate);
}
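/*
 * Flush TLB entries covering the page(s) just (re)mapped for this fault;
 * leaflet_nfold() presumably yields the number of base pages spanned by
 * the leaflet.
 */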
static void
__flush_staled_tlb(struct fault_context* fault, struct leaflet* leaflet)
{
    tlb_flush_mm_range(fault->mm, fault->fault_va, leaflet_nfold(leaflet));
}
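/*
 * A "conflict" pte is present but disagrees with the access: the common
 * case is a write to a write-protected page inside a writable region, i.e.
 * copy-on-write. The leaflet is duplicated, remapped writable/clean, and
 * the original dropped. If the pte does not allow user access at all, the
 * access is presumably a genuine violation and the fault is left unresolved
 * (the early return below is an assumption based on that reading).
 */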
static void
__handle_conflict_pte(struct fault_context* fault)
{
    pte_t pte;
    struct leaflet *fault_leaflet, *duped_leaflet;

    pte = fault->fault_pte;
    fault_leaflet = pte_leaflet(pte);

    if (!pte_allow_user(pte)) {
        // not a COW case; presumably a genuine violation, leave unresolved
        return;
    }

    assert(pte_iswprotect(pte));

    if (writable_region(fault->vmr)) {
        // normal page fault, do COW
        duped_leaflet = dup_leaflet(fault_leaflet);

        pte = pte_mkwritable(pte);
        pte = pte_mkuntouch(pte);
        pte = pte_mkclean(pte);

        ptep_map_leaflet(fault->fault_ptep, pte, duped_leaflet);
        __flush_staled_tlb(fault, duped_leaflet);

        leaflet_return(fault_leaflet);

        fault_resolved(fault, NO_PREALLOC);
    }
}
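/*
 * Anonymous regions have no backing file: resolving the fault is just
 * allocating a fresh leaflet, applying the region's pte attributes and
 * mapping it at the faulting slot.
 */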
static void
__handle_anon_region(struct fault_context* fault)
{
    pte_t pte = fault->resolving;
    pte = region_tweakpte(fault->vmr, pte);

    // TODO: potentially we can get a different order of leaflet here
    struct leaflet* region_part = alloc_leaflet(0);

    ptep_map_leaflet(fault->fault_ptep, pte, region_part);
    __flush_staled_tlb(fault, region_part);

    fault_resolved(fault, NO_PREALLOC);
}
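/*
 * File-backed (named) regions are populated from the mapped file. The
 * branch layout below is inferred from the visible calls: a fully mapped
 * page is filled with fops->read_page(), a partial tail page is wiped first
 * and then filled with fops->read(); on a read error the freshly mapped
 * leaflet is presumably unmapped and returned, leaving the fault unresolved.
 */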
static void
__handle_named_region(struct fault_context* fault)
{
    int errno;

    struct mm_region* vmr = fault->vmr;
    struct v_file* file = vmr->mfile;
    struct v_file_ops* fops = file->ops;

    pte_t pte = fault->resolving;
    ptr_t fault_va = page_aligned(fault->fault_va);

    u32_t mseg_off = (fault_va - vmr->start);
    u32_t mfile_off = mseg_off + vmr->foff;
    size_t mapped_len = vmr->flen;

    // TODO: potentially we can get a different order of leaflet here
    struct leaflet* region_part = alloc_leaflet(0);

    pte = region_tweakpte(vmr, pte);
    ptep_map_leaflet(fault->fault_ptep, pte, region_part);

    if (mseg_off < mapped_len) {
        mapped_len = MIN(mapped_len - mseg_off, PAGE_SIZE);
    }
    else {
        mapped_len = 0;
    }

    if (mapped_len == PAGE_SIZE) {
        errno = fops->read_page(file->inode, (void*)fault_va, mfile_off);
    }
    else {
        // partially mapped page: clear it, then fill the mapped prefix
        leaflet_wipe(region_part);
        errno = fops->read(file->inode,
                           (void*)fault_va, mapped_len, mfile_off);
    }

    if (errno < 0) {
        ERROR("failed to populate page (%d)", errno);
        ptep_unmap_leaflet(fault->fault_ptep, region_part);
        leaflet_return(region_part);
        return;
    }

    __flush_staled_tlb(fault, region_part);

    fault_resolved(fault, NO_PREALLOC);
}
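/*
 * Kernel-space faults are only resolvable when the faulting address is a
 * page-table slot (ptep): the preallocated leaflet is pinned, zeroed and
 * mapped to back the missing table. The early return on !is_ptep() is
 * inferred from the comment inside the function.
 */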
static void
__handle_kernel_page(struct fault_context* fault)
{
    // we must ensure only a ptep fault is resolvable
    if (!is_ptep(fault->fault_va)) {
        return;
    }

    struct leaflet* leaflet = fault->prealloc;

    pin_leaflet(leaflet);
    leaflet_wipe(leaflet);

    pte_t pte = fault->resolving;
    ptep_map_leaflet(fault->fault_ptep, pte, leaflet);

    tlb_flush_kernel_ranged(fault->fault_va, leaflet_nfold(leaflet));

    fault_resolved(fault, 0);
}
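/*
 * Preallocate a leaflet before attempting resolution, but only when the
 * faulting pte is still null (nothing mapped there yet). Handlers that do
 * not consume it signal so via NO_PREALLOC and the page is given back by
 * the caller.
 */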
static void
fault_prealloc_page(struct fault_context* fault)
{
    if (!pte_isnull(fault->fault_pte)) {
        return;
    }

    struct leaflet* leaflet = alloc_leaflet(0);
    fault->prealloc = leaflet;
}
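/*
 * Failure path: release any preallocated page, log the access, and either
 * panic (fault raised from kernel context) or deliver SIGSEGV to the
 * faulting thread. The final fail() guards against control ever returning
 * here; presumably execution is expected to leave via the panic or signal
 * delivery path.
 */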
static void
fault_resolving_failed(struct fault_context* fault)
{
    if (fault->prealloc) {
        leaflet_return(fault->prealloc);
    }

    ERROR("(pid: %d) Segmentation fault on %p (%p,e=0x%x)",
          /* ... */);

    if (fault->kernel_access) {
        // if a page fault from kernel is not resolvable, then
        // something must have gone south
        FATAL("unresolvable page fault");
        failsafe_diagnostic();
    }

    trace_printstack_isr(fault->hstate);
    thread_setsignal(current_thread, _SIGSEGV);

    fail("Unexpected return from segfault");
}
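/*
 * Dispatch the fault to a handler: guardian ptes (region over-run guards)
 * are rejected; kernel-context faults on kernel addresses go to
 * __handle_kernel_page(); otherwise the owning region decides between the
 * conflict (COW), anonymous and file-backed paths. The early returns and
 * the NULL-region check below are inferred from context rather than shown
 * verbatim in the source.
 */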
static bool
__try_resolve_fault(struct fault_context* fault)
{
    pte_t fault_pte = fault->fault_pte;
    if (pte_isguardian(fault_pte)) {
        ERROR("memory region overrun");
        return false;
    }

    if (fault->kernel_vmfault && fault->kernel_access) {
        __handle_kernel_page(fault);
        return !!(fault->resolve_type & RESOLVE_OK);
    }

    vm_regions_t* vmr = &fault->mm->regions;
    fault->vmr = region_get(vmr, fault->fault_va);
    if (!fault->vmr) {
        return false;
    }

    if (pte_isloaded(fault_pte)) {
        __handle_conflict_pte(fault);
    }
    else if (anon_region(fault->vmr)) {
        __handle_anon_region(fault);
    }
    else if (fault->vmr->mfile) {
        __handle_named_region(fault);
    }
    else {
        // page not present, might be a chance to introduce a swap file?
        ERROR("WIP page fault route");
    }

    return !!(fault->resolve_type & RESOLVE_OK);
}
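/*
 * Top-level entry, presumably invoked by the architecture's page-fault ISR
 * after it has filled in a struct fault_context. A hypothetical caller
 * (names below are illustrative, not taken from this file) might look like:
 *
 *     struct fault_context fault = { .hstate = hstate,
 *                                    .fault_va = fault_addr };
 *     handle_page_fault(&fault);
 *
 * Calling fault_resolving_failed() in the unresolved branch below is an
 * assumption based on the helpers defined above.
 */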
void
handle_page_fault(struct fault_context* fault)
{
    __gather_memaccess_info(fault);
    __prepare_fault_context(fault);
    fault_prealloc_page(fault);

    if (!__try_resolve_fault(fault)) {
        fault_resolving_failed(fault);
    }

    if ((fault->resolve_type & NO_PREALLOC)) {
        if (fault->prealloc) {
            leaflet_return(fault->prealloc);
        }
    }

    tlb_flush_kernel(fault->fault_va);
}