X-Git-Url: https://scm.lunaixsky.com/lunaix-os.git/blobdiff_plain/28c176b668c841a3b7fb093faccf0efa39257603..0366f081ce7414c7e6a7cdffffe236cf0cdee400:/lunaix-os/kernel/mm/fault.c

diff --git a/lunaix-os/kernel/mm/fault.c b/lunaix-os/kernel/mm/fault.c
index 5bbf6ee..f40e765 100644
--- a/lunaix-os/kernel/mm/fault.c
+++ b/lunaix-os/kernel/mm/fault.c
@@ -10,7 +10,7 @@
 #include 
 #include 
 
-#include 
+#include 
 
 #include 
@@ -77,15 +77,9 @@ done:
     context->fault_refva = refva;
 }
 
-static bool
+static void
 __prepare_fault_context(struct fault_context* fault)
 {
-    if (!__arch_prepare_fault_context(fault)) {
-        return false;
-    }
-
-    __gather_memaccess_info(fault);
-
     pte_t* fault_ptep = fault->fault_ptep;
     ptr_t fault_va = fault->fault_va;
     pte_t fault_pte = *fault_ptep;
@@ -95,24 +89,25 @@ __prepare_fault_context(struct fault_context* fault)
     // for a ptep fault, the parent page tables should match the actual
     // accesser permission
     if (kernel_refaddr) {
-        ptep_alloc_hierarchy(fault_ptep, fault_va, KERNEL_DATA);
+        ptep_alloc_hierarchy(fault_ptep, fault_va, KERNEL_PGTAB);
     } else {
-        ptep_alloc_hierarchy(fault_ptep, fault_va, USER_DATA);
+        ptep_alloc_hierarchy(fault_ptep, fault_va, USER_PGTAB);
     }
 
     fault->fault_pte = fault_pte;
+
+    if (fault->ptep_fault) {
+        // fault on intermediate levels.
+        fault_pte = pte_setprot(fault_pte, KERNEL_PGTAB);
+    }
 
-    if (fault->ptep_fault && !kernel_refaddr) {
-        fault->resolving = pte_setprot(fault_pte, USER_DATA);
-    } else {
-        fault->resolving = pte_setprot(fault_pte, KERNEL_DATA);
+    if (!kernel_refaddr) {
+        fault_pte = pte_mkuser(fault_pte);
     }
 
-    fault->resolving = pte_mkloaded(fault->resolving);
+    fault->resolving = pte_mkloaded(fault_pte);
 
     fault->kernel_vmfault = kernel_vmfault;
     fault->kernel_access = kernel_context(fault->hstate);
-
-    return true;
 }
 
 static inline void
@@ -160,8 +155,7 @@
 static void
 __handle_anon_region(struct fault_context* fault)
 {
     pte_t pte = fault->resolving;
-    pte_attr_t prot = region_pteprot(fault->vmr);
-    pte = pte_setprot(pte, prot);
+    pte = region_tweakpte(fault->vmr, pte);
 
     // TODO Potentially we can get different order of leaflet here
     struct leaflet* region_part = alloc_leaflet(0);
@@ -191,7 +185,7 @@ __handle_named_region(struct fault_context* fault)
     // TODO Potentially we can get different order of leaflet here
     struct leaflet* region_part = alloc_leaflet(0);
 
-    pte = pte_setprot(pte, region_pteprot(vmr));
+    pte = region_tweakpte(vmr, pte);
     ptep_map_leaflet(fault->fault_ptep, pte, region_part);
 
     if (mseg_off < mapped_len) {
@@ -267,8 +261,8 @@ fault_prealloc_page(struct fault_context* fault)
 }
 
-static void noret
-__fail_to_resolve(struct fault_context* fault)
+void noret
+fault_resolving_failed(struct fault_context* fault)
 {
     if (fault->prealloc) {
         leaflet_return(fault->prealloc);
     }
@@ -338,32 +332,24 @@ done:
     return !!(fault->resolve_type & RESOLVE_OK);
 }
 
-void
-intr_routine_page_fault(const struct hart_state* hstate)
+bool
+handle_page_fault(struct fault_context* fault)
 {
-    if (hstate->depth > 10) {
-        // Too many nested fault! we must messed up something
-        // XXX should we failed silently?
-        spin();
-    }
-
-    struct fault_context fault = { .hstate = hstate };
-
-    if (!__prepare_fault_context(&fault)) {
-        __fail_to_resolve(&fault);
-    }
+    __gather_memaccess_info(fault);
+    __prepare_fault_context(fault);
 
-    fault_prealloc_page(&fault);
+    fault_prealloc_page(fault);
 
-    if (!__try_resolve_fault(&fault)) {
-        __fail_to_resolve(&fault);
+    if (!__try_resolve_fault(fault)) {
+        return false;
     }
 
-    if ((fault.resolve_type & NO_PREALLOC)) {
-        if (fault.prealloc) {
-            leaflet_return(fault.prealloc);
+    if ((fault->resolve_type & NO_PREALLOC)) {
+        if (fault->prealloc) {
+            leaflet_return(fault->prealloc);
         }
     }
 
-    tlb_flush_kernel(fault.fault_va);
-}
\ No newline at end of file
+    tlb_flush_kernel(fault->fault_va);
+    return true;
+}
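
Note on the refactor above: the generic fault path no longer owns the trap
entry. handle_page_fault() now assumes its struct fault_context has already
been populated by the architecture layer (the __arch_prepare_fault_context()
call and the nested-fault guard were dropped from the generic code), it
reports an unresolved fault through its bool return value instead of dying
internally, and the former static __fail_to_resolve() is exported as
fault_resolving_failed() so the caller can drive the failure path. Below is a
minimal sketch of what the arch-side caller could look like under those
assumptions; the entry-point name and the arch hook are taken from the code
removed in this diff and are illustrative, not necessarily what the new arch
layer actually defines.

    /*
     * Hypothetical arch-side trap handler, built only from symbols
     * visible in this diff. Assumes the fault-context and hart-state
     * headers whose names are elided in the hunk above.
     */
    void
    intr_routine_page_fault(const struct hart_state* hstate)
    {
        struct fault_context fault = { .hstate = hstate };

        /* nested-fault guard, formerly in the generic handler */
        if (hstate->depth > 10) {
            spin();
        }

        /* arch-specific decoding of the faulting access; on failure,
         * fault_resolving_failed() is noret, so control never returns */
        if (!__arch_prepare_fault_context(&fault)) {
            fault_resolving_failed(&fault);
        }

        /* generic, arch-neutral resolution */
        if (!handle_page_fault(&fault)) {
            fault_resolving_failed(&fault);
        }
    }

Splitting it this way keeps policy (when to spin, kill, or panic) in the trap
layer while kernel/mm/fault.c stays mechanism-only, which is presumably why
fault_resolving_failed() is now non-static.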