X-Git-Url: https://scm.lunaixsky.com/lunaix-os.git/blobdiff_plain/69777bdcab284335651a8002e2896f3862fa423d..e9fda657d5cb6f6619605766618aa78edd97f6a9:/lunaix-os/kernel/mm/fault.c

diff --git a/lunaix-os/kernel/mm/fault.c b/lunaix-os/kernel/mm/fault.c
index 6ba63a7..f40e765 100644
--- a/lunaix-os/kernel/mm/fault.c
+++ b/lunaix-os/kernel/mm/fault.c
@@ -7,9 +7,10 @@
 #include 
 #include 
 #include 
-#include 
+#include 
+#include 
 
-#include 
+#include 
 
 #include 
 
@@ -24,23 +25,25 @@ __gather_memaccess_info(struct fault_context* context)
 
     context->mm = vmspace(__current);
 
-    if (mnt < VMS_MOUNT_1) {
+    if (!vmnt_packed(ptep)) {
         refva = (ptr_t)ptep;
         goto done;
     }
 
     context->ptep_fault = true;
-    context->remote_fault = (mnt != VMS_SELF);
+    context->remote_fault = !active_vms(mnt);
     if (context->remote_fault && context->mm) {
         context->mm = context->mm->guest_mm;
         assert(context->mm);
     }
 
+    // unpack the ptep to reveal the one true va!
+
 #if LnT_ENABLED(1)
     ptep = (pte_t*)page_addr(ptep_pfn(ptep));
     mnt = ptep_vm_mnt(ptep);
-    if (mnt < VMS_MOUNT_1) {
+    if (!vmnt_packed(ptep)) {
         refva = (ptr_t)ptep;
         goto done;
     }
@@ -49,7 +52,7 @@ __gather_memaccess_info(struct fault_context* context)
 #if LnT_ENABLED(2)
     ptep = (pte_t*)page_addr(ptep_pfn(ptep));
     mnt = ptep_vm_mnt(ptep);
-    if (mnt < VMS_MOUNT_1) {
+    if (!vmnt_packed(ptep)) {
         refva = (ptr_t)ptep;
         goto done;
     }
@@ -58,7 +61,7 @@ __gather_memaccess_info(struct fault_context* context)
 #if LnT_ENABLED(3)
     ptep = (pte_t*)page_addr(ptep_pfn(ptep));
     mnt = ptep_vm_mnt(ptep);
-    if (mnt < VMS_MOUNT_1) {
+    if (!vmnt_packed(ptep)) {
         refva = (ptr_t)ptep;
         goto done;
     }
@@ -67,22 +70,16 @@ __gather_memaccess_info(struct fault_context* context)
     ptep = (pte_t*)page_addr(ptep_pfn(ptep));
     mnt = ptep_vm_mnt(ptep);
 
-    assert(mnt < VMS_MOUNT_1);
+    assert(!vmnt_packed(ptep));
     refva = (ptr_t)ptep;
 
 done:
     context->fault_refva = refva;
 }
 
-static bool
+static void
 __prepare_fault_context(struct fault_context* fault)
 {
-    if (!__arch_prepare_fault_context(fault)) {
-        return false;
-    }
-
-    __gather_memaccess_info(fault);
-
     pte_t* fault_ptep = fault->fault_ptep;
     ptr_t fault_va = fault->fault_va;
     pte_t fault_pte = *fault_ptep;
@@ -92,30 +89,42 @@ __prepare_fault_context(struct fault_context* fault)
     // for a ptep fault, the parent page tables should match the actual
     // accesser permission
    if (kernel_refaddr) {
-        ptep_alloc_hierarchy(fault_ptep, fault_va, KERNEL_DATA);
+        ptep_alloc_hierarchy(fault_ptep, fault_va, KERNEL_PGTAB);
     } else {
-        ptep_alloc_hierarchy(fault_ptep, fault_va, USER_DATA);
+        ptep_alloc_hierarchy(fault_ptep, fault_va, USER_PGTAB);
     }
 
     fault->fault_pte = fault_pte;
+
+    if (fault->ptep_fault) {
+        // fault on intermediate levels.
+        fault_pte = pte_setprot(fault_pte, KERNEL_PGTAB);
+    }
 
-    if (fault->ptep_fault && !kernel_refaddr) {
-        fault->resolving = pte_setprot(fault_pte, USER_DATA);
-    } else {
-        fault->resolving = pte_setprot(fault_pte, KERNEL_DATA);
+    if (!kernel_refaddr) {
+        fault_pte = pte_mkuser(fault_pte);
     }
 
+    fault->resolving = pte_mkloaded(fault_pte);
     fault->kernel_vmfault = kernel_vmfault;
-    fault->kernel_access = kernel_context(fault->ictx);
+    fault->kernel_access = kernel_context(fault->hstate);
+}
 
-    return true;
+static inline void
+__flush_staled_tlb(struct fault_context* fault, struct leaflet* leaflet)
+{
+    tlb_flush_mm_range(fault->mm, fault->fault_va, leaflet_nfold(leaflet));
 }
 
 static void
 __handle_conflict_pte(struct fault_context* fault)
 {
-    pte_t pte = fault->fault_pte;
-    ptr_t fault_pa = pte_paddr(pte);
+    pte_t pte;
+    struct leaflet *fault_leaflet, *duped_leaflet;
+
+    pte = fault->fault_pte;
+    fault_leaflet = pte_leaflet(pte);
+
     if (!pte_allow_user(pte)) {
         return;
     }
@@ -124,14 +133,18 @@ __handle_conflict_pte(struct fault_context* fault)
 
     if (writable_region(fault->vmr)) {
         // normal page fault, do COW
-        // TODO makes `vmm_dup_page` arch-independent
-        ptr_t pa = (ptr_t)vmm_dup_page(fault_pa);
+        duped_leaflet = dup_leaflet(fault_leaflet);
+
+        pte = pte_mkwritable(pte);
+        pte = pte_mkuntouch(pte);
+        pte = pte_mkclean(pte);
+
+        ptep_map_leaflet(fault->fault_ptep, pte, duped_leaflet);
+        __flush_staled_tlb(fault, duped_leaflet);
 
-        pmm_free_page(fault_pa);
-        pte_t new_pte = pte_setpaddr(pte, pa);
-        new_pte = pte_mkwritable(new_pte);
+        leaflet_return(fault_leaflet);
 
-        fault_resolved(fault, new_pte, NO_PREALLOC);
+        fault_resolved(fault, NO_PREALLOC);
     }
 
     return;
@@ -142,47 +155,91 @@
 static void
 __handle_anon_region(struct fault_context* fault)
 {
     pte_t pte = fault->resolving;
-    pte_attr_t prot = region_pteprot(fault->vmr);
-    pte = pte_setprot(pte, prot);
+    pte = region_tweakpte(fault->vmr, pte);
 
-    fault_resolved(fault, pte, 0);
+    // TODO Potentially we can get different order of leaflet here
+    struct leaflet* region_part = alloc_leaflet(0);
+
+    ptep_map_leaflet(fault->fault_ptep, pte, region_part);
+    __flush_staled_tlb(fault, region_part);
+
+    fault_resolved(fault, NO_PREALLOC);
 }
 
 static void
 __handle_named_region(struct fault_context* fault)
 {
+    int errno = 0;
     struct mm_region* vmr = fault->vmr;
     struct v_file* file = vmr->mfile;
+    struct v_file_ops * fops = file->ops;
 
     pte_t pte = fault->resolving;
-    ptr_t fault_va = va_align(fault->fault_va);
+    ptr_t fault_va = page_aligned(fault->fault_va);
 
     u32_t mseg_off = (fault_va - vmr->start);
     u32_t mfile_off = mseg_off + vmr->foff;
+    size_t mapped_len = vmr->flen;
+
+    // TODO Potentially we can get different order of leaflet here
+    struct leaflet* region_part = alloc_leaflet(0);
+
+    pte = region_tweakpte(vmr, pte);
+    ptep_map_leaflet(fault->fault_ptep, pte, region_part);
+
+    if (mseg_off < mapped_len) {
+        mapped_len = MIN(mapped_len - mseg_off, PAGE_SIZE);
+    }
+    else {
+        mapped_len = 0;
+    }
+
+    if (mapped_len == PAGE_SIZE) {
+        errno = fops->read_page(file->inode, (void*)fault_va, mfile_off);
+    }
+    else {
+        leaflet_wipe(region_part);
+
+        if (mapped_len) {
+            errno = fops->read(file->inode,
+                               (void*)fault_va, mapped_len, mfile_off);
+        }
+    }
 
-    int errno = file->ops->read_page(file->inode, (void*)fault_va, mfile_off);
     if (errno < 0) {
         ERROR("fail to populate page (%d)", errno);
+
+        ptep_unmap_leaflet(fault->fault_ptep, region_part);
+        leaflet_return(region_part);
+
         return;
     }
 
-    pte_attr_t prot = region_pteprot(vmr);
-    pte = pte_setprot(pte, prot);
+    __flush_staled_tlb(fault, region_part);
 
-    fault_resolved(fault, pte, 0);
+    fault_resolved(fault, NO_PREALLOC);
 }
 
 static void
 __handle_kernel_page(struct fault_context* fault)
 {
     // we must ensure only ptep fault is resolvable
-    if (fault->fault_va < VMS_MOUNT_1) {
+    if (!is_ptep(fault->fault_va)) {
         return;
     }
 
-    fault_resolved(fault, fault->resolving, 0);
-    pmm_set_attr(fault->prealloc_pa, PP_FGPERSIST);
+    struct leaflet* leaflet = fault->prealloc;
+
+    pin_leaflet(leaflet);
+    leaflet_wipe(leaflet);
+
+    pte_t pte = fault->resolving;
+    ptep_map_leaflet(fault->fault_ptep, pte, leaflet);
+
+    tlb_flush_kernel_ranged(fault->fault_va, leaflet_nfold(leaflet));
+
+    fault_resolved(fault, 0);
 }
 
@@ -195,24 +252,20 @@ fault_prealloc_page(struct fault_context* fault)
 
     pte_t pte;
 
-    pte = vmm_alloc_page(fault->fault_ptep, fault->resolving);
-    if (pte_isnull(pte)) {
+    struct leaflet* leaflet = alloc_leaflet(0);
+    if (!leaflet) {
         return;
     }
 
-    fault->resolving = pte;
-    fault->prealloc_pa = pte_paddr(fault->resolving);
-
-    pmm_set_attr(fault->prealloc_pa, 0);
-    cpu_flush_page(fault->fault_va);
+    fault->prealloc = leaflet;
 }
 
-static void noret
-__fail_to_resolve(struct fault_context* fault)
+void noret
+fault_resolving_failed(struct fault_context* fault)
 {
-    if (fault->prealloc_pa) {
-        pmm_free_page(fault->prealloc_pa);
+    if (fault->prealloc) {
+        leaflet_return(fault->prealloc);
     }
 
     ERROR("(pid: %d) Segmentation fault on %p (%p,e=0x%x)",
@@ -221,15 +274,16 @@ __fail_to_resolve(struct fault_context* fault)
           fault->fault_instn,
           fault->fault_data);
 
-    trace_printstack_isr(fault->ictx);
 
     if (fault->kernel_access) {
         // if a page fault from kernel is not resolvable, then
         // something must be went south
         FATAL("unresolvable page fault");
-        unreachable;
+        failsafe_diagnostic();
     }
 
+    trace_printstack_isr(fault->hstate);
+
     thread_setsignal(current_thread, _SIGSEGV);
 
     schedule();
@@ -278,35 +332,24 @@ done:
     return !!(fault->resolve_type & RESOLVE_OK);
 }
 
-void
-intr_routine_page_fault(const isr_param* param)
+bool
+handle_page_fault(struct fault_context* fault)
 {
-    if (param->depth > 10) {
-        // Too many nested fault! we must messed up something
-        // XXX should we failed silently?
-        spin();
-    }
-
-    struct fault_context fault = { .ictx = param };
-
-    if (!__prepare_fault_context(&fault)) {
-        __fail_to_resolve(&fault);
-    }
+    __gather_memaccess_info(fault);
+    __prepare_fault_context(fault);
 
-    fault_prealloc_page(&fault);
+    fault_prealloc_page(fault);
 
-    if (!__try_resolve_fault(&fault)) {
-        __fail_to_resolve(&fault);
+    if (!__try_resolve_fault(fault)) {
+        return false;
    }
 
-    if ((fault.resolve_type & NO_PREALLOC)) {
-        if (fault.prealloc_pa) {
-            pmm_free_page(fault.prealloc_pa);
+    if ((fault->resolve_type & NO_PREALLOC)) {
+        if (fault->prealloc) {
+            leaflet_return(fault->prealloc);
         }
     }
 
-    set_pte(fault.fault_ptep, fault.resolving);
-
-    cpu_flush_page(fault.fault_va);
-    cpu_flush_page((ptr_t)fault.fault_ptep);
-}
\ No newline at end of file
+    tlb_flush_kernel(fault->fault_va);
+    return true;
+}
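
For readers following the refactor: the patch turns the old self-contained ISR intr_routine_page_fault() into a generic handle_page_fault() that reports success through its return value, with the now-exported, non-returning fault_resolving_failed() handling the unresolvable case. Below is a minimal sketch of how an arch-specific trap entry might drive this interface after the change; only handle_page_fault(), fault_resolving_failed() and the .hstate field are taken from the patch itself, while the entry-point name, the struct hart_state type and the __arch_prepare_fault_context() hook (which this patch removes from the generic path) are assumptions made purely for illustration.

    /*
     * Sketch only -- not part of the patch. Assumes the arch layer still
     * decodes the faulting access via __arch_prepare_fault_context() and
     * that `hstate` is the trapped hart state stored as fault->hstate.
     */
    void
    arch_page_fault_entry(struct hart_state* hstate)
    {
        struct fault_context fault = { .hstate = hstate };

        /* arch-specific step: fill in fault_va / fault_ptep (assumed hook) */
        if (!__arch_prepare_fault_context(&fault)) {
            fault_resolving_failed(&fault);   /* noret: signals or panics */
        }

        /* generic resolver from this patch: false means unresolvable */
        if (!handle_page_fault(&fault)) {
            fault_resolving_failed(&fault);
        }
    }

Compared with the old flow, PTE installation, preallocation cleanup and TLB maintenance now happen inside handle_page_fault() and the per-region handlers (ptep_map_leaflet, __flush_staled_tlb, tlb_flush_kernel), so the caller's only decision is what to do when resolution fails.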