#include <lunaix/hart_state.h>
#include <lunaix/failsafe.h>
-#include <sys/mm/mm_defs.h>
+#include <asm/mm_defs.h>
#include <klibc/string.h>
context->mm = vmspace(__current);
- if (mnt < VMS_MOUNT_1) {
+ if (!vmnt_packed(ptep)) {
refva = (ptr_t)ptep;
goto done;
}
context->ptep_fault = true;
- context->remote_fault = (mnt != VMS_SELF);
+ context->remote_fault = !active_vms(mnt);
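+ // a non-active mount means the faulting ptep belongs to another (guest)
+ //  address space; resolve against that space's mm instead of the active one.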
if (context->remote_fault && context->mm) {
context->mm = context->mm->guest_mm;
assert(context->mm);
}
+ // unpack the ptep to reveal the one true va!
+
#if LnT_ENABLED(1)
ptep = (pte_t*)page_addr(ptep_pfn(ptep));
mnt = ptep_vm_mnt(ptep);
- if (mnt < VMS_MOUNT_1) {
+ if (!vmnt_packed(ptep)) {
refva = (ptr_t)ptep;
goto done;
}
#if LnT_ENABLED(2)
ptep = (pte_t*)page_addr(ptep_pfn(ptep));
mnt = ptep_vm_mnt(ptep);
- if (mnt < VMS_MOUNT_1) {
+ if (!vmnt_packed(ptep)) {
refva = (ptr_t)ptep;
goto done;
}
#if LnT_ENABLED(3)
ptep = (pte_t*)page_addr(ptep_pfn(ptep));
mnt = ptep_vm_mnt(ptep);
- if (mnt < VMS_MOUNT_1) {
+ if (!vmnt_packed(ptep)) {
refva = (ptr_t)ptep;
goto done;
}
ptep = (pte_t*)page_addr(ptep_pfn(ptep));
mnt = ptep_vm_mnt(ptep);
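+ // the deepest level has been unwound; the address can no longer be a packed ptep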
- assert(mnt < VMS_MOUNT_1);
+ assert(!vmnt_packed(ptep));
refva = (ptr_t)ptep;
done:
context->fault_refva = refva;
}
-static bool
+static void
__prepare_fault_context(struct fault_context* fault)
{
- if (!__arch_prepare_fault_context(fault)) {
- return false;
- }
-
- __gather_memaccess_info(fault);
-
pte_t* fault_ptep = fault->fault_ptep;
ptr_t fault_va = fault->fault_va;
pte_t fault_pte = *fault_ptep;
// for a ptep fault, the parent page tables should match the actual
// accessor's permission
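+ //  (kernel_refaddr selects kernel- vs user-privileged page tables below)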
if (kernel_refaddr) {
- ptep_alloc_hierarchy(fault_ptep, fault_va, KERNEL_DATA);
+ ptep_alloc_hierarchy(fault_ptep, fault_va, KERNEL_PGTAB);
} else {
- ptep_alloc_hierarchy(fault_ptep, fault_va, USER_DATA);
+ ptep_alloc_hierarchy(fault_ptep, fault_va, USER_PGTAB);
}
fault->fault_pte = fault_pte;
+
+ if (fault->ptep_fault) {
+ // fault on intermediate levels.
+ fault_pte = pte_setprot(fault_pte, KERNEL_PGTAB);
+ }
- if (fault->ptep_fault && !kernel_refaddr) {
- fault->resolving = pte_setprot(fault_pte, USER_DATA);
- } else {
- fault->resolving = pte_setprot(fault_pte, KERNEL_DATA);
+ if (!kernel_refaddr) {
+ fault_pte = pte_mkuser(fault_pte);
}
- fault->resolving = pte_mkloaded(fault->resolving);
+ fault->resolving = pte_mkloaded(fault_pte);
fault->kernel_vmfault = kernel_vmfault;
fault->kernel_access = kernel_context(fault->hstate);
-
- return true;
}
static inline void
__handle_anon_region(struct fault_context* fault)
{
pte_t pte = fault->resolving;
- pte_attr_t prot = region_pteprot(fault->vmr);
- pte = pte_setprot(pte, prot);
+ pte = region_tweakpte(fault->vmr, pte);
// TODO: we could potentially get a leaflet of a different order here
struct leaflet* region_part = alloc_leaflet(0);
static void
__handle_named_region(struct fault_context* fault)
{
+ int errno = 0;
struct mm_region* vmr = fault->vmr;
struct v_file* file = vmr->mfile;
+ struct v_file_ops* fops = file->ops;
pte_t pte = fault->resolving;
ptr_t fault_va = page_aligned(fault->fault_va);
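+ // offset of the faulting page within the region, and within the backing file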
u32_t mseg_off = (fault_va - vmr->start);
u32_t mfile_off = mseg_off + vmr->foff;
+ size_t mapped_len = vmr->flen;
// TODO: we could potentially get a leaflet of a different order here
struct leaflet* region_part = alloc_leaflet(0);
- pte = pte_setprot(pte, region_pteprot(vmr));
+ pte = region_tweakpte(vmr, pte);
ptep_map_leaflet(fault->fault_ptep, pte, region_part);
- int errno = file->ops->read_page(file->inode, (void*)fault_va, mfile_off);
+ if (mseg_off < mapped_len) {
+ mapped_len = MIN(mapped_len - mseg_off, PAGE_SIZE);
+ }
+ else {
+ mapped_len = 0;
+ }
+
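+ // a page fully backed by the file is read in directly; a partial (or
+ //  entirely unbacked) page is zeroed first, then any mapped bytes are read in.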
+ if (mapped_len == PAGE_SIZE) {
+ errno = fops->read_page(file->inode, (void*)fault_va, mfile_off);
+ }
+ else {
+ leaflet_wipe(region_part);
+
+ if (mapped_len) {
+ errno = fops->read(file->inode,
+ (void*)fault_va, mapped_len, mfile_off);
+ }
+ }
+
if (errno < 0) {
ERROR("fail to populate page (%d)", errno);
__handle_kernel_page(struct fault_context* fault)
{
// we must ensure that only ptep faults are resolvable
- if (fault->fault_va < VMS_MOUNT_1) {
+ if (!is_ptep(fault->fault_va)) {
return;
}
}
-static void noret
-__fail_to_resolve(struct fault_context* fault)
+void noret
+fault_resolving_failed(struct fault_context* fault)
{
if (fault->prealloc) {
leaflet_return(fault->prealloc);
return !!(fault->resolve_type & RESOLVE_OK);
}
-void
-intr_routine_page_fault(const struct hart_state* hstate)
+bool
+handle_page_fault(struct fault_context* fault)
{
- if (hstate->depth > 10) {
- // Too many nested fault! we must messed up something
- // XXX should we failed silently?
- spin();
- }
-
- struct fault_context fault = { .hstate = hstate };
-
- if (!__prepare_fault_context(&fault)) {
- __fail_to_resolve(&fault);
- }
+ __gather_memaccess_info(fault);
+ __prepare_fault_context(fault);
- fault_prealloc_page(&fault);
+ fault_prealloc_page(fault);
- if (!__try_resolve_fault(&fault)) {
- __fail_to_resolve(&fault);
+ if (!__try_resolve_fault(fault)) {
+ return false;
}
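+ // if the resolution did not consume the preallocated leaflet, hand it back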
- if ((fault.resolve_type & NO_PREALLOC)) {
- if (fault.prealloc) {
- leaflet_return(fault.prealloc);
+ if ((fault->resolve_type & NO_PREALLOC)) {
+ if (fault->prealloc) {
+ leaflet_return(fault->prealloc);
}
}
-}
\ No newline at end of file
+
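+ // flush any stale TLB entry for the resolved va before returning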
+ tlb_flush_kernel(fault->fault_va);
+ return true;
+}