#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
+#include <lunaix/syslog.h>
+
+LOG_MODULE("VMM")
+
void
vmm_init()
{

@@ ... @@
        // This must be writable
-        l1pt->entry[l1_inx] = NEW_L1_ENTRY(attr | PG_WRITE, new_l1pt_pa);
+        l1pt->entry[l1_inx] =
+          NEW_L1_ENTRY(attr | PG_WRITE | PG_PRESENT, new_l1pt_pa);
+
+        // make sure the new l2 table is visible to the CPU
+        cpu_invplg(l2pt);
+
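+        // wipe the new table: its backing frame may hold stale data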
        memset((void*)l2pt, 0, PG_SIZE);
    } else {
        x86_pte_t pte = l2pt->entry[l2_inx];
        if (pte) {
            return 0;
        }
    }

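+    // VMS_SELF mounts the current address space, so the stale
+    // TLB entry for va must be flushed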
-    if (mnt == PD_REFERENCED) {
+    if (mnt == VMS_SELF) {
        cpu_invplg(va);
    }

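+    // VMAP_NOMAP: only populate the paging structures; the leaf
+    // entry is left untouched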
+    if ((options & VMAP_NOMAP)) {
+        return 1;
+    }
+
    l2pt->entry[l2_inx] = NEW_L2_ENTRY(attr, pa);
    return 1;
}
@@ ... @@
{
    assert(((uintptr_t)va & 0xFFFU) == 0);
-    uint32_t l1_index = L1_INDEX(va);
-    uint32_t l2_index = L2_INDEX(va);
+    u32_t l1_index = L1_INDEX(va);
+    u32_t l2_index = L2_INDEX(va);

    // prevent unmap of recursive mapping region
    if (l1_index == 1023) {
@@ ... @@
int
vmm_lookup(uintptr_t va, v_mapping* mapping)
{
-    uint32_t l1_index = L1_INDEX(va);
-    uint32_t l2_index = L2_INDEX(va);
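+    // resolve against our own address space via the self-reference mount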
+    return vmm_lookupat(VMS_SELF, va, mapping);
+}

-    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
+int
+vmm_lookupat(ptr_t mnt, uintptr_t va, v_mapping* mapping)
+{
+    u32_t l1_index = L1_INDEX(va);
+    u32_t l2_index = L2_INDEX(va);
+
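+    // PDE 1023 is the self-reference: (mnt | (1023 << 12)) views the
+    // mounted page directory as an ordinary page table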
+    x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
    x86_pte_t l1pte = l1pt->entry[l1_index];
    if (l1pte) {
        x86_pte_t* l2pte =
-          &((x86_page_table*)L2_VADDR(l1_index))->entry[l2_index];
+          &((x86_page_table*)(mnt | (l1_index << 12)))->entry[l2_index];
        if (*l2pte) {
            mapping->flags = PG_ENTRY_FLAGS(*l2pte);
            mapping->pa = PG_ENTRY_ADDR(*l2pte);
            return 0;
        }
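
@@ ... @@
+// translate va to the physical address it maps to, walking the page
+// tables of the current address space; returns 0 if va is unmapped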
+void*
+vmm_v2p(void* va)
+{
+    u32_t l1_index = L1_INDEX(va);
+    u32_t l2_index = L2_INDEX(va);
+
+    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
+    x86_pte_t l1pte = l1pt->entry[l1_index];
+
+    if (l1pte) {
+        x86_pte_t* l2pte =
+          &((x86_page_table*)L2_VADDR(l1_index))->entry[l2_index];
+        if (*l2pte) {
+            return (void*)(PG_ENTRY_ADDR(*l2pte) | ((uintptr_t)va & 0xfff));
+        }
+    }
+    return 0;
+}
+
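+// same translation, but against the address space mounted at mnt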
+void*
+vmm_v2pat(ptr_t mnt, void* va)
+{
+    u32_t l1_index = L1_INDEX(va);
+    u32_t l2_index = L2_INDEX(va);
+
+    x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
+    x86_pte_t l1pte = l1pt->entry[l1_index];
+
+    if (l1pte) {
+        x86_pte_t* l2pte =
+          &((x86_page_table*)(mnt | (l1_index << 12)))->entry[l2_index];
+        if (*l2pte) {
+            return (void*)(PG_ENTRY_ADDR(*l2pte) | ((uintptr_t)va & 0xfff));
+        }
+    }
+    return 0;
+}
+
void*
vmm_mount_pd(uintptr_t mnt, void* pde)
{