#include <klibc/string.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
#include <lunaix/syslog.h>

#include <sys/mm/mempart.h>

void
vmm_init()
{
    // XXX: something here?
}

x86_page_table*
vmm_init_pd()
{
    // Allocate one physical frame for the page directory and clear it.
    x86_page_table* dir =
      (x86_page_table*)pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
    for (size_t i = 0; i < PG_MAX_ENTRIES; i++) {
        dir->entry[i] = PTE_NULL;
    }

    // Recursive mapping: lets us walk the page tables and translate
    // addresses purely in software.
    dir->entry[PG_MAX_ENTRIES - 1] = NEW_L1_ENTRY(T_SELF_REF_PERM, dir);

    return dir;
}
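
/*
 * A note on the self-reference trick (explanatory, not from the original
 * source): once entry 1023 points back at the page directory, the MMU
 * resolves any address of the form (1023 << 22) | (n << 12) by walking
 * through the directory twice, landing on the frame that holds page
 * table n; (1023 << 22) | (1023 << 12) lands on the directory itself.
 * This is exactly the `mnt | (1023 << 12)` and `mnt | (l1_inx << 12)`
 * arithmetic used throughout this file.
 */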

int
vmm_set_mapping(ptr_t mnt, ptr_t va, ptr_t pa, pt_attr attr, int options)
{
    assert((ptr_t)va % PG_SIZE == 0);

    ptr_t l1_inx = L1_INDEX(va);
    ptr_t l2_inx = L2_INDEX(va);

    // Resolve both tables through the recursively mapped window at mnt:
    // slot 1023 is the page directory, slot l1_inx the table covering va.
    x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
    x86_page_table* l2pt = (x86_page_table*)(mnt | (l1_inx << 12));

    // See if attr makes sense
    assert(attr <= 128);

    x86_pte_t* l1pte = &l1pt->entry[l1_inx];
    if (!*l1pte) {
        // No page table covers this range yet; allocate one.
        x86_page_table* new_l1pt_pa =
          (x86_page_table*)pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);

        // Out of physical memory!
        if (!new_l1pt_pa) {
            return 0;
        }

        // This must be writable
        *l1pte = NEW_L1_ENTRY(attr | PG_WRITE | PG_PRESENT, new_l1pt_pa);
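
        // (explanatory note) On x86, the effective permission of a page
        // combines PDE and PTE restrictively: a read-only or
        // supervisor-only directory entry overrides whatever the PTE
        // allows. Keeping the PDE writable and present lets each PTE make
        // the actual access decision; the PG_ALLOW_USER promotion in the
        // else-branch below exists for the same reason.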

        // make sure our new l2 table is visible to CPU
        cpu_flush_page((ptr_t)l2pt);

        memset((void*)l2pt, 0, PG_SIZE);
    } else {
        if ((attr & PG_ALLOW_USER) && !(*l1pte & PG_ALLOW_USER)) {
            *l1pte |= PG_ALLOW_USER;
        }
    }

    x86_pte_t pte = l2pt->entry[l2_inx];
    if (pte && (options & VMAP_IGNORE)) {
        // A mapping is already installed and the caller asked to keep it.
        return 1;
    }

    // Changing an entry of the current address space: invalidate the
    // stale TLB entry for va.
    if (mnt == VMS_SELF) {
        cpu_flush_page(va);
    }

    if ((options & VMAP_NOMAP)) {
        // Caller only wanted the table structure, not the mapping itself.
        return 1;
    }

    l2pt->entry[l2_inx] = NEW_L2_ENTRY(attr, pa);

    return 1;
}
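
/*
 * Usage sketch (illustrative, not from the original source): backing a
 * virtual page in the current address space with a fresh frame:
 *
 *     ptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
 *     vmm_set_mapping(VMS_SELF, va, pa, PG_PREM_RW, VMAP_NULL);
 *
 * PG_PREM_RW and VMAP_NULL are the same attribute/option values that
 * vmm_dup_page below passes for its temporary mappings.
 */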

ptr_t
vmm_del_mapping(ptr_t mnt, ptr_t va)
{
    assert(((ptr_t)va & 0xFFFU) == 0);

    u32_t l1_index = L1_INDEX(va);
    u32_t l2_index = L2_INDEX(va);

    // prevent unmap of recursive mapping region
    if (l1_index == 1023) {
        return 0;
    }

    x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
    x86_pte_t l1pte = l1pt->entry[l1_index];

    if (l1pte) {
        x86_page_table* l2pt = (x86_page_table*)(mnt | (l1_index << 12));
        x86_pte_t l2pte = l2pt->entry[l2_index];

        l2pt->entry[l2_index] = PTE_NULL;
        // invalidate the stale translation
        cpu_flush_page(va);

        return PG_ENTRY_ADDR(l2pte);
    }

    return 0;
}
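
/*
 * Note: vmm_del_mapping hands back the physical address that was mapped
 * at va (0 if there was none), so the caller can decide whether the
 * underlying frame should also be returned to the physical allocator.
 */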

int
vmm_lookup(ptr_t va, v_mapping* mapping)
{
    return vmm_lookupat(VMS_SELF, va, mapping);
}

int
vmm_lookupat(ptr_t mnt, ptr_t va, v_mapping* mapping)
{
    u32_t l1_index = L1_INDEX(va);
    u32_t l2_index = L2_INDEX(va);

    x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
    x86_pte_t l1pte = l1pt->entry[l1_index];

    if (l1pte) {
        x86_pte_t* l2pte =
          &((x86_page_table*)(mnt | (l1_index << 12)))->entry[l2_index];
        if (*l2pte) {
            mapping->flags = PG_ENTRY_FLAGS(*l2pte);
            mapping->pa = PG_ENTRY_ADDR(*l2pte);
            mapping->pn = mapping->pa >> PG_SIZE_BITS;
            mapping->pte = l2pte;
            return 1;
        }
    }

    return 0;
}
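
/*
 * The filled v_mapping gives the caller the raw PTE flags, the physical
 * address and its frame number, and a pointer to the live PTE itself,
 * so a mapping can be inspected and patched in place.
 */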

ptr_t
vmm_v2p(ptr_t va)
{
    u32_t l1_index = L1_INDEX(va);
    u32_t l2_index = L2_INDEX(va);

    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
    x86_pte_t l1pte = l1pt->entry[l1_index];

    if (l1pte) {
        x86_pte_t* l2pte =
          &((x86_page_table*)L2_VADDR(l1_index))->entry[l2_index];

        if (*l2pte) {
            return PG_ENTRY_ADDR(*l2pte) | ((ptr_t)va & 0xfff);
        }
    }

    return 0;
}
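
/*
 * vmm_v2p is the current-address-space shortcut: L1_BASE_VADDR and
 * L2_VADDR(l1_index) name the same self-referenced window that the
 * mnt-based variants compute as mnt | (1023 << 12) and
 * mnt | (l1_index << 12), with the mount fixed to the running VMS.
 */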

ptr_t
vmm_v2pat(ptr_t mnt, ptr_t va)
{
    u32_t l1_index = L1_INDEX(va);
    u32_t l2_index = L2_INDEX(va);

    x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
    x86_pte_t l1pte = l1pt->entry[l1_index];

    if (l1pte) {
        x86_pte_t* l2pte =
          &((x86_page_table*)(mnt | (l1_index << 12)))->entry[l2_index];

        if (*l2pte) {
            return PG_ENTRY_ADDR(*l2pte) | ((ptr_t)va & 0xfff);
        }
    }

    return 0;
}

ptr_t
vmm_mount_pd(ptr_t mnt, ptr_t pde)
{
    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
    l1pt->entry[(mnt >> 22)] = NEW_L1_ENTRY(T_SELF_REF_PERM, pde);
    return mnt;
}
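
/*
 * Mounting installs the physical address of another page directory into
 * a spare top-level slot (mnt >> 22) of the current directory, with the
 * same self-reference permissions used in vmm_init_pd. The foreign
 * directory and all of its page tables then become visible in the 4 MiB
 * window at mnt, which is what lets the mnt-based helpers above operate
 * on any address space. Any TLB entries cached for the window from a
 * previous mount must be invalidated (cpu_flush_page) before the window
 * is trusted again.
 */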

ptr_t
vmm_unmount_pd(ptr_t mnt)
{
    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
    l1pt->entry[(mnt >> 22)] = 0;
    return mnt;
}

ptr_t
vmm_dup_page(pid_t pid, ptr_t pa)
{
    ptr_t new_ppg = pmm_alloc_page(pid, 0);
    vmm_set_mapping(VMS_SELF, PG_MOUNT_3, new_ppg, PG_PREM_RW, VMAP_NULL);
    vmm_set_mapping(VMS_SELF, PG_MOUNT_4, pa, PG_PREM_RW, VMAP_NULL);

    // copy one full page: 1024 dwords of 4 bytes = PG_SIZE bytes
    asm volatile("movl %1, %%edi\n"
                 "movl %2, %%esi\n"
                 "rep movsl\n" ::"c"(1024),
                 "r"(PG_MOUNT_3),
                 "r"(PG_MOUNT_4)
                 : "memory", "%edi", "%esi");

    vmm_del_mapping(VMS_SELF, PG_MOUNT_3);
    vmm_del_mapping(VMS_SELF, PG_MOUNT_4);

    return new_ppg;
}
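
/*
 * vmm_dup_page shows the mount-point technique end to end: the source
 * frame (pa) and the fresh frame are both mapped temporarily at the
 * fixed kernel addresses PG_MOUNT_3/PG_MOUNT_4, copied with rep movsl,
 * and unmapped again; the physical address of the copy is returned.
 * Because vmm_set_mapping is called with VMS_SELF, the TLB entries for
 * both mount points are flushed as part of establishing the mappings.
 */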