#include <klibc/string.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>

// XXX: something here?

    x86_page_table* dir = (x86_page_table*)pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
    for (size_t i = 0; i < PG_MAX_ENTRIES; i++) {
        dir->entry[i] = PTE_NULL;
    }

    // recursive mapping: point the last entry back at the directory itself,
    // so the paging structures can be looked up and translated in software
    dir->entry[PG_MAX_ENTRIES - 1] = NEW_L1_ENTRY(T_SELF_REF_PERM, dir);
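
/*
 * Worked example of the self-reference trick, assuming the conventional
 * 32-bit x86 two-level layout (4 KiB pages, 1024 entries per table):
 *   - the last directory entry points back at the directory itself;
 *   - any VA whose L1 index is 1023 therefore walks the directory as if
 *     it were a page table, exposing the L2 table behind directory slot
 *     i at 0xFFC00000 + (i << 12), presumably what L2_VADDR(i) computes;
 *   - with both indices at 1023, VA 0xFFFFF000 resolves to the directory
 *     itself, presumably the L1_BASE_VADDR used throughout this file.
 */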

__vmm_map_internal(pid_t pid, uint32_t l1_inx, uint32_t l2_inx,
                   uintptr_t pa, pt_attr attr, int forced)
{
    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
    x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_inx);

    // See if attr makes sense

    if (!l1pt->entry[l1_inx]) {
        x86_page_table* new_l1pt_pa = pmm_alloc_page(pid, PP_FGPERSIST);

        // the new page table itself must be writable
        l1pt->entry[l1_inx] = NEW_L1_ENTRY(attr | PG_WRITE, new_l1pt_pa);

        // zero the fresh table through its recursive-mapping window, as
        // new_l1pt_pa is a physical address we cannot dereference directly
        memset((void*)L2_VADDR(l1_inx), 0, PG_SIZE);
    }

    x86_pte_t l2pte = l2pt->entry[l2_inx];

    if (HAS_FLAGS(attr, PG_PRESENT)) {
        // take a reference on the frame, regardless of whether a mapping existed
        pmm_ref_page(pid, pa);
    }

    l2pt->entry[l2_inx] = NEW_L2_ENTRY(attr, pa);
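
/*
 * For intuition, NEW_L2_ENTRY presumably packs the frame address and the
 * flag bits along these lines (hypothetical macro body; the real
 * definition lives in the paging headers):
 *
 *   #define NEW_L2_ENTRY(attr, pa) \
 *       ((x86_pte_t)(((uintptr_t)(pa) & ~0xFFFU) | ((attr) & 0xFFFU)))
 *
 * i.e. bits 31..12 carry the physical frame and bits 11..0 carry
 * PG_PRESENT, PG_WRITE and friends.
 */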

vmm_map_page(pid_t pid, void* va, void* pa, pt_attr tattr)
{
    assert(((uintptr_t)va & 0xFFFU) == 0);
    assert(((uintptr_t)pa & 0xFFFU) == 0);

    uint32_t l1_index = L1_INDEX(va);
    uint32_t l2_index = L2_INDEX(va);
    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;

    // find a free slot in the page directory and tables (at or near va)
    // to place the mapping
    x86_pte_t l1pte = l1pt->entry[l1_index];
    x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_index);
    while (l1pte && l1_index < PG_MAX_ENTRIES) {
        if (l2_index == PG_MAX_ENTRIES) {
            // this page table is full; move on to the next directory slot
            l1_index++;
            l2_index = 0;
            l1pte = l1pt->entry[l1_index];
            l2pt = (x86_page_table*)L2_VADDR(l1_index);
        }
        // the page table has a free slot: just carve out a new PTE (level 2)
        if (__vmm_map_internal(
              pid, l1_index, l2_index, (uintptr_t)pa, tattr, false)) {
            return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
        }
        l2_index++;
    }

    // the scan fell off the end of the address space without a free slot
    if (l1_index >= PG_MAX_ENTRIES) {
        return NULL;
    }

    if (!__vmm_map_internal(
          pid, l1_index, l2_index, (uintptr_t)pa, tattr, false)) {
        return NULL;
    }

    return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
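
/*
 * Usage sketch (PG_PREM_RW stands in for whatever present+writable
 * attribute combination this codebase defines):
 *
 *   void* pa = pmm_alloc_page(pid, PP_FGPERSIST);
 *   void* va = vmm_map_page(pid, (void*)0x40000000, pa, PG_PREM_RW);
 *   // va may differ from the hint: the scan above settles on the first
 *   // free slot at or after the requested address.
 */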

vmm_fmap_page(pid_t pid, void* va, void* pa, pt_attr tattr)
{
    assert(((uintptr_t)va & 0xFFFU) == 0);
    assert(((uintptr_t)pa & 0xFFFU) == 0);

    uint32_t l1_index = L1_INDEX(va);
    uint32_t l2_index = L2_INDEX(va);

    // the trailing `true` marks this as a forced mapping: an existing PTE
    // at va may be overwritten
    if (!__vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, tattr, true)) {
        return NULL;
    }

vmm_alloc_page(pid_t pid, void* vpn, void** pa, pt_attr tattr, pp_attr_t pattr)
{
    void* pp = pmm_alloc_page(pid, pattr);
    void* result = vmm_map_page(pid, vpn, pp, tattr);
    if (!result) {
        // mapping failed: hand the frame back to the physical allocator
        pmm_free_page(pid, pp);
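
/*
 * Usage sketch (PG_PREM_RW is an assumed present+writable attribute; the
 * out-parameter presumably receives the backing frame on success):
 *
 *   void* pa;
 *   void* va = vmm_alloc_page(KERNEL_PID, (void*)0xD0000000, &pa,
 *                             PG_PREM_RW, PP_FGPERSIST);
 */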

vmm_alloc_pages(pid_t pid, void* va, size_t sz, pt_attr tattr, pp_attr_t pattr)
{
    assert((uintptr_t)va % PG_SIZE == 0);
    assert(sz % PG_SIZE == 0);

    uintptr_t va_ = (uintptr_t)va;
    for (size_t i = 0; i < (sz >> PG_SIZE_BITS); i++, va_ += PG_SIZE) {
        void* pp = pmm_alloc_page(pid, pattr);
        uint32_t l1_index = L1_INDEX(va_);
        uint32_t l2_index = L2_INDEX(va_);
        if (!pp || !__vmm_map_internal(
                     pid, l1_index, l2_index, (uintptr_t)pp, tattr, false)) {
            // one allocation failed: roll back everything mapped so far
            va_ = (uintptr_t)va;
            for (size_t j = 0; j < i; j++, va_ += PG_SIZE) {
                vmm_unmap_page(pid, (void*)va_);
            }
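
/*
 * Usage sketch: carve out a 16 KiB (4-page) region in one call; the
 * rollback above guarantees no partial mapping survives a mid-way
 * failure (the address, PG_PREM_RW attribute and pp_attr value are
 * illustrative only):
 *
 *   vmm_alloc_pages(pid, (void*)0x60000000, 4 * PG_SIZE, PG_PREM_RW, 0);
 */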

vmm_set_mapping(pid_t pid, void* va, void* pa, pt_attr attr) {
    assert(((uintptr_t)va & 0xFFFU) == 0);

    uint32_t l1_index = L1_INDEX(va);
    uint32_t l2_index = L2_INDEX(va);

    // refuse to map into the recursive mapping region
    if (l1_index == PG_MAX_ENTRIES - 1) {
        return;
    }

    __vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, attr, false);
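
/*
 * Note: unlike vmm_map_page, vmm_set_mapping performs no slot scanning;
 * it installs the translation exactly at va, and the return value of
 * __vmm_map_internal is ignored here.
 */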

__vmm_unmap_internal(pid_t pid, void* va, int free_ppage) {
    assert(((uintptr_t)va & 0xFFFU) == 0);

    uint32_t l1_index = L1_INDEX(va);
    uint32_t l2_index = L2_INDEX(va);

    // refuse to unmap the recursive mapping region
    if (l1_index == PG_MAX_ENTRIES - 1) {
        return;
    }

    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
    x86_pte_t l1pte = l1pt->entry[l1_index];
    if (!l1pte) {
        return; // no page table here, hence nothing to unmap
    }

    x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_index);
    x86_pte_t l2pte = l2pt->entry[l2_index];
    if (IS_CACHED(l2pte) && free_ppage) {
        // free the backing frame, masking the flag bits off the PTE first
        pmm_free_page(pid, (void*)PG_ENTRY_ADDR(l2pte));
    }

    l2pt->entry[l2_index] = PTE_NULL;

vmm_unset_mapping(void* va) {
    // pid does not matter here: with free_ppage = false the physical
    // page is left alone
    __vmm_unmap_internal(0, va, false);

vmm_unmap_page(pid_t pid, void* va)
{
    // unlike vmm_unset_mapping, this also releases the backing physical page
    __vmm_unmap_internal(pid, va, true);

    assert(((uintptr_t)va & 0xFFFU) == 0);

    uint32_t l1_index = L1_INDEX(va);
    uint32_t l2_index = L2_INDEX(va);

    x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
    x86_pte_t l1pte = l1pt->entry[l1_index];

    v_mapping mapping = { .flags = 0, .pa = 0, .pn = 0 };

    if (l1pte) {
        x86_pte_t* l2pte =
          &((x86_page_table*)L2_VADDR(l1_index))->entry[l2_index];
        mapping.flags = PG_ENTRY_FLAGS(*l2pte);
        mapping.pa = PG_ENTRY_ADDR(*l2pte);
        mapping.pn = mapping.pa >> PG_SIZE_BITS;
    }

    return (void*)vmm_lookup(va).pa;
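
/*
 * Usage sketch: inspect a mapping before touching it (only identifiers
 * already used in this file are assumed):
 *
 *   v_mapping m = vmm_lookup(va);
 *   if (HAS_FLAGS(m.flags, PG_PRESENT)) {
 *       // safe to use m.pa as the backing physical address
 *   }
 */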