#include <libc/string.h>
#include <lunaix/mm/page.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/assert.h>
#include <stdbool.h>

// TODO: something here?

ptd_t*
vmm_init_pd()
{
    ptd_t* dir = pmm_alloc_page();
    for (size_t i = 0; i < PG_MAX_ENTRIES; i++) {
        dir[i] = 0;
    }

    // Recursive mapping: lets us walk the page tables and translate
    // addresses purely in software.
    dir[PG_MAX_ENTRIES - 1] = NEW_L1_ENTRY(T_SELF_REF_PERM, dir);

    return dir;
}
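
/*
 * A worked sketch of the self-reference above, assuming the usual 32-bit
 * x86 two-level scheme (PG_MAX_ENTRIES == 1024, 4KiB pages, self-reference
 * in slot 1023; the real constants come from the macros in page.h):
 *
 *   va = 0xFFC00000 | (n << 12) walks dir[1023] -> dir, then dir[n] ->
 *        page table n, so L2_VADDR(n) exposes page table n as plain memory;
 *   va = 0xFFFF_F000 walks dir[1023] twice and lands on the directory
 *        itself, which is what L1_BASE_VADDR relies on.
 */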

int
__vmm_map_internal(uint32_t l1_inx, uint32_t l2_inx, uintptr_t pa, pt_attr attr, int forced) {
    ptd_t* l1pt = (ptd_t*)L1_BASE_VADDR;
    pt_t* l2pt = (pt_t*)L2_VADDR(l1_inx);

    // See if attr makes sense

    // no level 2 page table for this slot yet: allocate and install one
    if (!l1pt[l1_inx]) {
        uint8_t* new_l1pt_pa = pmm_alloc_page();
        if (!new_l1pt_pa) {
            return 0;
        }
        l1pt[l1_inx] = NEW_L1_ENTRY(attr, new_l1pt_pa);
        memset((void*)L2_VADDR(l1_inx), 0, PG_SIZE);
    }

    // slot already taken and we are not allowed to overwrite it
    if (!forced && l2pt[l2_inx]) {
        return 0;
    }

    l2pt[l2_inx] = NEW_L2_ENTRY(attr, pa);
    return 1;
}
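
/*
 * Convention note: __vmm_map_internal returns 0 on failure (the PMM is out
 * of pages, or the slot is already taken while forced == 0) and nonzero on
 * success; every caller below tests the result with `!`.
 */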

void*
vmm_map_page(void* va, void* pa, pt_attr tattr)
{
    assert(((uintptr_t)va & 0xFFFU) == 0);
    assert(((uintptr_t)pa & 0xFFFU) == 0);

    uint32_t l1_index = L1_INDEX(va);
    uint32_t l2_index = L2_INDEX(va);
    ptd_t* l1pt = (ptd_t*)L1_BASE_VADDR;

    // Find a free slot in the page directory and page tables for the
    // mapping (at va or nearby).
    ptd_t l1pte = l1pt[l1_index];
    pt_t* l2pt = (pt_t*)L2_VADDR(l1_index);
    while (l1pte && l1_index < PG_MAX_ENTRIES) {
        if (l2_index == PG_MAX_ENTRIES) {
            l1_index++;
            l2_index = 0;
            l1pte = l1pt[l1_index];
            l2pt = (pt_t*)L2_VADDR(l1_index);
        }
        // The page table has a free slot; just carve out a new (level 2) PTE.
        if (l2pt && !l2pt[l2_index]) {
            l2pt[l2_index] = NEW_L2_ENTRY(tattr, pa);
            return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
        }
        l2_index++;
    }

    // ran off the end of the page directory: no free slot at or after va
    if (l1_index >= PG_MAX_ENTRIES) {
        return NULL;
    }

    if (!__vmm_map_internal(l1_index, l2_index, (uintptr_t)pa, tattr, false)) {
        return NULL;
    }

    return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
}
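
/*
 * Usage sketch. PG_PREM_RW is illustrative here; use whatever
 * present/writable attribute combination page.h actually defines:
 *
 *   // identity-map the VGA text buffer
 *   void* vga = vmm_map_page((void*)0xB8000, (void*)0xB8000, PG_PREM_RW);
 *
 * Note that the returned va may differ from the requested one: the loop
 * above slides forward to the next free slot when va is already mapped.
 */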

void*
vmm_fmap_page(void* va, void* pa, pt_attr tattr) {
    assert(((uintptr_t)va & 0xFFFU) == 0);
    assert(((uintptr_t)pa & 0xFFFU) == 0);

    uint32_t l1_index = L1_INDEX(va);
    uint32_t l2_index = L2_INDEX(va);

    if (!__vmm_map_internal(l1_index, l2_index, (uintptr_t)pa, tattr, true)) {
        return NULL;
    }

    return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
}
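
/*
 * Unlike vmm_map_page, vmm_fmap_page passes forced == true, so an existing
 * PTE at va is overwritten in place rather than the mapping sliding to a
 * nearby free slot.
 */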

void*
vmm_alloc_page(void* vpn, pt_attr tattr)
{
    void* pp = pmm_alloc_page();
    if (!pp) {
        return NULL;
    }
    void* result = vmm_map_page(vpn, pp, tattr);
    if (!result) {
        // mapping failed: return the physical page to the allocator
        pmm_free_page(pp);
    }
    return result;
}
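
/*
 * Usage sketch (target address and attribute are illustrative):
 *
 *   // back one page of the kernel heap with fresh physical memory
 *   void* page = vmm_alloc_page((void*)0xD0000000, PG_PREM_RW);
 *   if (!page) {
 *       // PMM exhausted or no free mapping slot
 *   }
 */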

void*
vmm_alloc_pages(void* va, size_t sz, pt_attr tattr) {
    assert((uintptr_t)va % PG_SIZE == 0);
    assert(sz % PG_SIZE == 0);

    void* va_ = va;
    for (size_t i = 0; i < (sz >> PG_SIZE_BITS); i++, va_ += PG_SIZE) {
        void* pp = pmm_alloc_page();
        uint32_t l1_index = L1_INDEX(va_);
        uint32_t l2_index = L2_INDEX(va_);
        if (!pp || !__vmm_map_internal(l1_index, l2_index, (uintptr_t)pp, tattr, false)) {
            // if one failed, roll back and release the previously mapped pages
            va_ = va;
            for (size_t j = 0; j < i; j++, va_ += PG_SIZE) {
                vmm_unmap_page(va_);
            }
            return NULL;
        }
    }

    return va;
}
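
/*
 * Usage sketch (address and attribute are illustrative):
 *
 *   // carve out a 16KiB region, e.g. for a kernel stack
 *   void* stk = vmm_alloc_pages((void*)0xC4000000, 4 * PG_SIZE, PG_PREM_RW);
 *   if (!stk) {
 *       // any partially mapped pages have already been rolled back
 *   }
 */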

void
vmm_unmap_page(void* va)
{
    assert(((uintptr_t)va & 0xFFFU) == 0);

    uint32_t l1_index = L1_INDEX(va);
    uint32_t l2_index = L2_INDEX(va);
    ptd_t* l1pt = (ptd_t*)L1_BASE_VADDR;

    ptd_t l1pte = l1pt[l1_index];

    if (l1pte) {
        pt_t* l2pt = (pt_t*)L2_VADDR(l1_index);
        pt_t l2pte = l2pt[l2_index];
        if (IS_CACHED(l2pte)) {
            pmm_free_page((void*)PG_ENTRY_ADDR(l2pte));
        }
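        /*
         * NOTE: clearing the PTE below is not enough on its own; the
         * stale TLB entry for va must also be invalidated (invlpg on
         * x86), or the CPU may keep using the old translation.
         */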
        l2pt[l2_index] = 0;
    }
}

v_mapping
vmm_lookup(void* va)
{
    assert(((uintptr_t)va & 0xFFFU) == 0);

    uint32_t l1_index = L1_INDEX(va);
    uint32_t l2_index = L2_INDEX(va);
    uint32_t po = PG_OFFSET(va);
    ptd_t* l1pt = (ptd_t*)L1_BASE_VADDR;

    ptd_t l1pte = l1pt[l1_index];

    v_mapping mapping = { .flags = 0, .pa = 0, .pn = 0 };

    if (l1pte) {
        pt_t l2pte = ((pt_t*)L2_VADDR(l1_index))[l2_index];
        if (l2pte) {
            mapping.flags = PG_ENTRY_FLAGS(l2pte);
            mapping.pa = PG_ENTRY_ADDR(l2pte) | po;
            mapping.pn = mapping.pa >> PG_SIZE_BITS;
        }
    }

    return mapping;
}

uintptr_t
vmm_v2p(void* va)
{
    return vmm_lookup(va).pa;
}
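
/*
 * Usage sketch: vmm_v2p is useful wherever software needs the physical
 * address behind a virtual one, e.g. when programming a bus-mastering
 * device (dma_program, chan and len are hypothetical names):
 *
 *   uintptr_t frame = vmm_v2p(buf);
 *   if (!frame) {
 *       // buf is not currently mapped
 *   }
 *   dma_program(chan, frame, len);
 */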