#include <lunaix/mm/pagetable.h>
#include <lunaix/compiler.h>

#include <sys/boot/bstage.h>
#include <sys/mm/mm_defs.h>

bridge_farsym(__kexec_start);
bridge_farsym(__kexec_end);
bridge_farsym(__kexec_text_start);
bridge_farsym(__kexec_text_end);
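/*
 * The __kexec_* symbols are linker-provided bounds of the kernel
 * image. bridge_farsym() makes these high-half symbols usable from
 * this low-memory boot stage; they are read through __far() below.
 */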
// define the initial page table layout

struct kernel_map
{
    pte_t l0t[_PAGE_LEVEL_SIZE];        // root table
    pte_t l1t_rsvd[_PAGE_LEVEL_SIZE];   // L1 table reserved for the 0~4G identity map

    // page-sized tables reserved for boot-time allocation
    struct {
        pte_t _lft[_PAGE_LEVEL_SIZE];
    } krsvd[KEXEC_RSVD];
};

static struct kernel_map kpt __section(".kpg");
export_symbol(debug, boot, kpt);
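/*
 * The whole layout lives in the dedicated .kpg section, so the
 * reservation can be located and zeroed in one pass before any table
 * is populated (see the zeroing helper at the bottom of this file).
 */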
// boot-stage allocator over the krsvd reservation
struct allocator
{
    struct kernel_map* kpt_pa;
    unsigned int pt_usage;      // number of reserved tables handed out
};
static ptr_t boot_text
alloc_rsvd_page(struct allocator* _alloc)
{
    if (_alloc->pt_usage >= KEXEC_RSVD) {
        // reservation exhausted; nothing sensible to do this early,
        // so park here. (assumption: the original handling is elided)
        while (1);
    }

    return __ptr(&_alloc->kpt_pa->krsvd[_alloc->pt_usage++]);
}
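/*
 * A simple bump allocator over the fixed krsvd pool: pages are handed
 * out in order and never returned, which is all the one-shot boot
 * stage needs.
 */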
static pte_t* boot_text
prealloc_pt(struct allocator* _allc, ptr_t va,
            pte_attr_t prot, size_t to_gran)
{
    pte_t pte, *ptep;
    unsigned int lvl_i;
    size_t gran = L0T_SIZE;

    ptep = (pte_t*)&_allc->kpt_pa->l0t[0];

    for (int i = 0; i < _PTW_LEVEL && gran > to_gran; i++)
    {
        lvl_i = va_level_index(va, gran);
        ptep = &ptep[lvl_i];
        pte = *ptep;

        gran = gran >> _PAGE_LEVEL_SHIFT;

        if (pte_isnull(pte)) {
            pte = mkpte(alloc_rsvd_page(_allc), KERNEL_DATA);
            if (to_gran == gran) {
                pte = pte_setprot(pte, prot);
            }
            set_pte(ptep, pte);
        }

        ptep = (pte_t*) pte_paddr(pte);
    }

    return ptep;
}
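/*
 * prealloc_pt() walks from the root towards `va`, allocating missing
 * intermediate tables from the reservation, and stops once the
 * per-entry coverage reaches `to_gran`. It returns the physical
 * address of that final table, which is directly usable here since
 * the boot stage operates on physical, identity-mapped memory.
 */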
/*
 * Build the boot-time page tables and remap the kernel to the high
 * half. (assumption: the signature is elided in the source; the name
 * used here is illustrative)
 */
void boot_text
remap_kernel(void)
{
    struct kernel_map* kpt_pa = (struct kernel_map*)to_kphysical(&kpt);

    pte_t pte;
    pte_t* boot_l0tep = (pte_t*) kpt_pa;    // l0t is the first member
    // identity map the first 4G for legacy compatibility
    pte_t* l1_rsvd = (pte_t*) kpt_pa->l1t_rsvd;
    pte_t id_map = pte_mkhuge(mkpte_prot(KERNEL_DATA));

    set_pte(boot_l0tep, mkpte((ptr_t)l1_rsvd, KERNEL_DATA));

    for (int i = 0; i < 4; i++, l1_rsvd++)
    {
        id_map = pte_setpaddr(id_map, (ptr_t)i << 30);
        set_pte(l1_rsvd, id_map);
    }
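    /*
     * Four 1GiB huge-page entries now cover physical 0~4GiB one to
     * one, so this low-memory boot code (and any legacy MMIO) stays
     * reachable once this root table is installed.
     */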
    // Remap the kernel to -2GiB

    unsigned int lvl_i = 0;
    struct allocator alloc = {
        .kpt_pa = kpt_pa,       // initializer fields inferred from usage
        .pt_usage = 0
    };
    prealloc_pt(&alloc, VMAP, KERNEL_DATA, L1T_SIZE);
    prealloc_pt(&alloc, PG_MOUNT_1, KERNEL_DATA, LFT_SIZE);
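    /*
     * Pre-wire the table paths for the vmap region and the first
     * page-mount window, so both are backed by the reservation before
     * any real page allocator exists. (VMAP and PG_MOUNT_1 are kernel
     * VA regions defined elsewhere in the headers.)
     */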
    ptr_t kstart = page_aligned(__far(__kexec_text_start));
    // NOTE: the preprocessor guard here is elided in the source; the
    // macro name below is a placeholder for whichever configuration
    // selects the alternate prealloc granule.
#ifdef CONFIG_KEXEC_L3T_GRAN
    size_t gran = L3T_SIZE;
#else
    size_t gran = L2T_SIZE;
#endif
    prealloc_pt(&alloc, PMAP, KERNEL_DATA, gran);

    pte_t* klptep = prealloc_pt(&alloc, kstart, KERNEL_DATA, gran);
    klptep += va_level_index(kstart, gran);
    pte = mkpte(0, KERNEL_DATA);
    for (int i = alloc.pt_usage; i < KEXEC_RSVD; i++)
    {
        pte = pte_setpaddr(pte, (ptr_t)&kpt_pa->krsvd[i]);
        set_pte(klptep++, pte);
    }

    // this is the first LFT we hooked in; all these LFTs are
    // contiguous in physical memory, so we can fill them by
    // walking a single pointer.
    klptep = (pte_t*) &kpt_pa->krsvd[alloc.pt_usage];
    // Ensure the size of the kernel image is within the reservation
    int remain = KEXEC_RSVD - alloc.pt_usage;
    pfn_t kimg_pagecount =
        pfn(__far(__kexec_end) - __far(__kexec_start));
    if (kimg_pagecount > remain * _PAGE_LEVEL_SIZE) {
        // ERROR: require more pages.
        // here we should do something smarter than just blocking.
        while (1);  // (assumption: the elided handling parks the CPU)
    }
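    /*
     * The bound above holds because each remaining reserved LFT maps
     * _PAGE_LEVEL_SIZE pages: with 512-entry tables and 4KiB pages,
     * one LFT covers 2MiB of the kernel image.
     */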
    // map the kernel text as executable
    pfn_t ktext_end = pfn(to_kphysical(__far(__kexec_text_end)));
    pfn_t i = pfn(to_kphysical(kstart));

    pte = pte_setprot(pte, KERNEL_EXEC);
    for (; i < ktext_end; i++) {
        pte = pte_setpaddr(pte, page_addr(i));
        set_pte(klptep, pte);
        klptep++;
    }
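    /*
     * `i` and `klptep` intentionally carry over into the loop below,
     * so the data mapping resumes exactly where the text mapping
     * ended and the image occupies one contiguous run of PTEs.
     */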
    pfn_t kimg_end = pfn(to_kphysical(__far(__kexec_end)));

    // all remaining kernel sections are mapped as data
    pte = pte_setprot(pte, KERNEL_DATA);
    for (; i < kimg_end; i++) {
        pte = pte_setpaddr(pte, page_addr(i));
        set_pte(klptep, pte);
        klptep++;
    }
    // Build up the self-reference entry
    lvl_i = va_level_index(VMS_SELF, L0T_SIZE);
    pte = mkpte_root(__ptr(kpt_pa), KERNEL_DATA);
    set_pte(boot_l0tep + lvl_i, pte);
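    /*
     * This is the classic recursive page-table trick: the root table
     * points back at itself through the VMS_SELF slot, so once paging
     * is live, any page table can be reached via ordinary virtual
     * addresses derived from VMS_SELF instead of temporary mounts.
     */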
}

/*
 * Zero out the whole page-table reservation. (assumption: the
 * enclosing function and its name are elided in the source; it must
 * run before the table builder above.)
 */
void boot_text
kpt_clear(void)
{
    ptr_t kmap_pa = to_kphysical(&kpt);
    for (size_t i = 0; i < sizeof(kpt); i++) {
        ((u8_t*)kmap_pa)[i] = 0;
    }
}
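/*
 * This zeroing must happen before the builder runs: .kpg is not
 * guaranteed to be cleared by the loader, and the walk in
 * prealloc_pt() relies on pte_isnull() to detect absent entries.
 */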