#include <lunaix/mm/pagetable.h>
#include <lunaix/compiler.h>
#include <lunaix/sections.h>

#include <sys/boot/bstage.h>
#include <asm/mm_defs.h>
#define ksection_maps autogen_name(ksecmap)

extern_autogen(ksecmap);

bridge_farsym(__kexec_text_start);
bridge_farsym(ksection_maps);
// define the initial page table layout
struct kernel_map
{
    pte_t l0t[_PAGE_LEVEL_SIZE];      // root table
    pte_t l1t_rsvd[_PAGE_LEVEL_SIZE]; // 0~4G reservation
    struct {                          // reserved pool; nesting assumed so
        pte_t _lft[_PAGE_LEVEL_SIZE]; // that &krsvd[i] is the i-th spare
    } krsvd[KEXEC_RSVD];              // table's physical base
};

static struct kernel_map kpt __section(".kpg");
export_symbol(debug, boot, kpt);
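/*
 * How the reservation scheme works: the whole kernel_map lives in the
 * .kpg section, so every table the early walk could need is already
 * baked into the kernel image. l0t is the root table, l1t_rsvd backs
 * the 0~4G identity map, and krsvd is a bump-allocated pool that
 * prealloc_pt() draws intermediate and leaf tables from. No dynamic
 * memory is needed before the proper allocators come online.
 */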
struct allocator
{
    struct kernel_map* kpt_pa;
    unsigned int pt_usage;
};

static ptr_t boot_text
alloc_rsvd_page(struct allocator* _alloc)
{
    if (_alloc->pt_usage >= KEXEC_RSVD) {
        // pool exhausted; unrecoverable this early (handling assumed)
        asm("ud2");
    }

    return __ptr(&_alloc->kpt_pa->krsvd[_alloc->pt_usage++]);
}
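/*
 * alloc_rsvd_page is a bump allocator over krsvd: each call hands out
 * the physical address of the next unused page-sized table and
 * advances pt_usage. Tables are never freed during boot, so a
 * monotonic cursor is all the bookkeeping required.
 */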
static pte_t* boot_text
prealloc_pt(struct allocator* _allc, ptr_t va,
            pte_attr_t prot, size_t to_gran)
{
    pte_t *ptep, pte;
    unsigned int lvl_i;
    size_t gran = L0T_SIZE;

    ptep = (pte_t*)&_allc->kpt_pa->l0t[0];
    for (int i = 0; i < _PTW_LEVEL && gran > to_gran; i++)
    {
        lvl_i = va_level_index(va, gran);
        pte = ptep[lvl_i];
        gran = gran >> _PAGE_LEVEL_SHIFT;

        if (pte_isnull(pte)) {
            pte = mkpte(alloc_rsvd_page(_allc), KERNEL_PGTAB);
            if (to_gran == gran) {
                pte = pte_setprot(pte, prot);
            }
            set_pte(ptep + lvl_i, pte);
        }

        // tables are physically addressable at this stage
        ptep = (pte_t*) pte_paddr(pte);
    }

    return ptep;
}
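/*
 * prealloc_pt walks from the root toward `to_gran`, allocating any
 * missing table on the path from the reserved pool, and returns the
 * (physical) pointer to the table whose entries each cover `to_gran`
 * of address space. For example, assuming a 4-level walk with
 * 512-entry tables, prealloc_pt(&alloc, va, prot, L2T_SIZE) draws at
 * most two pool pages (an L1T and an L2T) and returns the L2T
 * covering `va`; `prot` is applied only to the entry that hooks the
 * final, `to_gran`-granularity table.
 */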
// boot-stage routine that rebuilds the kernel mappings
// (exact signature assumed)
void boot_text
remap_kernel()
{
    struct kernel_map* kpt_pa;
    pte_t *boot_l0tep, *klptep, *l1_rsvd;
    pte_t pte, id_map;
    ptr_t kstart;
    pfn_t pgs;
    unsigned int lvl_i = 0;

    // identity map the first 4G for legacy compatibility
    kpt_pa = (struct kernel_map*)to_kphysical(&kpt);
    boot_l0tep = (pte_t*) kpt_pa;
    l1_rsvd = (pte_t*) kpt_pa->l1t_rsvd;
    id_map = pte_mkhuge(mkpte_prot(KERNEL_PGTAB));

    pte = mkpte((ptr_t)l1_rsvd, KERNEL_PGTAB);
    set_pte(boot_l0tep, pte);

    for (int i = 0; i < 4; i++, l1_rsvd++)
    {
        id_map = pte_setpaddr(id_map, (ptr_t)i << 30);
        set_pte(l1_rsvd, id_map);
    }
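    /*
     * Each l1t_rsvd entry is a 1GiB huge page (i << 30), so the four
     * entries blanket the first 4GiB one-to-one. This keeps the boot
     * stage, firmware regions, and these very page tables reachable
     * while the high mappings are still under construction.
     */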
    // Remap the kernel to -2GiB

    struct allocator alloc = {
        .kpt_pa = kpt_pa,
        .pt_usage = 0,
    };

    prealloc_pt(&alloc, VMAP, KERNEL_PGTAB, L1T_SIZE);
    prealloc_pt(&alloc, PG_MOUNT_1, KERNEL_PGTAB, LFT_SIZE);
    kstart = page_aligned(__far(__kexec_text_start));

    // pick the granularity one level above the leaf tables;
    // the guard below is assumed, distinguishing a 5-level
    // from a 4-level walk
#if _PTW_LEVEL > 4
    size_t gran = L3T_SIZE;
#else
    size_t gran = L2T_SIZE;
#endif

    prealloc_pt(&alloc, PMAP, KERNEL_PGTAB, gran);
    klptep = prealloc_pt(&alloc, kstart, KERNEL_PGTAB, gran);
    klptep += va_level_index(kstart, gran);
    // hook all remaining reserved tables as the kernel's leaf tables
    pte = mkpte(0, KERNEL_PGTAB);
    for (int i = alloc.pt_usage; i < KEXEC_RSVD; i++)
    {
        pte = pte_setpaddr(pte, (ptr_t)&kpt_pa->krsvd[i]);
        set_pte(klptep++, pte);
    }
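    /*
     * Every spare pool page is now hooked under consecutive klptep
     * slots, forming one physically contiguous run of leaf tables;
     * with 512-entry tables, each hooked slot extends the mappable
     * span by 2MiB. (Entry counts assume x86_64 values.)
     */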
    struct ksecmap* maps;
    struct ksection* section;

    maps = (struct ksecmap*)to_kphysical(__far(ksection_maps));

    // this is the first LFT we hooked on;
    // all these LFTs are contiguous in physical memory
    klptep = (pte_t*) &kpt_pa->krsvd[alloc.pt_usage];
    klptep += pfn(to_kphysical(kstart));
    // Ensure the size of the kernel is within the reservation
    int remain = KEXEC_RSVD - alloc.pt_usage;
    if (leaf_count(maps->ksize) > remain * _PAGE_LEVEL_SIZE)
    {
        // kernel image too large for the reserved tables; trap,
        // as we cannot recover this early (handling assumed)
        asm("ud2");
    }
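    /*
     * Capacity arithmetic: `remain` spare tables supply
     * remain * _PAGE_LEVEL_SIZE leaf PTEs, one per kernel page, so
     * the kernel image of maps->ksize bytes needs
     * leaf_count(maps->ksize) of them. Assuming 4KiB pages and
     * 512-entry tables, each spare table buys 2MiB of image.
     */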
    // map each kernel section, assuming contiguous kernel vaddrs
    for (unsigned int i = 0; i < maps->num; i++)
    {
        section = &maps->secs[i];

        if (section->va < KERNEL_RESIDENT) {
            continue;
        }

        pte = mkpte_prot(KERNEL_RDONLY);
        if ((section->flags & PF_X)) {
            pte = pte_mkexec(pte);
        }
        if ((section->flags & PF_W)) {
            pte = pte_mkwritable(pte);
        }

        pgs = leaf_count(section->size);
        for (pfn_t j = 0; j < pgs; j++)
        {
            pte = pte_setpaddr(pte, section->pa + page_addr(j));
            set_pte(klptep, pte);
            klptep++;
        }
    }
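    /*
     * The per-section permissions mirror the ELF program-header bits:
     * start read-only, then add execute for PF_X and write for PF_W.
     * Since the hooked leaf tables form one contiguous pte array,
     * klptep can advance monotonically, relying on the sections being
     * contiguous and sorted by vaddr, as noted above.
     */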
    // Build up self-reference
    lvl_i = va_level_index(VMS_SELF, L0T_SIZE);
    pte = mkpte_root(__ptr(kpt_pa), KERNEL_PGTAB);
    set_pte(boot_l0tep + lvl_i, pte);
}
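/*
 * Note on the self-reference above: pointing a root entry back at the
 * root table makes every page table reachable through the fixed
 * VMS_SELF window once paging is live (the usual recursive
 * page-table trick), so later stages can edit mappings without
 * temporary mounts.
 */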
// (name assumed) clear the boot page tables before they are populated
void boot_text
kpg_zero()
{
    ptr_t kmap_pa = to_kphysical(&kpt);

    // rep stosb: rcx = byte count, rdi = destination, al = fill byte
    asm volatile("movq %1, %%rdi\n"
                 "rep stosb\n" ::"c"(sizeof(kpt)),
                 "r"(kmap_pa), "a"(0)
                 : "rdi", "memory");
}