#include <lunaix/mm/pagetable.h>
#include <lunaix/compiler.h>
+#include <lunaix/sections.h>
#include <sys/boot/bstage.h>
-#include <sys/mm/mm_defs.h>
+#include <asm/mm_defs.h>
#define RSVD_PAGES 32
-bridge_farsym(__kexec_start);
-bridge_farsym(__kexec_end);
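+// autogenerated section map describing the kernel image layout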
+#define ksection_maps autogen_name(ksecmap)
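+// section permission flags, same encoding as ELF p_flags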
+#define PF_X 0x1
+#define PF_W 0x2
+
+extern_autogen(ksecmap);
+
bridge_farsym(__kexec_text_start);
-bridge_farsym(__kexec_text_end);
+bridge_farsym(ksection_maps);
// define the initial page table layout
struct kernel_map;
static struct kernel_map kpt __section(".kpg");
export_symbol(debug, boot, kpt);
-struct kernel_map {
+struct kernel_map
+{
pte_t l0t[_PAGE_LEVEL_SIZE]; // root table
pte_t l1t_rsvd[_PAGE_LEVEL_SIZE]; // 0~4G reservation
gran = gran >> _PAGE_LEVEL_SHIFT;
if (pte_isnull(pte)) {
- pte = mkpte(alloc_rsvd_page(_allc), KERNEL_DATA);
+ pte = mkpte(alloc_rsvd_page(_allc), KERNEL_PGTAB);
if (to_gran == gran) {
pte = pte_setprot(pte, prot);
}
static void boot_text
do_remap()
{
- struct kernel_map* kpt_pa = (struct kernel_map*)to_kphysical(&kpt);
-
- pte_t* boot_l0tep = (pte_t*) kpt_pa;
- pte_t *klptep, pte;
+ struct kernel_map* kpt_pa;
+ pte_t *boot_l0tep, *klptep, *l1_rsvd;
+ pte_t id_map, pte;
+ ptr_t kstart;
+
+ unsigned int lvl_i = 0;
// identity map the first 4G for legacy compatibility
- pte_t* l1_rsvd = (pte_t*) kpt_pa->l1t_rsvd;
- pte_t id_map = pte_mkhuge(mkpte_prot(KERNEL_DATA));
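+    // boot stage runs identity-mapped, so operate on kpt through its physical address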
+ kpt_pa = (struct kernel_map*)to_kphysical(&kpt);
+ boot_l0tep = (pte_t*) kpt_pa;
+ l1_rsvd = (pte_t*) kpt_pa->l1t_rsvd;
+ id_map = pte_mkhuge(mkpte_prot(KERNEL_PGTAB));
- set_pte(boot_l0tep, mkpte((ptr_t)l1_rsvd, KERNEL_DATA));
+ pte = mkpte((ptr_t)l1_rsvd, KERNEL_PGTAB);
+ set_pte(boot_l0tep, pte);
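+    // each of the four level-1 entries gets one huge page of the 0~4G window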
for (int i = 0; i < 4; i++, l1_rsvd++)
{
// Remap the kernel to -2GiB
- int table_usage = 0;
- unsigned int lvl_i = 0;
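+    // allocator draws from kpt's reserved pages; pt_usage counts pages taken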
struct allocator alloc = {
.kpt_pa = kpt_pa,
.pt_usage = 0
};
- prealloc_pt(&alloc, VMAP, KERNEL_DATA, L1T_SIZE);
-
- prealloc_pt(&alloc, PG_MOUNT_1, KERNEL_DATA, LFT_SIZE);
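+    // preallocate intermediate tables for the VMAP and page-mount regions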
+ prealloc_pt(&alloc, VMAP, KERNEL_PGTAB, L1T_SIZE);
+ prealloc_pt(&alloc, PG_MOUNT_1, KERNEL_PGTAB, LFT_SIZE);
-
- ptr_t kstart = page_aligned(__far(__kexec_text_start));
+ kstart = page_aligned(__far(__kexec_text_start));
#if LnT_ENABLED(3)
    size_t gran = L3T_SIZE;
#else
    size_t gran = L2T_SIZE;
#endif
- prealloc_pt(&alloc, PMAP, KERNEL_DATA, gran);
- klptep = prealloc_pt(&alloc, kstart, KERNEL_DATA, gran);
+ prealloc_pt(&alloc, PMAP, KERNEL_PGTAB, gran);
+ klptep = prealloc_pt(&alloc, kstart, KERNEL_PGTAB, gran);
klptep += va_level_index(kstart, gran);
- pte = mkpte(0, KERNEL_DATA);
+ pte = mkpte(0, KERNEL_PGTAB);
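+    // install the remaining reserved pages as leaf tables under the kernel region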
for (int i = alloc.pt_usage; i < KEXEC_RSVD; i++)
{
pte = pte_setpaddr(pte, (ptr_t)&kpt_pa->krsvd[i]);
set_pte(klptep++, pte);
}
+ struct ksecmap* maps;
+ struct ksection* section;
+ pfn_t pgs;
+
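+    // resolve the autogenerated section map through its physical address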
+ maps = (struct ksecmap*)to_kphysical(__far(ksection_maps));
+
// this is the first LFT we hooked on.
- // all these LFT are contig in physical address
+    // all these LFTs are contiguous in physical memory
klptep = (pte_t*) &kpt_pa->krsvd[alloc.pt_usage];
-
+ klptep += pfn(to_kphysical(kstart));
+
    // Ensure the size of the kernel is within the reservation
- int remain = KEXEC_RSVD - table_usage;
- pfn_t kimg_pagecount =
- pfn(__far(__kexec_end) - __far(__kexec_start));
- if (kimg_pagecount > remain * _PAGE_LEVEL_SIZE) {
- // ERROR: require more pages
- // here should do something else other than head into blocking
+ int remain = KEXEC_RSVD - alloc.pt_usage;
+ if (leaf_count(maps->ksize) > remain * _PAGE_LEVEL_SIZE)
+ {
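+        // kernel image needs more pages than reserved; trap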
asm("ud2");
}
- // kernel .text
- pfn_t ktext_end = pfn(to_kphysical(__far(__kexec_text_end)));
- pfn_t i = pfn(to_kphysical(kstart));
+    // assume kernel sections occupy contiguous virtual addresses
+ for (unsigned int i = 0; i < maps->num; i++)
+ {
+ section = &maps->secs[i];
- klptep += i;
- pte = pte_setprot(pte, KERNEL_EXEC);
- for (; i < ktext_end; i++) {
- pte = pte_setpaddr(pte, page_addr(i));
- set_pte(klptep, pte);
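+        // skip sections that live below the resident kernel base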
+ if (section->va < KERNEL_RESIDENT) {
+ continue;
+ }
- klptep++;
- }
-
- pfn_t kimg_end = pfn(to_kphysical(__far(__kexec_end)));
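+        // start read-only and widen per the section's flags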
+ pte = mkpte_prot(KERNEL_RDONLY);
+ if ((section->flags & PF_X)) {
+ pte = pte_mkexec(pte);
+ }
+ if ((section->flags & PF_W)) {
+ pte = pte_mkwritable(pte);
+ }
- // all remaining kernel sections
- pte = pte_setprot(pte, KERNEL_DATA);
- for (; i < kimg_end; i++) {
- pte = pte_setpaddr(pte, page_addr(i));
- set_pte(klptep, pte);
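+        // map the section's pages; they are physically contiguous from section->pa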
+ pgs = leaf_count(section->size);
+ for (pfn_t j = 0; j < pgs; j++)
+ {
+ pte = pte_setpaddr(pte, section->pa + page_addr(j));
+ set_pte(klptep, pte);
- klptep++;
+ klptep++;
+ }
}
// Build up self-reference
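+    // self-referencing root entry keeps the page tables reachable at VMS_SELF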
lvl_i = va_level_index(VMS_SELF, L0T_SIZE);
- pte = mkpte_root(__ptr(kpt_pa), KERNEL_DATA);
+ pte = mkpte_root(__ptr(kpt_pa), KERNEL_PGTAB);
set_pte(boot_l0tep + lvl_i, pte);
}