"vmutils.c",
"procvm.c",
"arch.c",
+ "kremap.c"
])
\ No newline at end of file
-#ifndef __LUNAIX_BSTAGE_H
-#define __LUNAIX_BSTAGE_H
-#include <lunaix/types.h>
-#include <lunaix/boot_generic.h>
-
-extern ptr_t __multiboot_addr;
-
-extern u8_t __kboot_start[];
-extern u8_t __kboot_end[];
+#ifndef __LUNAIX_ARCH_GENERIC_BOOT_STAGE_H
+#define __LUNAIX_ARCH_GENERIC_BOOT_STAGE_H
#define boot_text __attribute__((section(".boot.text")))
#define boot_data __attribute__((section(".boot.data")))
-#define boot_bss __attribute__((section(".boot.bss")))
/*
Bridge the far symbol to the vicinity.
issue where symbol define in kernel
code is too far away from the boot code.
*/
-#ifdef CONFIG_ARCH_X86_64
+#ifdef CONFIG_ARCH_BITS_64
#define __bridge_farsym(far_sym) \
asm( \
".section .boot.data\n" \
#endif
-ptr_t
-remap_kernel();
-
-#endif /* __LUNAIX_BSTAGE_H */
+#endif /* __LUNAIX_ARCH_GENERIC_BOOT_STAGE_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_GENERIC_INIT_PAGETABLE_H
+#define __LUNAIX_ARCH_GENERIC_INIT_PAGETABLE_H
+
+#include <lunaix/types.h>
+#include <lunaix/mm/pagetable.h>
+
+/*
+ * Simple bump allocator over a fixed, physically-contiguous pool of
+ * page-table pages (see kpt_alloc_table).
+ */
+struct pt_alloc
+{
+    ptr_t base;     /* physical base address of the page-table pool */
+    int index;      /* index of the next unallocated page in the pool */
+    int total;      /* total number of pages available in the pool */
+};
+
+/*
+ * Cursor state of a page-table walk: __walk leaves the table it
+ * stopped at (and the entry index within it) here so callers can
+ * write ptes directly.
+ */
+struct ptw_state
+{
+    struct pt_alloc* alloc;   /* allocator used to create missing tables */
+    pte_t* root;              /* root (top-level) page table being built */
+    pte_t* lntp;              /* table reached by the last walk; NULL on failure */
+    int tab_index;            /* entry index within lntp for the walked address */
+    int level;                /* hierarchy level that lntp sits at */
+};
+
+/**
+ * Allocate a page table from the page-table pool
+ */
+ptr_t
+kpt_alloc_table(struct pt_alloc* alloc);
+
+/**
+ * Set contiguous number of ptes starting from `addr`
+ * Using a flattened approach (i.e., unfolded recursion)
+ */
+unsigned int
+kpt_set_ptes_flatten(struct ptw_state* state, ptr_t addr,
+ pte_t pte, unsigned long lsize, unsigned int nr);
+
+/**
+ * Remap the kernel to high-memory
+ */
+void
+kpt_migrate_highmem(struct ptw_state* state);
+
+/**
+ * Initialise a page-table pool allocator over `pool`, a region of
+ * `size` bytes, yielding size / PAGE_SIZE allocatable table pages.
+ */
+static inline void must_inline
+init_pt_alloc(struct pt_alloc* alloc, ptr_t pool, unsigned int size)
+{
+    alloc->base  = pool;
+    alloc->index = 0;
+    alloc->total = size / PAGE_SIZE;
+}
+
+/**
+ * Initialise a walk cursor bound to `alloc`, rooted at the page
+ * table located at physical address `root`.
+ */
+static inline void must_inline
+init_ptw_state(struct ptw_state* state, struct pt_alloc* alloc, ptr_t root)
+{
+    state->alloc     = alloc;
+    state->root      = (pte_t*) root;
+    state->lntp      = NULL;
+    state->tab_index = 0;
+    state->level     = 0;
+}
+
+/**
+ * Variant of kpt_set_ptes_flatten that must fully succeed: spins
+ * forever if fewer than `nr` entries could be installed (e.g. the
+ * page-table pool ran dry during boot).
+ */
+static inline void must_inline
+kpt_set_ptes(struct ptw_state* state, ptr_t addr,
+             pte_t pte, unsigned long lsize, unsigned int nr)
+{
+    unsigned int done;
+
+    done = kpt_set_ptes_flatten(state, addr, pte, lsize, nr);
+    if (done != nr) {
+        spin();
+    }
+}
+
+/**
+ * Prepare an editable page table covering va range [addr, addr + lsize).
+ *
+ * Installing a single null pte at granule `lsize >> _PAGE_BASE_SHIFT`
+ * forces the walker to allocate every intermediate table down to that
+ * level without mapping anything.
+ * NOTE(review): assumes _PAGE_BASE_SHIFT always drops below the next
+ * level's granule so the walk descends — TODO confirm for all archs.
+ */
+static inline void must_inline
+kpt_mktable_at(struct ptw_state* state, ptr_t addr, unsigned long lsize)
+{
+    kpt_set_ptes(state, addr, null_pte, lsize >> _PAGE_BASE_SHIFT, 1);
+}
+
+#endif /* __LUNAIX_ARCH_GENERIC_INIT_PAGETABLE_H */
-#ifndef __LUNAIX_ARCH_MULDIV64_H
-#define __LUNAIX_ARCH_MULDIV64_H
+#ifndef __LUNAIX_ARCH_GENERIC_MULDIV64_H
+#define __LUNAIX_ARCH_GENERIC_MULDIV64_H
#include <lunaix/types.h>
--- /dev/null
+#include <lunaix/mm/pagetable.h>
+#include <lunaix/sections.h>
+
+#include <asm/boot_stage.h>
+#include <asm/mm_defs.h>
+#include <asm-generic/init_pagetable.h>
+
+/**
+ * Allocate one page for a page table from the boot-time pool.
+ *
+ * Returns the physical address of the page, or 0 when the pool is
+ * exhausted. Pages are not zeroed here; presumably the whole pool is
+ * cleared by the caller before remapping — TODO confirm.
+ */
+ptr_t boot_text
+kpt_alloc_table(struct pt_alloc* alloc)
+{
+    if (alloc->index >= alloc->total) {
+        return 0;
+    }
+
+    ptr_t next;
+
+    next = alloc->base + alloc->index * PAGE_SIZE;
+    alloc->index++;
+
+    return next;
+}
+
+/**
+ * Walk the page-table hierarchy from `state->root` towards the level
+ * whose granule equals `level_size`, allocating missing intermediate
+ * tables from the pool when `create` is set.
+ *
+ * On success, records the table reached (lntp), its level, and the
+ * entry index for `addr` (tab_index) in `state` and returns true.
+ * Returns false (lntp = NULL) when a table is missing and `create`
+ * is false, or when the pool is exhausted.
+ */
+static bool boot_text
+__walk(struct ptw_state* state, ptr_t addr,
+       unsigned long level_size, bool create)
+{
+    pte_t pte, *pt;
+    int level, pt_index;
+    unsigned long lsize;
+    ptr_t next_level;
+
+    pt = state->root;
+    level = 0;
+
+    do {
+        lsize = lnt_page_size(level);
+        pt_index = va_level_index(addr, lsize);
+
+        /* existing entry: treat its paddr as the next-level table.
+           NOTE(review): a non-null entry at the target level (e.g. an
+           already-present huge/leaf mapping) is also descended into
+           here rather than stopping at it — confirm callers never
+           re-walk an address they have already mapped. */
+        pte = pt[pt_index];
+        if (!pte_isnull(pte)) {
+            next_level = pte_paddr(pte);
+            goto cont;
+        }
+
+        /* reached the requested level (or the leaf level) with an
+           empty slot: stop here and let the caller fill it */
+        if (pt_last_level(level) || lsize == level_size) {
+            break;
+        }
+
+        if (!create) {
+            goto fail;
+        }
+
+        next_level = kpt_alloc_table(state->alloc);
+        if (!next_level) {
+            state->lntp = NULL;   /* redundant: fail label also clears it */
+            goto fail;
+        }
+
+        pte = mkpte(next_level, KERNEL_PGTAB);
+        pt[pt_index] = pte;
+
+    cont:
+        pt = (pte_t*) next_level;
+        level++;
+    }
+    while (lsize > level_size);
+
+    state->lntp = pt;
+    state->level = level;
+    state->tab_index = pt_index;
+
+    return true;
+
+fail:
+    state->lntp = NULL;
+    return false;
+}
+
+/**
+ * Install `nr` contiguous ptes of granule `lsize` starting at `addr`
+ * (aligned down to `lsize`), advancing the pte's paddr by `lsize`
+ * per entry.
+ *
+ * Fills one table at a time; when a fill hits the end of a table
+ * (LEVEL_SIZE entries) the walk is redone at the advanced address to
+ * reach the next table. Returns the number of entries actually set,
+ * which is < nr only if a walk failed (pool exhausted).
+ */
+unsigned int boot_text
+kpt_set_ptes_flatten(struct ptw_state* state, ptr_t addr,
+                     pte_t pte, unsigned long lsize, unsigned int nr)
+{
+    unsigned int tab_i, _n;
+    pte_t *lntp;
+
+    _n = 0;
+    addr = addr & ~(lsize - 1);   /* align start down to the granule */
+
+    do {
+        if (!__walk(state, addr, lsize, true)) {
+            break;
+        }
+
+        lntp = state->lntp;
+        tab_i = state->tab_index;
+        while (_n < nr && tab_i < LEVEL_SIZE) {
+            lntp[tab_i++] = pte;
+            pte = pte_advance(pte, lsize);
+
+            addr += lsize;
+            _n++;
+        }
+    }
+    while (_n < nr);
+
+    return _n;
+}
+
+#define ksection_maps autogen_name(ksecmap)
+#define PF_X 0x1
+#define PF_W 0x2
+
+extern_autogen(ksecmap);
+
+bridge_farsym(__kexec_text_start);
+bridge_farsym(ksection_maps);
+
+/**
+ * Map every kernel section that resides in the high half
+ * (va >= KERNEL_RESIDENT) at its linked virtual address, deriving
+ * page permissions from the section's ELF p_flags: read-only base,
+ * plus exec (PF_X) and/or write (PF_W) as flagged.
+ *
+ * Sections are mapped with leaf-size (LFT_SIZE) pages; a failed
+ * mapping spins inside kpt_set_ptes.
+ */
+void boot_text
+kpt_migrate_highmem(struct ptw_state* state)
+{
+    pte_t pte;
+    struct ksecmap* maps;
+    struct ksection* section;
+    pfn_t pgs;
+
+    /* section map lives in kernel image; convert to physical since
+       paging is not enabled yet in this boot stage */
+    maps = (struct ksecmap*)to_kphysical(__far(ksection_maps));
+
+    for (unsigned int i = 0; i < maps->num; i++)
+    {
+        section = &maps->secs[i];
+
+        if (section->va < KERNEL_RESIDENT) {
+            continue;
+        }
+
+        pte = mkpte(section->pa, KERNEL_RDONLY);
+        if ((section->flags & PF_X)) {
+            pte = pte_mkexec(pte);
+        }
+        if ((section->flags & PF_W)) {
+            pte = pte_mkwritable(pte);
+        }
+
+        pgs = leaf_count(section->size);
+        kpt_set_ptes(state, section->va, pte, LFT_SIZE, pgs);
+    }
+}
\ No newline at end of file
#include <lunaix/sections.h>
#include <asm/mm_defs.h>
-#include <sys/boot/bstage.h>
+#include <asm/boot_stage.h>
#include <sys-generic/bootmem.h>
#ifdef CONFIG_ARCH_X86_64
#include "sys/boot/multiboot.S.inc"
#endif
-.section .boot.bss
+.section .boot.data
/* 根据System V ABI,栈地址必须16字节对齐 */
/* 这里只是一个临时栈,在_hhk_init里面我们会初始化内核专用栈 */
.align 16
#define __BOOT_CODE__
-#include <lunaix/mm/pagetable.h>
-#include <lunaix/compiler.h>
-#include <lunaix/sections.h>
-
-#include <sys/boot/bstage.h>
+#include <asm/boot_stage.h>
#include <asm/mm_defs.h>
-
-#define PF_X 0x1
-#define PF_W 0x2
-#define ksection_maps autogen_name(ksecmap)
-
-extern_autogen(ksecmap);
-
-bridge_farsym(__kexec_text_start);
-bridge_farsym(ksection_maps);
+#include <asm-generic/init_pagetable.h>
// define the initial page table layout
-struct kernel_map;
-
-static struct kernel_map kernel_pt __section(".kpg");
-export_symbol(debug, boot, kernel_pt);
-
struct kernel_map
{
- pte_t l0t[_PAGE_LEVEL_SIZE];
- pte_t pg_mnt[_PAGE_LEVEL_SIZE];
-
struct {
pte_t _lft[_PAGE_LEVEL_SIZE];
} kernel_lfts[16];
} align(4);
-static void boot_text
+static struct kernel_map kernel_pt __section(".kpg");
+export_symbol(debug, boot, kernel_pt);
+
+static ptr_t boot_text
do_remap()
{
- struct kernel_map* kpt_pa = (struct kernel_map*)to_kphysical(&kernel_pt);
-
- size_t mia_casa_i = pfn_at(KERNEL_RESIDENT, L0T_SIZE);
- pte_t* klptep = (pte_t*) &kpt_pa->l0t[mia_casa_i];
- pte_t* ktep = (pte_t*) kpt_pa->kernel_lfts;
- pte_t* boot_l0tep = (pte_t*) kpt_pa;
+ struct pt_alloc alloc;
+ struct ptw_state ptw;
+ pte_t pte;
- set_pte(boot_l0tep, pte_mkhuge(mkpte_prot(KERNEL_PGTAB)));
+ init_pt_alloc(&alloc, to_kphysical(&kernel_pt), sizeof(kernel_pt));
+ init_ptw_state(&ptw, &alloc, kpt_alloc_table(&alloc));
- // --- 将内核重映射至高半区 ---
+ pte = pte_mkhuge(mkpte_prot(KERNEL_DATA));
+ kpt_set_ptes(&ptw, 0, pte, L0T_SIZE, 1);
- // Hook the kernel reserved LFTs onto L0T
- pte_t pte = mkpte((ptr_t)ktep, KERNEL_PGTAB);
-
- for (u32_t i = 0; i < KEXEC_RSVD; i++) {
- pte = pte_setpaddr(pte, (ptr_t)&kpt_pa->kernel_lfts[i]);
- set_pte(klptep, pte);
+ kpt_mktable_at(&ptw, KMAP, L0T_SIZE);
+ kpt_mktable_at(&ptw, VMAP, L0T_SIZE);
- klptep++;
- }
+ kpt_migrate_highmem(&ptw);
- struct ksecmap* maps;
- struct ksection* section;
- pfn_t pgs;
- pte_t *kmntep;
+ pte = mkpte(__ptr(ptw.root), KERNEL_PGTAB);
+ kpt_set_ptes(&ptw, VMS_SELF, pte, L0T_SIZE, 1);
- maps = (struct ksecmap*)to_kphysical(__far(ksection_maps));
- ktep += pfn(to_kphysical(__far(__kexec_text_start)));
-
- // Ensure the size of kernel is within the reservation
- if (leaf_count(maps->ksize) > KEXEC_RSVD * _PAGE_LEVEL_SIZE)
- {
- // ERROR: require more pages
- // here should do something else other than head into blocking
- asm("ud2");
- }
-
- // Now, map the sections
-
- for (unsigned int i = 0; i < maps->num; i++)
- {
- section = &maps->secs[i];
-
- if (section->va < KERNEL_RESIDENT) {
- continue;
- }
-
- pte = mkpte_prot(KERNEL_RDONLY);
- if ((section->flags & PF_X)) {
- pte = pte_mkexec(pte);
- }
- if ((section->flags & PF_W)) {
- pte = pte_mkwritable(pte);
- }
-
- pgs = leaf_count(section->size);
- for (pfn_t j = 0; j < pgs; j++)
- {
- pte = pte_setpaddr(pte, section->pa + page_addr(j));
- set_pte(ktep, pte);
-
- ktep++;
- }
- }
-
- // set mount point
- kmntep = (pte_t*) &kpt_pa->l0t[pfn_at(PG_MOUNT_1, L0T_SIZE)];
- set_pte(kmntep, mkpte((ptr_t)kpt_pa->pg_mnt, KERNEL_PGTAB));
-
- // Build up self-reference
- int level = (VMS_SELF / L0T_SIZE) & _PAGE_LEVEL_MASK;
-
- pte = mkpte_root((ptr_t)kpt_pa, KERNEL_PGTAB);
- set_pte(&boot_l0tep[level], pte);
+ return __ptr(ptw.root);
}
ptr_t boot_text
((u8_t*)kmap_pa)[i] = 0;
}
- do_remap();
-
- return kmap_pa;
+ return do_remap();
}
\ No newline at end of file
#include <lunaix/mm/pagetable.h>
#include <lunaix/compiler.h>
-#include <sys/boot/bstage.h>
+#include <asm/boot_stage.h>
#include <asm/mm_defs.h>
#include <lunaix/boot_generic.h>
-#include <sys/boot/bstage.h>
+#include <asm/boot_stage.h>
#include <sys/boot/multiboot.h>
#include <sys-generic/bootmem.h>
#define __BOOT_CODE__
-#include <lunaix/mm/pagetable.h>
-#include <lunaix/compiler.h>
-#include <lunaix/sections.h>
-
-#include <sys/boot/bstage.h>
+#include <asm/boot_stage.h>
#include <asm/mm_defs.h>
+#include <asm-generic/init_pagetable.h>
#define RSVD_PAGES 32
-#define ksection_maps autogen_name(ksecmap)
-#define PF_X 0x1
-#define PF_W 0x2
-
-extern_autogen(ksecmap);
-
-bridge_farsym(__kexec_text_start);
-bridge_farsym(ksection_maps);
-
-// define the initial page table layout
-struct kernel_map;
-
-static struct kernel_map kpt __section(".kpg");
-export_symbol(debug, boot, kpt);
-
struct kernel_map
{
- pte_t l0t[_PAGE_LEVEL_SIZE]; // root table
- pte_t l1t_rsvd[_PAGE_LEVEL_SIZE]; // 0~4G reservation
-
struct {
pte_t _lft[_PAGE_LEVEL_SIZE];
} krsvd[RSVD_PAGES];
} align(8);
-struct allocator
-{
- struct kernel_map* kpt_pa;
- int pt_usage;
-};
-
-static inline ptr_t
-alloc_rsvd_page(struct allocator* _alloc)
-{
- if (_alloc->pt_usage >= KEXEC_RSVD) {
- asm ("ud2");
- }
-
- return __ptr(&_alloc->kpt_pa->krsvd[_alloc->pt_usage++]);
-}
-
-static pte_t* boot_text
-prealloc_pt(struct allocator* _allc, ptr_t va,
- pte_attr_t prot, size_t to_gran)
-{
- int lvl_i;
- pte_t *ptep, pte;
- size_t gran = L0T_SIZE;
-
- ptep = (pte_t*)&_allc->kpt_pa->l0t[0];
-
- for (int i = 0; i < _PTW_LEVEL && gran > to_gran; i++)
- {
- lvl_i = va_level_index(va, gran);
- ptep = &ptep[lvl_i];
- pte = pte_at(ptep);
-
- gran = gran >> _PAGE_LEVEL_SHIFT;
-
- if (pte_isnull(pte)) {
- pte = mkpte(alloc_rsvd_page(_allc), KERNEL_PGTAB);
- if (to_gran == gran) {
- pte = pte_setprot(pte, prot);
- }
-
- set_pte(ptep, pte);
- }
- ptep = (pte_t*) pte_paddr(pte);
- }
-
- return ptep;
-}
+static struct kernel_map kpt __section(".kpg");
+export_symbol(debug, boot, kpt);
-static void boot_text
+static ptr_t boot_text
do_remap()
{
- struct kernel_map* kpt_pa;
- pte_t *boot_l0tep, *klptep, *l1_rsvd;
- pte_t id_map, pte;
- ptr_t kstart;
+ struct pt_alloc alloc;
+ struct ptw_state ptw;
+ pte_t pte;
- unsigned int lvl_i = 0;
+ init_pt_alloc(&alloc, to_kphysical(&kpt), sizeof(kpt));
+ init_ptw_state(&ptw, &alloc, kpt_alloc_table(&alloc));
- // identity map the first 4G for legacy compatibility
- kpt_pa = (struct kernel_map*)to_kphysical(&kpt);
- boot_l0tep = (pte_t*) kpt_pa;
- l1_rsvd = (pte_t*) kpt_pa->l1t_rsvd;
- id_map = pte_mkhuge(mkpte_prot(KERNEL_PGTAB));
-
- pte = mkpte((ptr_t)l1_rsvd, KERNEL_PGTAB);
- set_pte(boot_l0tep, pte);
-
- for (int i = 0; i < 4; i++, l1_rsvd++)
- {
- id_map = pte_setpaddr(id_map, (ptr_t)i << 30);
- set_pte(l1_rsvd, id_map);
- }
+ pte = pte_mkhuge(mkpte_prot(KERNEL_PGTAB));
+ kpt_set_ptes(&ptw, 0, pte, L1T_SIZE, 4);
- // Remap the kernel to -2GiB
-
- struct allocator alloc = {
- .kpt_pa = kpt_pa,
- .pt_usage = 0
- };
-
- prealloc_pt(&alloc, VMAP, KERNEL_PGTAB, L1T_SIZE);
- prealloc_pt(&alloc, PG_MOUNT_1, KERNEL_PGTAB, LFT_SIZE);
-
- kstart = page_aligned(__far(__kexec_text_start));
+ kpt_mktable_at(&ptw, VMAP, L0T_SIZE);
#if LnT_ENABLED(3)
size_t gran = L3T_SIZE;
size_t gran = L2T_SIZE;
#endif
- prealloc_pt(&alloc, PMAP, KERNEL_PGTAB, gran);
- klptep = prealloc_pt(&alloc, kstart, KERNEL_PGTAB, gran);
- klptep += va_level_index(kstart, gran);
-
- pte = mkpte(0, KERNEL_PGTAB);
- for (int i = alloc.pt_usage; i < KEXEC_RSVD; i++)
- {
- pte = pte_setpaddr(pte, (ptr_t)&kpt_pa->krsvd[i]);
- set_pte(klptep++, pte);
- }
-
- struct ksecmap* maps;
- struct ksection* section;
- pfn_t pgs;
+ kpt_mktable_at(&ptw, KMAP, gran);
+ kpt_mktable_at(&ptw, PMAP, gran);
- maps = (struct ksecmap*)to_kphysical(__far(ksection_maps));
+ kpt_migrate_highmem(&ptw);
- // this is the first LFT we hooked on.
- // all these LFT are contig in physical address
- klptep = (pte_t*) &kpt_pa->krsvd[alloc.pt_usage];
- klptep += pfn(to_kphysical(kstart));
+ pte = mkpte(__ptr(ptw.root), KERNEL_PGTAB);
+ kpt_set_ptes(&ptw, VMS_SELF, pte, L0T_SIZE, 1);
- // Ensure the size of kernel is within the reservation
- int remain = KEXEC_RSVD - alloc.pt_usage;
- if (leaf_count(maps->ksize) > remain * _PAGE_LEVEL_SIZE)
- {
- asm("ud2");
- }
-
- // assume contig kernel vaddrs
- for (unsigned int i = 0; i < maps->num; i++)
- {
- section = &maps->secs[i];
-
- if (section->va < KERNEL_RESIDENT) {
- continue;
- }
-
- pte = mkpte_prot(KERNEL_RDONLY);
- if ((section->flags & PF_X)) {
- pte = pte_mkexec(pte);
- }
- if ((section->flags & PF_W)) {
- pte = pte_mkwritable(pte);
- }
-
- pgs = leaf_count(section->size);
- for (pfn_t j = 0; j < pgs; j++)
- {
- pte = pte_setpaddr(pte, section->pa + page_addr(j));
- set_pte(klptep, pte);
-
- klptep++;
- }
- }
-
- // Build up self-reference
- lvl_i = va_level_index(VMS_SELF, L0T_SIZE);
- pte = mkpte_root(__ptr(kpt_pa), KERNEL_PGTAB);
- set_pte(boot_l0tep + lvl_i, pte);
+ return __ptr(ptw.root);
}
-
ptr_t boot_text
remap_kernel()
-{
- ptr_t kmap_pa = to_kphysical(&kpt);
-
+{
asm volatile("movq %1, %%rdi\n"
"rep stosb\n" ::"c"(sizeof(kpt)),
- "r"(kmap_pa),
+ "r"(to_kphysical(&kpt)),
"a"(0)
: "rdi", "memory");
- do_remap();
-
- return kmap_pa;
+ return do_remap();
}
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_ARCH_BOOT_STAGE_H
+#define __LUNAIX_ARCH_BOOT_STAGE_H
+#include <lunaix/types.h>
+#include <lunaix/boot_generic.h>
+
+extern ptr_t __multiboot_addr;
+
+extern u8_t __kboot_start[];
+extern u8_t __kboot_end[];
+
+#include <asm-generic/boot_stage.h>
+
+ptr_t
+remap_kernel();
+
+#endif /* __LUNAIX_ARCH_BOOT_STAGE_H */
#define KERNEL_IMG_SIZE __ulong(0x4000000)
#define KERNEL_IMG_END END_POINT(KERNEL_IMG)
-#define PG_MOUNT_1 __ulong(0xc8000000)
+#define KMAP __ulong(0xc8000000)
+#define PG_MOUNT_1 KMAP
#define PG_MOUNT_1_SIZE __ulong(0x1000)
#define PG_MOUNT_1_END END_POINT(PG_MOUNT_1)
#define __LUNAIX_ARCHINIT_H
#include <lunaix/types.h>
-#include "bstage.h"
+#include <asm/boot_stage.h>
#include "multiboot.h"
ptr_t boot_text
return 0;
}
+/**
+ * Return a copy of `pte` whose physical address is advanced by
+ * `lvl_size` bytes; all other attribute bits are preserved.
+ */
+static inline pte_t must_inline
+pte_advance(pte_t pte, unsigned long lvl_size)
+{
+    ptr_t next_pa = pte_paddr(pte) + lvl_size;
+    return pte_setpaddr(pte, next_pa);
+}
+
#endif /* __LUNAIX_PAGETABLE_H */
{
do {
set_pte(ptep, pte);
- pte_val(pte) += lvl_size;
+ pte = pte_advance(pte, lvl_size);
ptep++;
} while (--n > 0);
}
lcfg_env.update()
lcfg_env.save(opts.config_save)
lcfg_env.export()
- else:
- do_buildfile_gen(opts, lcfg_env)
+
+ do_buildfile_gen(opts, lcfg_env)
if __name__ == "__main__":
main()
\ No newline at end of file
${prefix} rm -d "${tmp_mnt}" || cleanup
+${prefix} chmod o+rw ${rootfs} || cleanup
+
if [ ! "${has_err:-0}" -eq 0 ]; then
echo "done, but with error."
else
"gdb_port": "$GDB_PORT",
"traced": [
"x86_recv_fault",
+ "x86_log_pagefault",
"ide_dma_cb"
]
},