#include <klibc/string.h>
#include <lunaix/boot_generic.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
#include <lunaix/kcmd.h>

#include <sys/mm/mm_defs.h>

/* Kernel image boundaries, provided by link/linker.ld */
extern unsigned char __kexec_end[], __kexec_start[];
/**
* @brief Reserve memory for kernel bootstrapping initialization
{
bhctx->prepare(bhctx);
- struct boot_mmapent *mmap = bhctx->mem.mmap, *mmapent;
- for (size_t i = 0; i < bhctx->mem.mmap_len; i++) {
- mmapent = &mmap[i];
- size_t size_pg = PN(ROUNDUP(mmapent->size, PG_SIZE));
+ boot_begin_arch_reserve(bhctx);
+
+ // 将内核占据的页,包括前1MB,hhk_init 设为已占用
+ size_t pg_count = leaf_count(to_kphysical(__kexec_end));
+ pmm_onhold_range(0, pg_count);
- if (mmapent->type == BOOT_MMAP_FREE) {
- pmm_mark_chunk_free(PN(mmapent->start), size_pg);
- continue;
- }
+ size_t i;
+ struct boot_mmapent* ent;
+ for (i = 0; i < bhctx->mem.mmap_len; i++) {
+ ent = &bhctx->mem.mmap[i];
- ptr_t pa = PG_ALIGN(mmapent->start);
- for (size_t j = 0; j < size_pg && pa < KERNEL_EXEC;
- j++, pa += PM_PAGE_SIZE) {
- vmm_set_mapping(VMS_SELF, pa, pa, PG_PREM_RW, VMAP_IGNORE);
+ if (reserved_memregion(ent) || reclaimable_memregion(ent)) {
+ unsigned int counts = leaf_count(ent->size);
+ pmm_onhold_range(pfn(ent->start), counts);
}
}
/* Reserve region for all loaded modules */
- for (size_t i = 0; i < bhctx->mods.mods_num; i++) {
+ for (i = 0; i < bhctx->mods.mods_num; i++) {
struct boot_modent* mod = &bhctx->mods.entries[i];
- pmm_mark_chunk_occupied(KERNEL_PID,
- PN(mod->start),
- CEIL(mod->end - mod->start, PG_SIZE_BITS),
- PP_FGLOCKED);
+ unsigned int counts = leaf_count(mod->end - mod->start);
+
+ pmm_onhold_range(pfn(mod->start), counts);
}
}
-extern u8_t __kexec_boot_end; /* link/linker.ld */
+extern u8_t __kboot_end; /* link/linker.ld */
/**
* @brief Release memory for kernel bootstrapping initialization
void
boot_end(struct boot_handoff* bhctx)
{
- struct boot_mmapent *mmap = bhctx->mem.mmap, *mmapent;
+ struct boot_mmapent* ent;
for (size_t i = 0; i < bhctx->mem.mmap_len; i++) {
- mmapent = &mmap[i];
- size_t size_pg = PN(ROUNDUP(mmapent->size, PG_SIZE));
-
- if (mmapent->start >= KERNEL_EXEC || mmapent->type == BOOT_MMAP_FREE) {
- continue;
- }
-
- if (mmapent->type == BOOT_MMAP_RCLM) {
- pmm_mark_chunk_free(PN(mmapent->start), size_pg);
- }
+ ent = &bhctx->mem.mmap[i];
- ptr_t pa = PG_ALIGN(mmapent->start);
- for (size_t j = 0; j < size_pg && pa < KERNEL_EXEC;
- j++, pa += PM_PAGE_SIZE) {
- vmm_del_mapping(VMS_SELF, pa);
+ if (reclaimable_memregion(ent)) {
+ unsigned int counts = leaf_count(ent->size);
+ pmm_unhold_range(pfn(ent->start), counts);
}
}
bhctx->release(bhctx);
-}
-/**
- * @brief Clean up the boot stage code and data
- *
- */
-void
-boot_cleanup()
-{
- // clean up
- for (size_t i = 0; i < (ptr_t)(&__kexec_boot_end); i += PG_SIZE) {
- vmm_del_mapping(VMS_SELF, (ptr_t)i);
- pmm_free_page(KERNEL_PID, (ptr_t)i);
- }
+ boot_clean_arch_reserve(bhctx);
}
void