#include <lunaix/mm/page.h>
#include <lunaix/mm/pagetable.h>

/* assumed companion headers (exact paths are a guess): struct pmem,
   pmm_declare_pool/POOL_UNIFIED and struct boot_handoff come from here */
#include <lunaix/mm/pmm.h>
#include <lunaix/boot_generic.h>

extern unsigned int __kexec_end[];

void
pmm_arch_init_pool(struct pmem* memory)
{
    // a single unified pool covering the whole tracked pfn range
    pmm_declare_pool(POOL_UNIFIED, 1, memory->list_len);
}

ptr_t
pmm_arch_init_remap(struct pmem* memory, struct boot_handoff* bctx)
{
    // one struct ppage per physical page frame
    size_t ppfn_total = pfn(bctx->mem.size) + 1;
    size_t pool_size = ppfn_total * sizeof(struct ppage);

    // locate a free physical region large enough to host the pplist
    size_t i = 0;
    struct boot_mmapent* ent;
    for (; i < bctx->mem.mmap_len; i++) {
        ent = &bctx->mem.mmap[i];
        if (free_memregion(ent) && ent->size > pool_size) {
            break;
        }
    }

    if (i == bctx->mem.mmap_len) {
        // fail to find a viable free region to host pplist
        return 0;
    }

    // place the pplist above both the region start and the kernel image
    ptr_t kexec_end = to_kphysical((ptr_t)__kexec_end);
    ptr_t aligned_pplist = MAX(ent->start, kexec_end);

    // FIXME this is a temporary hack, we need a better way to convey
    //       the mem-map for us to settle the pplist safely
    for (i = 0; i < bctx->mods.mods_num; i++) {
        aligned_pplist = MAX(aligned_pplist, bctx->mods.entries[i].end);
    }

    aligned_pplist = napot_upaligned(aligned_pplist, L0T_SIZE);
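
    // illustration (assuming L0T_SIZE == 4MiB, per the x86_32 note
    // below): napot_upaligned rounds up to the next naturally-aligned
    // boundary, e.g. 0x01234567 -> 0x01400000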

    if (aligned_pplist + pool_size > ent->start + ent->size) {
        // aligned pplist no longer fits in the chosen region
        // (failure handling assumed: bail out like the no-region case)
        return 0;
    }

    // for x86_32, the upper bound of the memory requirement for pplist
    // is sizeof(struct ppage) * 1Mi entries. For simplicity (as well as
    // efficiency), we limit the granule to 4M huge pages; thus, it
    // will take away at least 4M worth of vm address resource,
    // regardless of the actual physical memory size.
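    //
    // sanity-checking that bound (4KiB base pages; the 16-byte
    // sizeof(struct ppage) used here is hypothetical, not taken
    // from this file):
    //      4GiB / 4KiB  = 1Mi frames to track
    //      1Mi * 16B    = 16MiB worst-case pplist
    //      16MiB / 4MiB = 4 huge pages to map it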

    // anchor the pplist at the vmap location (right after the kernel)
    memory->pplist = (struct ppage*)VMAP;
    memory->list_len = ppfn_total;

    pfn_t nhuge = page_count(pool_size, L0T_SIZE);
    pte_t* ptep = mkl0tep_va(VMS_SELF, VMAP);
    pte_t pte = mkpte(aligned_pplist, KERNEL_DATA);

    // install nhuge contiguous huge-page mappings for the pool,
    // then flush the now-stale translation
    vmm_set_ptes_contig(ptep, pte_mkhuge(pte), L0T_SIZE, nhuge);
    tlb_flush_kernel(VMAP);

    // shift the actual vmap start address past the pplist mapping
    vmap_set_start(VMAP + nhuge * L0T_SIZE);

    return aligned_pplist;
}
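
/*
 * A minimal sketch of the expected call order from the generic pmm
 * layer (the caller shown here is hypothetical, not part of this
 * file): obtain the pool's physical base via the remap, then declare
 * the pool over it.
 *
 *     ptr_t pool_pa = pmm_arch_init_remap(memory, bctx);
 *     if (!pool_pa) {
 *         // no region can host the pplist; unrecoverable at boot
 *     }
 *     pmm_arch_init_pool(memory);
 *
 * Returning the physical base also lets the caller reserve the frames
 * backing the pplist itself, so they are never handed out.
 */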