#include <lunaix/mm/page.h>
#include <lunaix/mm/pagetable.h>
#include <lunaix/sections.h>

void
pmm_arch_init_pool(struct pmem* memory)
{
    pmm_declare_pool(POOL_UNIFIED, 1, memory->list_len);
}
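
/*
 * Map the pplist (physical page descriptor array) into kernel space.
 * Picks a free physical region from the boot memory map, maps it at
 * PMAP with huge pages, and returns its physical base (0 on failure).
 */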
ptr_t
pmm_arch_init_remap(struct pmem* memory, struct boot_handoff* bctx)
{
    size_t ppfn_total = pfn(bctx->mem.size);
    size_t pool_size = ppfn_total * sizeof(struct ppage);
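
    // scan the boot memory map for a free region large enough to
    // host the pool: one struct ppage per physical page frame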
    size_t i = 0;
    int found = 0;
    struct boot_mmapent* ent = NULL;

    while (i < bctx->mem.mmap_len) {
        ent = &bctx->mem.mmap[i++];
        if (free_memregion(ent) && ent->size > pool_size) {
            found = 1;
            break;
        }
    }

    // failed to find a viable free region to host the pplist
    if (!found) {
        return 0;
    }
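
    // keep the pool above the kernel image so it never overlaps
    // what the bootloader has already loaded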
    ptr_t kexec_end = to_kphysical(kernel_start);
    ptr_t aligned_pplist = MAX(ent->start, kexec_end);
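
    // round the base up to the huge page granule mapped below
    // (L2T-sized, 2MiB, on x86_64; L0T-sized, 4MiB, on x86_32)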
#ifdef CONFIG_ARCH_X86_64
    aligned_pplist = napot_upaligned(aligned_pplist, L2T_SIZE);
#else
    aligned_pplist = napot_upaligned(aligned_pplist, L0T_SIZE);
#endif
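
    // after alignment, the pool must still fit inside the chosen region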
    if (aligned_pplist + pool_size > ent->start + ent->size) {
        return 0;
    }

    // for x86_32, the upper bound of memory requirement for pplist
    // is sizeof(struct ppage) * 1Mi (one entry per 4KiB frame in a
    // 4GiB space). For simplicity (as well as efficiency), we limit
    // the granule to 4M huge pages, thus, it will take away at least
    // 4M worth of vm address resource regardless of the actual
    // physical memory size

    // anchor the pplist at vmap location (right after kernel)
    memory->pplist = (struct ppage*)PMAP;
    memory->list_len = ppfn_total;

    size_t nhuge;
    pte_t* ptep;
    pte_t pte = mkpte(aligned_pplist, KERNEL_DATA);
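
    // install the mapping: pte is the template leaf entry, promoted
    // to a huge page entry via pte_mkhuge() before installation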
#ifdef CONFIG_ARCH_X86_64
    nhuge = page_count(pool_size, L2T_SIZE);
    ptep = mkl2tep_va(VMS_SELF, PMAP);
    vmm_set_ptes_contig(ptep, pte_mkhuge(pte), L2T_SIZE, nhuge);
#else
    nhuge = page_count(pool_size, L0T_SIZE);
    ptep = mkl0tep_va(VMS_SELF, PMAP);

    // since VMAP and PMAP share the same address space,
    // we need to shift VMAP to make room
    vmap_set_start(VMAP + nhuge * L0T_SIZE);
    vmm_set_ptes_contig(ptep, pte_mkhuge(pte), L0T_SIZE, nhuge);
#endif
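
    // make the new mappings visible by evicting any stale kernel
    // TLB entries covering the PMAP region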
    tlb_flush_kernel(PMAP);

    return aligned_pplist;
}