-#include <lunaix/mm/pmm.h>
#include <lunaix/status.h>
#include <lunaix/mm/pagetable.h>
#include <lunaix/spike.h>
-// This is a very large array...
-static struct pp_struct pm_table[PM_BMP_MAX_SIZE];
-export_symbol(debug, pmm, pm_table);
+
+/*
+ * Internal PMM definitions — presumably declares struct pmem,
+ * struct ppage and the pool/allocator interfaces used below
+ * (pmm_arch_*, pmm_allocator_*); confirm against pmm_internal.h.
+ */
+#include "pmm_internal.h"
-static ptr_t max_pg;
-export_symbol(debug, pmm, max_pg);
-
-void
-pmm_mark_page_free(ptr_t ppn)
+/*
+ * Return true when `page` is compatible with `typemask`: an untyped
+ * page (type == 0) matches any mask, otherwise at least one of the
+ * page's type bits must be present in the mask.
+ */
+static inline bool
+__check_typemask(struct ppage* page, ppage_type_t typemask)
{
-    if ((pm_table[ppn].attr & PP_FGLOCKED)) {
-        return;
-    }
-    pm_table[ppn].ref_counts = 0;
+    return !page->type || (page->type & typemask);
}
-void
-pmm_mark_page_occupied(ptr_t ppn, pp_attr_t attr)
-{
-    pm_table[ppn] =
-      (struct pp_struct){ .ref_counts = 1, .attr = attr };
-}
+/*
+ * Global physical-memory descriptor. Fields used in this file:
+ * `reserved` (llist of reserved ranges), `pool[]` (POOL_COUNT
+ * allocation pools) and `list_len` (entries in the ppage array).
+ * Exported for debugger inspection only.
+ */
+static struct pmem memory;
+export_symbol(debug, pmm, memory);
+/*
+ * Bring up the physical memory manager from the boot handoff:
+ * remap the ppage list, create the arch-defined pools, initialise
+ * the allocator state, then place the pages backing the ppage
+ * array itself on hold so they are never handed out.
+ */
void
-pmm_mark_chunk_free(ptr_t start_ppn, size_t page_count)
+pmm_init(struct boot_handoff* bctx)
{
-    for (size_t i = start_ppn; i < start_ppn + page_count && i < max_pg; i++) {
-        if ((pm_table[i].attr & PP_FGLOCKED)) {
-            continue;
-        }
-        pm_table[i].ref_counts = 0;
-    }
-}
+    ptr_t pplist_pa;
-void
-pmm_mark_chunk_occupied(u32_t start_ppn,
-                        size_t page_count,
-                        pp_attr_t attr)
-{
-    for (size_t i = start_ppn; i < start_ppn + page_count && i < max_pg; i++) {
-        pm_table[i] =
-          (struct pp_struct){ .ref_counts = 1, .attr = attr };
-    }
-}
+    llist_init_head(&memory.reserved);
-// 我们跳过位于0x0的页。我们不希望空指针是指向一个有效的内存空间。
-#define LOOKUP_START 1
-
-volatile size_t pg_lookup_ptr;
+    /* Physical address where the ppage list was remapped. */
+    pplist_pa = pmm_arch_init_remap(&memory, bctx);
+
+    if (!pplist_pa) {
+        /* No usable remap target: physical memory is unusable, halt. */
+        spin();
+    }
-void
-pmm_init(ptr_t mem_upper_lim)
-{
-    max_pg = pfn(mem_upper_lim);
+    pmm_arch_init_pool(&memory);
-    pg_lookup_ptr = LOOKUP_START;
+    pmm_allocator_init(&memory);
-    // mark all as occupied
-    for (size_t i = 0; i < PM_BMP_MAX_SIZE; i++) {
-        pm_table[i] =
-          (struct pp_struct){ .attr = 0, .ref_counts = 1 };
+    for (size_t i = 0; i < POOL_COUNT; i++)
+    {
+        pmm_allocator_init_pool(&memory.pool[i]);
    }
+
+    /* Keep the pages holding the ppage array itself out of circulation. */
+    pfn_t pplist_size = memory.list_len * sizeof(struct ppage);
+    pmm_onhold_range(pfn(pplist_pa), leaf_count(pplist_size));
}
+/*
+ * Mark [start, start + npages) as on-hold (hold == true) or release
+ * the hold (hold == false), clamping the request to each pool it
+ * overlaps. Returns true when every requested page was marked.
+ * NOTE(review): a start beyond the tracked ppage list reports
+ * success — presumably "nothing to manage there"; confirm callers
+ * expect that rather than a failure.
+ */
-ptr_t
-pmm_alloc_cpage(size_t num_pages, pp_attr_t attr)
+static inline bool must_inline optimize("-fipa-cp-clone")
+__pmm_mark_range(pfn_t start, size_t npages, const bool hold)
{
-    size_t p1 = 0;
-    size_t p2 = 0;
-
-    while (p2 < max_pg && p2 - p1 < num_pages) {
-        (!(&pm_table[p2])->ref_counts) ? (p2++) : (p1 = ++p2);
+    if (start >= memory.list_len) {
+        return true;
    }
-    if (p2 == max_pg && p2 - p1 < num_pages) {
-        return NULLPTR;
-    }
+    struct ppage *_start, *_end,
+                 *_mark_start, *_mark_end;
-    pmm_mark_chunk_occupied(p1, num_pages, attr);
+    _start = ppage(start);
+    _end = ppage(start + npages - 1);
+
+    struct pmem_pool* pool;
+    for (int i = 0; npages && i < POOL_COUNT; i++) {
+        pool = &memory.pool[i];
-    return p1 << 12;
-}
+        /* Clamp the requested range to this pool's span. */
+        _mark_start = MAX(pool->pool_start, _start);
+        _mark_end = MIN(pool->pool_end, _end);
+        if (pool->pool_end < _mark_start || _mark_end < pool->pool_start) {
+            continue;
+        }
-ptr_t
-pmm_alloc_page(pp_attr_t attr)
-{
-    // Next fit approach. Maximize the throughput!
-    ptr_t good_page_found = (ptr_t)NULL;
-    size_t old_pg_ptr = pg_lookup_ptr;
-    size_t upper_lim = max_pg;
-    struct pp_struct* pm;
-    while (!good_page_found && pg_lookup_ptr < upper_lim) {
-        pm = &pm_table[pg_lookup_ptr];
-
-        if (!pm->ref_counts) {
-            *pm = (struct pp_struct){ .attr = attr,
-                                      .ref_counts = 1 };
-            good_page_found = pg_lookup_ptr << 12;
-            break;
+        bool _r;
+        if (hold) {
+            _r = pmm_allocator_trymark_onhold(pool, _mark_start, _mark_end);
        } else {
-            pg_lookup_ptr++;
-
-            // We've searched the interval [old_pg_ptr, max_pg) but failed
-            // may be chances in [1, old_pg_ptr) ?
-            // Let's find out!
-            if (pg_lookup_ptr >= upper_lim && old_pg_ptr != LOOKUP_START) {
-                upper_lim = old_pg_ptr;
-                pg_lookup_ptr = LOOKUP_START;
-                old_pg_ptr = LOOKUP_START;
-            }
+            _r = pmm_allocator_trymark_unhold(pool, _mark_start, _mark_end);
        }
-    }
-    return good_page_found;
-}
-int
-pmm_free_one(ptr_t page, pp_attr_t attr_mask)
-{
-    pfn_t ppfn = pfn(page);
-    struct pp_struct* pm = &pm_table[ppfn];
-
-    assert(ppfn < max_pg && pm->ref_counts);
-    if (pm->attr && !(pm->attr & attr_mask)) {
-        return 0;
+        /* Count only pages the allocator actually transitioned. */
+        if (_r)
+        {
+            npages -= (ppfn(_mark_end) - ppfn(_mark_start)) + 1;
+        }
    }
-    pm->ref_counts--;
-    return 1;
+    /* All pages accounted for iff npages reached zero. */
+    return !npages;
}
-int
-pmm_ref_page(ptr_t page)
+/*
+ * Reserve `npages` physical pages starting at pfn `start` so the
+ * allocator will not hand them out. Returns true on full success.
+ */
+bool
+pmm_onhold_range(pfn_t start, size_t npages)
{
-    u32_t ppn = pfn(page);
-
-    if (ppn >= PM_BMP_MAX_SIZE) {
-        return 0;
-    }
-
-    struct pp_struct* pm = &pm_table[ppn];
-    assert(ppn < max_pg && pm->ref_counts);
+    return __pmm_mark_range(start, npages, true);
+}
-    pm->ref_counts++;
-    return 1;
+/*
+ * Release a hold previously placed by pmm_onhold_range(), returning
+ * the pages to the allocator. Returns true on full success.
+ */
+bool
+pmm_unhold_range(pfn_t start, size_t npages)
+{
+    return __pmm_mark_range(start, npages, false);
}
-void
-pmm_set_attr(ptr_t page, pp_attr_t attr)
+/*
+ * Return the pool descriptor for `pool_index` (< POOL_COUNT).
+ * NOTE(review): a negative index is not rejected by this assert.
+ */
+struct pmem_pool*
+pmm_pool_get(int pool_index)
{
-    struct pp_struct* pp = &pm_table[pfn(page)];
-
-    if (pp->ref_counts) {
-        pp->attr = attr;
-    }
+    assert(pool_index < POOL_COUNT);
+
+    return &memory.pool[pool_index];
}
-struct pp_struct*
-pmm_query(ptr_t pa)
+/*
+ * Define pool slot `pool` to cover pfns [start, start + size),
+ * typed POOL_UNIFIED, and return its descriptor. `pool` is assumed
+ * to be a valid slot index — no bounds check here; confirm callers.
+ */
+struct pmem_pool*
+pmm_declare_pool(int pool, pfn_t start, pfn_t size)
{
-    u32_t ppn = pa >> 12;
+    struct pmem_pool* _pool = &memory.pool[pool];
-    if (ppn >= PM_BMP_MAX_SIZE) {
-        return NULL;
-    }
+    _pool->type = POOL_UNIFIED;
+    /* pool_end is inclusive: the last page of the range. */
+    _pool->pool_end = ppage(start + size - 1);
+    _pool->pool_start = ppage(start);
-    return &pm_table[ppn];
+    return _pool;
}
\ No newline at end of file