-#include <lunaix/mm/page.h>
-#include <lunaix/mm/pmm.h>
+#include <lunaix/status.h>
+#include <lunaix/mm/pagetable.h>
+#include <lunaix/spike.h>
-#define MARK_PG_AUX_VAR(ppn) \
- uint32_t group = ppn / 8; \
- uint32_t msk = (0x80U >> (ppn % 8)); \
+#include "pmm_internal.h"
-#define MARK_CHUNK_AUX_VAR(start_ppn, page_count) \
- uint32_t group = start_ppn / 8; \
- uint32_t offset = start_ppn % 8; \
- uint32_t group_count = (page_count + offset) / 8; \
- uint32_t remainder = (page_count + offset) % 8; \
- uint32_t leading_shifts = \
- (page_count + offset) < 8 ? page_count : 8 - offset;
-
-uint8_t pm_bitmap[PM_BMP_MAX_SIZE];
-
-uintptr_t max_pg;
-
-// ... |xxxx xxxx |
-// ... |-->|
-void
-pmm_mark_page_free(uintptr_t ppn)
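+// A page with no type recorded is free for any use; otherwise its
+// type must intersect the requested type mask.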
+static inline bool
+__check_typemask(struct ppage* page, ppage_type_t typemask)
{
- MARK_PG_AUX_VAR(ppn)
- pm_bitmap[group] = pm_bitmap[group] & ~msk;
+ return !page->type || (page->type & typemask);
}
-void
-pmm_mark_page_occupied(uintptr_t ppn)
-{
- MARK_PG_AUX_VAR(ppn)
- pm_bitmap[group] = pm_bitmap[group] | msk;
-}
+static struct pmem memory;
+export_symbol(debug, pmm, memory);
void
-pmm_mark_chunk_free(uintptr_t start_ppn, size_t page_count)
+pmm_init(struct boot_handoff* bctx)
{
- MARK_CHUNK_AUX_VAR(start_ppn, page_count)
+ ptr_t pplist_pa;
- // nasty bit-level hacks, but they reduce the number of iterations.
+ llist_init_head(&memory.reserved);
+
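+ // let the arch layer remap the struct ppage list into the kernel
+ // address space; it hands back the list's physical address.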
+ pplist_pa = pmm_arch_init_remap(&memory, bctx);
+
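+ // no page list means no way to track physical memory; halt here.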
+ if (!pplist_pa) {
+ spin();
+ }
- pm_bitmap[group] &= ~(((1U << leading_shifts) - 1) << (8 - offset - leading_shifts));
+ pmm_arch_init_pool(&memory);
- group++;
+ pmm_allocator_init(&memory);
- // prevent unsigned overflow
- for (uint32_t i = 0; group_count !=0 && i < group_count - 1; i++, group++) {
- pm_bitmap[group] = 0;
+ for (size_t i = 0; i < POOL_COUNT; i++)
+ {
+ pmm_allocator_init_pool(&memory.pool[i]);
}
- pm_bitmap[group] &=
- ~(((1U << (page_count > 8 ? remainder : 0)) - 1) << (8 - remainder));
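+ // the page list itself occupies physical pages; put them on hold
+ // so the allocator never hands them out.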
+ pfn_t pplist_size = memory.list_len * sizeof(struct ppage);
+ pmm_onhold_range(pfn(pplist_pa), leaf_count(pplist_size));
}
-void
-pmm_mark_chunk_occupied(uint32_t start_ppn, size_t page_count)
+static inline bool must_inline optimize("-fipa-cp-clone")
+__pmm_mark_range(pfn_t start, size_t npages, const bool hold)
{
- MARK_CHUNK_AUX_VAR(start_ppn, page_count)
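+ // a range entirely beyond tracked memory is trivially done.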
+ if (start >= memory.list_len) {
+ return true;
+ }
- pm_bitmap[group] |= (((1U << leading_shifts) - 1) << (8 - offset - leading_shifts));
+ struct ppage *_start, *_end,
+ *_mark_start, *_mark_end;
- group++;
+ _start = ppage(start);
+ _end = ppage(start + npages - 1);
+
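+ // visit every pool and clamp the requested range against each.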
+ struct pmem_pool* pool;
+ for (int i = 0; npages && i < POOL_COUNT; i++) {
+ pool = &memory.pool[i];
- // prevent unsigned overflow
- for (uint32_t i = 0; group_count !=0 && i < group_count - 1; i++, group++) {
- pm_bitmap[group] = 0xFFU;
- }
+ _mark_start = MAX(pool->pool_start, _start);
+ _mark_end = MIN(pool->pool_end, _end);
+ if (pool->pool_end < _mark_start || _mark_end < pool->pool_start) {
+ continue;
+ }
- pm_bitmap[group] |=
- (((1U << (page_count > 8 ? remainder : 0)) - 1) << (8 - remainder));
-}
+ bool _r;
+ if (hold) {
+ _r = pmm_allocator_trymark_onhold(pool, _mark_start, _mark_end);
+ } else {
+ _r = pmm_allocator_trymark_unhold(pool, _mark_start, _mark_end);
+ }
-// We skip the page at physical address 0x0: we don't want the null
-// pointer to point to a valid piece of memory.
-#define LOOKUP_START 1
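+ // on success, deduct the pages this pool covered from the request.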
+ if (_r)
+ {
+ npages -= (ppfn(_mark_end) - ppfn(_mark_start)) + 1;
+ }
+ }
-size_t pg_lookup_ptr;
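+ // succeed only if every requested page fell into some pool.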
+ return !npages;
+}
-void
-pmm_init(uintptr_t mem_upper_lim)
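+// Mark a range of physical pages as on-hold (reserved), so the
+// allocator will not hand them out.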
+bool
+pmm_onhold_range(pfn_t start, size_t npages)
{
- max_pg = (PG_ALIGN(mem_upper_lim) >> 12);
-
- pg_lookup_ptr = LOOKUP_START;
+ return __pmm_mark_range(start, npages, true);
+}
- // mark all as occupied
- for (size_t i = 0; i < PM_BMP_MAX_SIZE; i++) {
- pm_bitmap[i] = 0xFFU;
- }
+bool
+pmm_unhold_range(pfn_t start, size_t npages)
+{
+ return __pmm_mark_range(start, npages, false);
}
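+// Usage sketch (hypothetical caller; fw_rsvd_start and fw_rsvd_npages
+// are illustrative names, not part of this API):
+//   if (!pmm_onhold_range(fw_rsvd_start, fw_rsvd_npages)) {
+//       /* some of the range lies outside every pool */
+//   }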
-void*
-pmm_alloc_page()
+struct pmem_pool*
+pmm_pool_get(int pool_index)
{
- // Next fit approach. Maximize the throughput!
- uintptr_t good_page_found = NULL;
- size_t old_pg_ptr = pg_lookup_ptr;
- size_t upper_lim = max_pg;
- uint8_t chunk = 0;
- while (!good_page_found && pg_lookup_ptr < upper_lim) {
- chunk = pm_bitmap[pg_lookup_ptr >> 3];
-
- // skip the fully occupied chunk, reduce # of iterations
- if (chunk != 0xFFU) {
- for (size_t i = pg_lookup_ptr % 8; i < 8; i++, pg_lookup_ptr++) {
- if (!(chunk & (0x80U >> i))) {
- pmm_mark_page_occupied(pg_lookup_ptr);
- good_page_found = pg_lookup_ptr << 12;
- break;
- }
- }
- } else {
- pg_lookup_ptr += 8;
-
- // We've searched the interval [old_pg_ptr, max_pg) but failed.
- // Maybe there are still free pages in [1, old_pg_ptr)?
- // Let's find out!
- if (pg_lookup_ptr >= upper_lim && old_pg_ptr != LOOKUP_START) {
- upper_lim = old_pg_ptr;
- pg_lookup_ptr = LOOKUP_START;
- old_pg_ptr = LOOKUP_START;
- }
- }
- }
- return (void*)good_page_found;
+ assert(pool_index < POOL_COUNT);
+
+ return &memory.pool[pool_index];
}
-int
-pmm_free_page(void* page)
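+// Describe a unified pool covering pages [start, start + size);
+// the caller chooses which pool slot to fill.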
+struct pmem_pool*
+pmm_declare_pool(int pool, pfn_t start, pfn_t size)
{
- // TODO: Add kernel reserved memory page check
- uint32_t pg = (uintptr_t)page >> 12;
- if (pg && pg < max_pg)
- {
- pmm_mark_page_free(pg);
- return 1;
- }
- return 0;
+ struct pmem_pool* _pool = &memory.pool[pool];
+
+ _pool->type = POOL_UNIFIED;
+ _pool->pool_end = ppage(start + size - 1);
+ _pool->pool_start = ppage(start);
+
+ return _pool;
}
\ No newline at end of file