#include <lunaix/spike.h>
#include "pmm_internal.h"

#ifdef CONFIG_PMALLOC_SIMPLE

// Simple PM Allocator (segregated next fit)
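
// po_limit[k] caps how many order-k blocks a pool keeps cached on its
// idle list; blocks freed beyond the cap are simply marked
// uninitialized and left for the next-fit scan to reclaim.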
static const int po_limit[] = {
    CONFIG_PMALLOC_SIMPLE_PO0_THRES,
    CONFIG_PMALLOC_SIMPLE_PO1_THRES,
    CONFIG_PMALLOC_SIMPLE_PO2_THRES,
    CONFIG_PMALLOC_SIMPLE_PO3_THRES,
    CONFIG_PMALLOC_SIMPLE_PO4_THRES,
    CONFIG_PMALLOC_SIMPLE_PO5_THRES,
    CONFIG_PMALLOC_SIMPLE_PO6_THRES,
    CONFIG_PMALLOC_SIMPLE_PO7_THRES,
    CONFIG_PMALLOC_SIMPLE_PO8_THRES,
    CONFIG_PMALLOC_SIMPLE_PO9_THRES,
};
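
// INIT_FLAG marks a struct ppage whose metadata is live, either because
// the page is allocated or because it sits on an idle list; pages with
// the flag clear are free and visible only to the next-fit scanner.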
static inline bool
__uninitialized_page(struct ppage* page)
{
    return !(page->flags & INIT_FLAG);
}

static inline void
__set_page_initialized(struct ppage* page)
{
    page->flags |= INIT_FLAG;
}

static inline void
__set_pages_uninitialized(struct ppage* lead)
{
    for (size_t i = 0; i < (1UL << lead->order); i++) {
        lead[i].flags &= ~INIT_FLAG;
    }
}
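
// Pool bring-up: empty every per-order idle list and zero each page
// descriptor, leaving the entire pool in the uninitialized (free) state.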
void
pmm_allocator_init(struct pmem* memory)
{
    // nothing to do globally for the simple allocator
}

void
pmm_allocator_init_pool(struct pmem_pool* pool)
{
    for (int i = 0; i < MAX_PAGE_ORDERS; i++) {
        llist_init_head(&pool->idle_order[i]);
        pool->count[i] = 0;
    }

    struct ppage* pooled_page = pool->pool_start;
    for (; pooled_page <= pool->pool_end; pooled_page++) {
        *pooled_page = (struct ppage){ };
    }
}
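
// Free one NAPOT block. While the idle list of its order is below the
// po_limit threshold the block is cached there; otherwise its pages
// revert to the uninitialized state for the scanner to reclaim.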
bool
pmm_free_one(struct ppage* page, int type_mask)
{
    page = leading_page(page);

    assert(!reserved_page(page));
    assert(!__uninitialized_page(page));

    if (--page->refs) {
        // still referenced elsewhere; nothing to recycle yet
        return true;
    }

    int order = page->order;
    assert(order < MAX_PAGE_ORDERS);

    struct pmem_pool* pool = pmm_pool_lookup(page);
    struct llist_header* bucket = &pool->idle_order[order];

    if (pool->count[order] < po_limit[order]) {
        llist_append(bucket, &page->sibs);
        pool->count[order]++;
        return true;
    }

    __set_pages_uninitialized(page);

    return true;
}
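
// Next-fit scan. `index` remembers the pool-relative pfn where the
// previous scan stopped; pmm_looknext() resumes there, hunting for
// 2^order consecutive uninitialized pages, and gives up after one lap.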
static pfn_t index = 0;

static struct ppage*
pmm_looknext(struct pmem_pool* pool, size_t order)
{
    struct ppage *lead, *tail = NULL;
    pfn_t working = index;
    size_t total = 1UL << order, count = total;
    size_t poolsz = ppfn_of(pool, pool->pool_end) + 1;

    do {
        tail = ppage_of(pool, working);

        if (__uninitialized_page(tail)) {
            count--;
        } else {
            // run broken by a live page; start counting afresh
            count = total;
        }

        working = (working + 1) % poolsz;
        if (!working && count) {
            // an unfinished run cannot straddle the pool boundary
            count = total;
        }
    } while (count && working != index);

    if (count) {
        // made a full lap without finding a free run of this order
        return NULL;
    }

    index = working;

    lead = tail - total + 1;
    for (size_t i = 0; i < total; i++) {
        struct ppage* page = &lead[i];

        page->order = order;
        page->pool = pool->type;
        llist_init_head(&page->sibs);
        __set_page_initialized(page);
    }

    return lead;
}
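
// Allocate one 2^order (NAPOT) block of the given type: serve from the
// idle list when a block is cached there, otherwise fall back to the
// next-fit scan over uninitialized pages.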
struct ppage*
pmm_alloc_napot_type(int pool, size_t order, ppage_type_t type)
{
    assert(order < MAX_PAGE_ORDERS);

    struct pmem_pool* _pool = pmm_pool_get(pool);
    struct llist_header* bucket = &_pool->idle_order[order];

    struct ppage* good_page = NULL;
    if (!llist_empty(bucket)) {
        (_pool->count[order])--;
        good_page = list_entry(bucket->next, struct ppage, sibs);
        llist_delete(&good_page->sibs);
    }
    else {
        good_page = pmm_looknext(_pool, order);
        if (!good_page) {
            // no contiguous run of this order left in the pool
            return NULL;
        }
    }

    assert(!good_page->refs);

    good_page->refs = 1;
    good_page->type = type;

    return good_page;
}
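
// Mark [start, end] as on hold (e.g. firmware or boot-reserved ranges):
// uninitialized pages are claimed as reserved, cached idle blocks are
// withdrawn first, and holding a live allocation is refused outright.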
void
pmm_allocator_trymark_onhold(struct pmem_pool* pool,
                             struct ppage* start,
                             struct ppage* end)
{
    while (start <= end) {
        if (__uninitialized_page(start)) {
            // claim as reserved; exact marking assumed (reserved_page()
            // must see it, and refs are held so idle logic skips it)
            start->refs = 1;
            start->type = PP_RESERVED;
            __set_page_initialized(start);
        }
        else if (!start->refs) {
            // the page sits on an idle list: withdraw the whole block,
            // then re-examine it as uninitialized
            struct ppage* lead = leading_page(start);
            llist_delete(&lead->sibs);
            pool->count[lead->order]--;
            __set_pages_uninitialized(lead);
            continue;
        }
        else if (!reserved_page(start)) {
            // refusing to put a live allocation on hold
            fail("hold on allocated page");
        }

        start++;
    }
}
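
// Release a previously held range: every page currently marked reserved
// reverts to the uninitialized (free) state.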
void
pmm_allocator_trymark_unhold(struct pmem_pool* pool,
                             struct ppage* start,
                             struct ppage* end)
{
    while (start <= end) {
        if (!__uninitialized_page(start) && reserved_page(start)) {
            start->refs = 0;    // drop the hold reference (assumed)
            __set_pages_uninitialized(start);
        }
        start++;
    }
}

#endif