* @author Lunaixsky (zelong56@gmail.com)
* @brief A simplified cake (slab) allocator.
* P.s. I call it "cake", as "slab" sounds too 'rigid' to me. :)
* @copyright Copyright (c) 2022
13 #include <klibc/string.h>
14 #include <lunaix/mm/cake.h>
15 #include <lunaix/mm/page.h>
16 #include <lunaix/spike.h>
17 #include <lunaix/syslog.h>
21 #define CACHE_LINE_SIZE 128
23 struct cake_pile master_pile;
25 struct llist_header piles = { .next = &piles, .prev = &piles };
28 __alloc_cake(unsigned int cake_pg)
30 struct leaflet* leaflet = alloc_leaflet(count_order(cake_pg));
35 return (void*)vmap(leaflet, KERNEL_DATA);
39 __new_cake(struct cake_pile* pile)
41 struct cake_s* cake = __alloc_cake(pile->pg_per_cake);
47 u32_t max_piece = pile->pieces_per_cake;
51 cake->first_piece = (void*)((ptr_t)cake + pile->offset);
55 piece_index_t* free_list = cake->free_list;
56 for (size_t i = 0; i < max_piece - 1; i++) {
59 free_list[max_piece - 1] = EO_FREE_PIECE;
61 llist_append(&pile->free, &cake->cakes);
67 __init_pile(struct cake_pile* pile,
69 unsigned int piece_size,
70 unsigned int pg_per_cake,
77 unsigned int offset = sizeof(long);
80 if ((options & PILE_ALIGN_CACHE)) {
81 // 对齐到128字节缓存行大小,主要用于DMA
82 offset = CACHE_LINE_SIZE;
85 piece_size = ROUNDUP(piece_size, offset);
86 *pile = (struct cake_pile){ .piece_size = piece_size,
89 (pg_per_cake * PAGE_SIZE) /
90 (piece_size + sizeof(piece_index_t)),
91 .pg_per_cake = pg_per_cake };
93 unsigned int free_list_size = pile->pieces_per_cake * sizeof(piece_index_t);
95 pile->offset = ROUNDUP(sizeof(struct cake_s) + free_list_size, offset);
96 pile->pieces_per_cake -= ICEIL((pile->offset - free_list_size), piece_size);
98 strncpy(pile->pile_name, name, PILE_NAME_MAXLEN);
100 llist_init_head(&pile->free);
101 llist_init_head(&pile->full);
102 llist_init_head(&pile->partial);
103 llist_append(&piles, &pile->piles);
109 __init_pile(&master_pile, "pinkamina", sizeof(master_pile), 1, 0);
113 cake_new_pile(char* name,
114 unsigned int piece_size,
115 unsigned int pg_per_cake,
118 struct cake_pile* pile = (struct cake_pile*)cake_grab(&master_pile);
120 // must aligned to napot order!
121 assert(is_pot(pg_per_cake));
123 __init_pile(pile, name, piece_size, pg_per_cake, options);
129 cake_set_constructor(struct cake_pile* pile, pile_cb ctor)
135 cake_grab(struct cake_pile* pile)
137 struct cake_s *pos, *n;
138 if (!llist_empty(&pile->partial)) {
139 pos = list_entry(pile->partial.next, typeof(*pos), cakes);
140 } else if (llist_empty(&pile->free)) {
141 pos = __new_cake(pile);
143 pos = list_entry(pile->free.next, typeof(*pos), cakes);
149 piece_index_t found_index = pos->next_free;
150 pos->next_free = pos->free_list[found_index];
152 pile->alloced_pieces++;
154 llist_delete(&pos->cakes);
155 if (pos->free_list[pos->next_free] == EO_FREE_PIECE) {
156 llist_append(&pile->full, &pos->cakes);
158 llist_append(&pile->partial, &pos->cakes);
162 (void*)((ptr_t)pos->first_piece + found_index * pile->piece_size);
165 pile->ctor(pile, ptr);
172 cake_release(struct cake_pile* pile, void* area)
174 piece_index_t piece_index;
176 struct cake_s *pos, *n;
177 struct llist_header* hdrs[2] = { &pile->full, &pile->partial };
179 for (size_t i = 0; i < 2; i++) {
180 llist_for_each(pos, n, hdrs[i], cakes)
182 if (pos->first_piece > area) {
185 dsize = (ptr_t)(area - pos->first_piece);
186 piece_index = dsize / pile->piece_size;
187 if (piece_index < pile->pieces_per_cake) {
196 assert(!(dsize % pile->piece_size));
197 pos->free_list[piece_index] = pos->next_free;
198 pos->next_free = piece_index;
200 assert_msg(pos->free_list[piece_index] != pos->next_free, "double free");
203 pile->alloced_pieces--;
205 llist_delete(&pos->cakes);
206 if (!pos->used_pieces) {
207 llist_append(&pile->free, &pos->cakes);
209 llist_append(&pile->partial, &pos->cakes);
212 *((unsigned int*)area) = DEADCAKE_MARK;
218 cake_ctor_zeroing(struct cake_pile* pile, void* piece)
220 memset(piece, 0, pile->piece_size);