3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief A simplified cake(slab) allocator.
* P.S. I call it cake as slab sounds more 'rigid' to me. :)
9 * @copyright Copyright (c) 2022
13 #include <klibc/string.h>
14 #include <lunaix/mm/cake.h>
15 #include <lunaix/mm/pmm.h>
16 #include <lunaix/mm/vmm.h>
17 #include <lunaix/spike.h>
18 #include <lunaix/syslog.h>
// Cache-line alignment applied when PILE_ALIGN_CACHE is requested
// (mainly useful for DMA buffers).
#define CACHE_LINE_SIZE 128

// The pile of piles: every cake_pile descriptor handed out by
// cake_new_pile() is itself a piece grabbed from this master pile.
struct cake_pile master_pile;

// Global registry of all piles, statically initialized as an empty
// circular doubly-linked list (head points to itself).
struct llist_header piles = { .next = &piles, .prev = &piles };
29 __alloc_cake(unsigned int cake_pg)
31 ptr_t pa = (ptr_t)pmm_alloc_cpage(cake_pg, 0);
35 return (void*)vmap(pa, cake_pg * PAGE_SIZE, KERNEL_DATA);
39 __new_cake(struct cake_pile* pile)
41 struct cake_s* cake = __alloc_cake(pile->pg_per_cake);
47 u32_t max_piece = pile->pieces_per_cake;
51 cake->first_piece = (void*)((ptr_t)cake + pile->offset);
55 piece_index_t* free_list = cake->free_list;
56 for (size_t i = 0; i < max_piece - 1; i++) {
59 free_list[max_piece - 1] = EO_FREE_PIECE;
61 llist_append(&pile->free, &cake->cakes);
67 __init_pile(struct cake_pile* pile,
69 unsigned int piece_size,
70 unsigned int pg_per_cake,
77 unsigned int offset = sizeof(long);
80 if ((options & PILE_ALIGN_CACHE)) {
81 // 对齐到128字节缓存行大小,主要用于DMA
82 offset = CACHE_LINE_SIZE;
85 piece_size = ROUNDUP(piece_size, offset);
86 *pile = (struct cake_pile){ .piece_size = piece_size,
89 (pg_per_cake * PAGE_SIZE) /
90 (piece_size + sizeof(piece_index_t)),
91 .pg_per_cake = pg_per_cake };
93 unsigned int free_list_size = pile->pieces_per_cake * sizeof(piece_index_t);
95 pile->offset = ROUNDUP(sizeof(struct cake_s) + free_list_size, offset);
96 pile->pieces_per_cake -= ICEIL((pile->offset - free_list_size), piece_size);
98 strncpy(pile->pile_name, name, PILE_NAME_MAXLEN);
100 llist_init_head(&pile->free);
101 llist_init_head(&pile->full);
102 llist_init_head(&pile->partial);
103 llist_append(&piles, &pile->piles);
109 __init_pile(&master_pile, "pinkamina", sizeof(master_pile), 1, 0);
113 cake_new_pile(char* name,
114 unsigned int piece_size,
115 unsigned int pg_per_cake,
118 struct cake_pile* pile = (struct cake_pile*)cake_grab(&master_pile);
120 __init_pile(pile, name, piece_size, pg_per_cake, options);
126 cake_set_constructor(struct cake_pile* pile, pile_cb ctor)
132 cake_grab(struct cake_pile* pile)
134 struct cake_s *pos, *n;
135 if (!llist_empty(&pile->partial)) {
136 pos = list_entry(pile->partial.next, typeof(*pos), cakes);
137 } else if (llist_empty(&pile->free)) {
138 pos = __new_cake(pile);
140 pos = list_entry(pile->free.next, typeof(*pos), cakes);
146 piece_index_t found_index = pos->next_free;
147 pos->next_free = pos->free_list[found_index];
149 pile->alloced_pieces++;
151 llist_delete(&pos->cakes);
152 if (pos->free_list[pos->next_free] == EO_FREE_PIECE) {
153 llist_append(&pile->full, &pos->cakes);
155 llist_append(&pile->partial, &pos->cakes);
159 (void*)((ptr_t)pos->first_piece + found_index * pile->piece_size);
162 pile->ctor(pile, ptr);
169 cake_release(struct cake_pile* pile, void* area)
171 piece_index_t piece_index;
173 struct cake_s *pos, *n;
174 struct llist_header* hdrs[2] = { &pile->full, &pile->partial };
176 for (size_t i = 0; i < 2; i++) {
177 llist_for_each(pos, n, hdrs[i], cakes)
179 if (pos->first_piece > area) {
182 dsize = (ptr_t)(area - pos->first_piece);
183 piece_index = dsize / pile->piece_size;
184 if (piece_index < pile->pieces_per_cake) {
193 assert(!(dsize % pile->piece_size));
194 pos->free_list[piece_index] = pos->next_free;
195 pos->next_free = piece_index;
197 assert_msg(pos->free_list[piece_index] != pos->next_free, "double free");
200 pile->alloced_pieces--;
202 llist_delete(&pos->cakes);
203 if (!pos->used_pieces) {
204 llist_append(&pile->free, &pos->cakes);
206 llist_append(&pile->partial, &pos->cakes);
209 *((unsigned int*)area) = DEADCAKE_MARK;
215 cake_ctor_zeroing(struct cake_pile* pile, void* piece)
217 memset(piece, 0, pile->piece_size);