* @file dmm.c
* @author Lunaixsky
* @brief Dynamic memory manager dedicated to kernel heap. It is not portable at
- * this moment.
+ * this moment. Implicit free list implementation.
* @version 0.1
* @date 2022-02-28
*
*
*/
+// TODO: Make the dmm portable
+
#include <lunaix/mm/dmm.h>
#include <lunaix/mm/page.h>
#include <lunaix/mm/vmm.h>
-#include <lunaix/assert.h>
#include <lunaix/constants.h>
#include <lunaix/spike.h>
-#include <stdbool.h>
-
#define M_ALLOCATED 0x1
#define M_PREV_FREE 0x2
void*
lx_grow_heap(size_t sz);
+void place_chunk(uint8_t* ptr, size_t size);
+
// NOTE(review): this file is a unified-diff fragment ('-'/'+' prefixed
// lines); the markers below are preserved verbatim.
/**
 * Initialize the kernel heap: plant the prologue marker word (size 0,
 * M_ALLOCATED) just past heap_start, advance the heap top over it, then
 * grow the heap to HEAP_INIT_SIZE.
 * Returns nonzero (1) on success, 0 if the initial growth failed.
 */
int
dmm_init()
{
SW(heap_start + WSIZE, PACK(0, M_ALLOCATED));
current_heap_top += WSIZE;
- return lx_grow_heap(HEAP_INIT_SIZE);
+ // lx_grow_heap returns a pointer; normalize it to a success flag so the
+ // int return type of dmm_init is honest.
+ return lx_grow_heap(HEAP_INIT_SIZE) != NULL;
}
/**
 * Move the heap break up by `size` bytes (rounded up to word granularity,
 * plus one word for the epilogue marker), mapping new pages on demand.
 *
 * Returns the previous heap top on success, NULL on failure
 * (break would collide with K_STACK_START, or page allocation failed);
 * size == 0 is a pure query returning the current heap top.
 *
 * NOTE(review): declared `int` yet every return statement yields a pointer
 * value (current_heap_top / NULL / old) — the declaration is presumably
 * meant to be `void*`; confirm against the header.
 */
int
lxbrk(size_t size)
{
if (size == 0) {
- return NULL;
+ return current_heap_top;
}
// plus WSIZE is the overhead for epilogue marker
size += WSIZE;
void* next = current_heap_top + ROUNDUP((uintptr_t)size, WSIZE);
- if (next >= K_STACK_START) {
+ if ((uintptr_t)next >= K_STACK_START) {
// growing any further would run into the kernel stack region
return NULL;
}
if (heap_top_pg != PG_ALIGN(next))
{
// the new break crosses a page boundary: map the additional pages
- if (!vmm_alloc_pages(heap_top_pg + PG_SIZE, ROUNDUP(size, PG_SIZE), PG_PRESENT | PG_WRITE)) {
- // TODO: OOM, panic here! Rather than spinning.
- spin();
- // return NULL
+ if (!vmm_alloc_pages((void*)(heap_top_pg + PG_SIZE), ROUNDUP(size, PG_SIZE), PG_PREM_RW)) {
+ return NULL;
}
}
- uintptr_t old = current_heap_top;
+ void* old = current_heap_top;
// keep the epilogue word above the returned region, like sbrk()
current_heap_top = next - WSIZE;
return old;
}
/**
 * Extend the heap by at least `sz` bytes (rounded up to BOUNDARY) via
 * lxbrk(), returning a pointer into the newly grown region or NULL on OOM.
 */
void*
lx_grow_heap(size_t sz) {
- uintptr_t start;
+ void* start;
sz = ROUNDUP(sz, BOUNDARY);
if (!(start = lxbrk(sz))) {
// NOTE(review): a diff hunk appears to be elided here — the following
// lines belong to a different function (presumably lx_malloc: `size` and
// `ptr` are never declared in this view). TODO: recover the missing hunk.
// round to largest 4B aligned value
// and space for header
size = ROUNDUP(size, BOUNDARY) + WSIZE;
// first-fit scan of the implicit free list, header to header
- while (ptr < current_heap_top) {
+ while (ptr < (uint8_t*)current_heap_top) {
uint32_t header = *((uint32_t*)ptr);
size_t chunk_size = CHUNK_S(header);
if (chunk_size >= size && !CHUNK_A(header)) {
// found a free chunk large enough — claim it (split handled inside)
- *((uint32_t*)ptr) = PACK(size, CHUNK_PF(header) | M_ALLOCATED);
- uint8_t* n_hdrptr = (uint8_t*)(ptr + size);
- uint32_t diff = chunk_size - size;
- if (!diff) {
- // if the current free block is fully occupied
- uint32_t n_hdr = LW(n_hdrptr);
- // notify the next block about our avaliability
- SW(n_hdrptr, n_hdr & ~0x2);
- } else {
- // if there is remaining free space left
- uint32_t remainder_hdr =
- PACK(diff, M_NOT_ALLOCATED | M_PREV_ALLOCATED);
- SW(n_hdrptr, remainder_hdr);
- SW(FPTR(n_hdrptr, diff), remainder_hdr);
-
- coalesce(n_hdrptr);
- }
+ place_chunk(ptr, size);
return BPTR(ptr);
}
ptr += chunk_size;
}
+ // if heap is full (seems to be!), then allocate more space (if it's okay...)
+ if ((ptr = lx_grow_heap(size))) {
+ place_chunk(ptr, size);
+ return BPTR(ptr);
+ }
+
+ // Well, we are officially OOM!
+ return NULL;
}
+/**
+ * Mark the free chunk whose header is at `ptr` as allocated with payload
+ * span `size`, splitting off any remainder as a new free chunk.
+ *
+ * @param ptr  pointer to the chunk header (not the payload)
+ * @param size total chunk size to claim, header included; must be
+ *             <= CHUNK_S(*ptr) — presumably guaranteed by the caller's
+ *             first-fit check (TODO confirm)
+ */
+void place_chunk(uint8_t* ptr, size_t size) {
+ uint32_t header = *((uint32_t*)ptr);
+ size_t chunk_size = CHUNK_S(header);
+ // rewrite the header: same prev-free flag, now marked allocated
+ *((uint32_t*)ptr) = PACK(size, CHUNK_PF(header) | M_ALLOCATED);
+ uint8_t* n_hdrptr = (uint8_t*)(ptr + size);
+ uint32_t diff = chunk_size - size;
+ if (!diff) {
+ // the free chunk is fully consumed — no split needed
+ uint32_t n_hdr = LW(n_hdrptr);
+ // clear the next chunk's M_PREV_FREE bit (0x2): its predecessor
+ // is now allocated
+ SW(n_hdrptr, n_hdr & ~0x2);
+ } else {
+ // split: turn the leftover tail into a free chunk with matching
+ // header and footer (boundary tags), then coalesce with neighbors
+ uint32_t remainder_hdr =
+ PACK(diff, M_NOT_ALLOCATED | M_PREV_ALLOCATED);
+ SW(n_hdrptr, remainder_hdr);
+ SW(FPTR(n_hdrptr, diff), remainder_hdr);
+
+ coalesce(n_hdrptr);
+ }
+}
+
void
lx_free(void* ptr)
{
+ if (!ptr) {
+ return;
+ }
+
uint8_t* chunk_ptr = (uint8_t*)ptr - WSIZE;
uint32_t hdr = LW(chunk_ptr);
uint8_t* next_hdr = chunk_ptr + CHUNK_S(hdr);
uint32_t hdr = LW(chunk_ptr);
uint32_t pf = CHUNK_PF(hdr);
uint32_t sz = CHUNK_S(hdr);
- uint32_t ftr = LW(chunk_ptr + sz - WSIZE);
uint32_t n_hdr = LW(chunk_ptr + sz);