build/
playground/
.vscode/settings.json
+.vscode/*.log
.VSCodeCounter/
\ No newline at end of file
--- /dev/null
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "type": "gdb",
+ "request": "attach",
+ "name": "Attach to QEMU",
+ "executable": "${workspaceRoot}/build/bin/lunaix.bin",
+ "target": ":1234",
+ "remote": true,
+ "cwd": "${workspaceRoot}",
+ "valuesFormatting": "parseText",
+ "preLaunchTask": "Launch LunaixOS"
+ }
+ ]
+}
\ No newline at end of file
--- /dev/null
+{
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": "Launch LunaixOS",
+ "type": "shell",
+ "command": "make debug-qemu-vscode",
+ "isBackground": true,
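+            // The background problem matcher below tells VS Code this long-running
+            // task is "ready" once the QEMU monitor prompt appears (endsPattern),
+            // so the gdb attach in launch.json can proceed.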
+ "problemMatcher": {
+ "background": {
+ "activeOnStart": true,
+ "endsPattern": "^\\(qemu\\).*"
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
#ifndef __LUNAIX_CONSTANTS_H
#define __LUNAIX_CONSTANTS_H
-#define K_STACK_SIZE 0x100000U
+#define K_STACK_SIZE (64 << 10) // 64 KiB kernel stack
#define K_STACK_START ((0xFFBFFFFFU - K_STACK_SIZE) + 1)
#define HIGHER_HLF_BASE 0xC0000000UL
#define MEM_1MB 0x100000UL
#include <stddef.h>
+#define M_ALLOCATED 0x1
+#define M_PREV_FREE 0x2
+
+#define M_NOT_ALLOCATED 0x0
+#define M_PREV_ALLOCATED 0x0
+
+#define CHUNK_S(header) ((header) & ~0x3)
+#define CHUNK_PF(header) ((header)&M_PREV_FREE)
+#define CHUNK_A(header) ((header)&M_ALLOCATED)
+
+#define PACK(size, flags) (((size) & ~0x3) | (flags))
+
+#define SW(p, w) (*((uint32_t*)(p)) = w)
+#define LW(p) (*((uint32_t*)(p)))
+
+#define HPTR(bp) ((uint32_t*)(bp)-1)
+#define BPTR(bp) ((uint8_t*)(bp) + WSIZE)
+#define FPTR(hp, size) ((uint32_t*)(hp + size - WSIZE))
+#define NEXT_CHK(hp) ((uint8_t*)(hp) + CHUNK_S(LW(hp)))
+
+#define BOUNDARY 4
+#define WSIZE 4
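+
+/*
+ * Worked example of the chunk-header encoding (illustrative only, using the
+ * 4-byte word size defined above): a 16-byte allocated chunk whose predecessor
+ * is free packs into a single header word, and the accessors recover each field.
+ *
+ *   uint32_t hdr = PACK(16, M_ALLOCATED | M_PREV_FREE); // 0x10 | 0x1 | 0x2 = 0x13
+ *   CHUNK_S(hdr);  // -> 16   (size, low two bits masked off)
+ *   CHUNK_A(hdr);  // -> 0x1  (this chunk is allocated)
+ *   CHUNK_PF(hdr); // -> 0x2  (previous chunk is free)
+ */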
+
#define HEAP_INIT_SIZE 4096
typedef struct
{
void* start;
void* brk;
+ void* max_addr;
} heap_context_t;
lxbrk(heap_context_t* heap, size_t size);
void*
-lx_malloc(heap_context_t* heap, size_t size);
+lx_malloc_internal(heap_context_t* heap, size_t size);
void
-lx_free(void* ptr);
+lx_free_internal(void* ptr);
#endif /* __LUNAIX_DMM_H */
kalloc_init();
/**
- * @brief Allocate an accessible memory region in kernel heap.
+ * @brief Allocate a contiguous, uninitialized memory region in the kernel heap.
*
* @remarks
* This is NOT the same as kmalloc in Linux!
* @return void*
*/
void*
-kmalloc(size_t size);
+lxmalloc(size_t size);
/**
- * @brief calloc for kernel heap. A wrapper for kmalloc
+ * @brief Allocate a contiguous, initialized memory region in the kernel heap.
* @param size
* @return void*
*/
void*
-kcalloc(size_t size);
+lxcalloc(size_t size);
/**
* @brief Free the memory region allocated by kmalloc
* @return void*
*/
void
-kfree(void* ptr);
+lxfree(void* ptr);
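+
+/*
+ * Minimal usage sketch (illustrative only, not part of the header): the routines
+ * follow the familiar malloc/calloc/free contract, but operate on the kernel heap.
+ *
+ *   uint32_t* table = (uint32_t*)lxmalloc(16 * sizeof(uint32_t));
+ *   if (table) {
+ *       table[0] = 42;
+ *       lxfree(table);
+ *   }
+ */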
#endif /* __LUNAIX_KALLOC_H */
#define PG_ENTRY_FLAGS(entry) (entry & 0xFFFU)
#define PG_ENTRY_ADDR(entry) (entry & ~0xFFFU)
+#define HAS_FLAGS(entry, flags) ((PG_ENTRY_FLAGS(entry) & (flags)) == (flags))
+#define CONTAINS_FLAGS(entry, flags) (PG_ENTRY_FLAGS(entry) & (flags))
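+
+/*
+ * Illustrative sketch (assuming the usual x86 bit assignment PG_PRESENT = 0x1,
+ * PG_WRITE = 0x2): HAS_FLAGS requires every listed flag to be set, while
+ * CONTAINS_FLAGS is satisfied by any one of them.
+ *
+ *   HAS_FLAGS(0x003, PG_PRESENT | PG_WRITE)      // true: both bits set
+ *   HAS_FLAGS(0x001, PG_PRESENT | PG_WRITE)      // false: write bit missing
+ *   CONTAINS_FLAGS(0x001, PG_PRESENT | PG_WRITE) // nonzero: present bit alone
+ */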
+
#define PG_PREM_R PG_PRESENT
#define PG_PREM_RW PG_PRESENT | PG_WRITE
#define PG_PREM_UR PG_PRESENT | PG_ALLOW_USER
}
void __assert_fail(const char* expr, const char* file, unsigned int line) __attribute__((noinline, noreturn));
#else
-#define assert(cond) //nothing
+#define assert(cond) //assert nothing
+#define assert_msg(cond, msg) //assert nothing
#endif
#include <arch/x86/interrupts.h>
-#include <lunaix/tty/tty.h>
+#include <hal/cpu.h>
#include <libc/stdio.h>
+#include <lunaix/tty/tty.h>
-void panic_msg(const char* msg) {
+void
+panic_msg(const char* msg)
+{
tty_set_theme(VGA_COLOR_WHITE, VGA_COLOR_RED);
tty_clear_line(10);
tty_clear_line(11);
printf(" %s", msg);
}
-void panic (const char* msg, isr_param* param) {
+void
+panic(const char* msg, isr_param* param)
+{
char buf[1024];
- sprintf(buf, "INT %u: (%x) [%p: %p] %s", param->vector, param->err_code, param->cs, param->eip, msg);
+ sprintf(buf,
+ "INT %u: (%x) [%p: %p] %s",
+ param->vector,
+ param->err_code,
+ param->cs,
+ param->eip,
+ msg);
panic_msg(buf);
- while(1);
+ while (1)
+ ;
}
-void
-interrupt_handler(isr_param* param) {
- switch (param->vector)
- {
+void
+interrupt_handler(isr_param* param)
+{
+ switch (param->vector) {
case 0:
panic("Division by 0", param);
- break; // never reach
+ break; // never reach
case FAULT_GENERAL_PROTECTION:
panic("General Protection", param);
- break; // never reach
+ break; // never reach
case FAULT_PAGE_FAULT:
- panic("Page Fault", param);
- break; // never reach
+        {
+            // CR2 holds the faulting linear address; treat a null faulting
+            // address as a null pointer dereference.
+            void* pg_fault_ptr = cpu_rcr2();
+            if (pg_fault_ptr) {
+                panic("Page Fault", param);
+            } else {
+                panic("Null pointer reference", param);
+            }
+            break; // never reach
+        }
case LUNAIX_SYS_PANIC:
panic_msg((char*)(param->registers.edi));
- while(1);
- break; // never reach
+ while (1)
+ ;
+ break; // never reach
default:
panic("Unknown Interrupt", param);
- break; // never reach
+ break; // never reach
}
}
\ No newline at end of file
// test malloc & free
- uint32_t** arr = (uint32_t**) kmalloc(10 * sizeof(uint32_t*));
+ uint8_t** arr = (uint8_t**) lxmalloc(10 * sizeof(uint8_t*));
for (size_t i = 0; i < 10; i++)
{
- arr[i] = (uint32_t*) kmalloc((i + 1) * 2);
+ arr[i] = (uint8_t*) lxmalloc((i + 1) * 2);
}
for (size_t i = 0; i < 10; i++)
{
- kfree(arr[i]);
+ lxfree(arr[i]);
}
- void* big_ = kmalloc(8192);
+ uint8_t* big_ = lxmalloc(8192);
+ big_[0] = 123;
+ big_[1] = 23;
+ big_[2] = 3;
+
+ printf("%u, %u, %u", big_[0], big_[1], big_[2]);
// good free
- kfree(arr);
- kfree(big_);
+ lxfree(arr);
+ lxfree(big_);
- uint8_t* bad1 = kmalloc(123);
- void* bad2 = kmalloc(1);
+ // uint8_t* bad1 = lxmalloc(123);
+ // void* bad2 = lxmalloc(1);
- *((uint32_t*)(bad1 - 4)) = 0xc2343312UL;
+ // *((uint32_t*)(bad1 - 4)) = 0xc2343312UL;
- // bad free
- kfree(bad1);
- kfree(bad2 - 2);
+ // // bad free
+ // lxfree(bad1);
+ // lxfree(bad2 - 2);
}
\ No newline at end of file
/**
* @file dmm.c
* @author Lunaixsky
- * @brief Dynamic memory manager dedicated to kernel heap. Using implicit free
- * list implementation. This is designed to be portable, so it can serve as
- * syscalls to malloc/free in the c std lib.
+ * @brief Dynamic memory manager for the heap. This design does not incorporate any
+ * specific implementation of the malloc family; the main purpose of these routines is
+ * to provide handy methods to initialize & grow the heap as needed by the upstream
+ * implementation.
*
- * This version of code is however the simplest and yet insecured,
- * it just to demonstrate how the malloc/free works behind the stage
+ * This is designed to be portable, so it can serve as syscalls to malloc/free in the c std lib.
*
* @version 0.2
* @date 2022-03-3
*/
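+
+/*
+ * Typical flow for an upstream allocator (illustrative sketch only; `base` and
+ * `limit` stand for caller-chosen virtual addresses):
+ *
+ *   heap_context_t heap = { .start = base, .max_addr = limit };
+ *   dmm_init(&heap);                  // brk = start, first page mapped
+ *   void* old = lxbrk(&heap, 4096);   // extend by 4 KiB; returns the previous brk
+ */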
#include <lunaix/mm/dmm.h>
-#include <lunaix/mm/page.h>
#include <lunaix/mm/vmm.h>
+#include <lunaix/mm/page.h>
-#include <lunaix/constants.h>
#include <lunaix/spike.h>
-#define M_ALLOCATED 0x1
-#define M_PREV_FREE 0x2
-
-#define M_NOT_ALLOCATED 0x0
-#define M_PREV_ALLOCATED 0x0
-
-#define CHUNK_S(header) ((header) & ~0x3)
-#define CHUNK_PF(header) ((header)&M_PREV_FREE)
-#define CHUNK_A(header) ((header)&M_ALLOCATED)
-
-#define PACK(size, flags) (((size) & ~0x3) | (flags))
-
-#define SW(p, w) (*((uint32_t*)(p)) = w)
-#define LW(p) (*((uint32_t*)(p)))
-
-#define HPTR(bp) ((uint32_t*)(bp)-1)
-#define BPTR(bp) ((uint8_t*)(bp) + WSIZE)
-#define FPTR(hp, size) ((uint32_t*)(hp + size - WSIZE))
-#define NEXT_CHK(hp) ((uint8_t*)(hp) + CHUNK_S(LW(hp)))
-
-#define BOUNDARY 4
-#define WSIZE 4
-
-void*
-coalesce(uint8_t* chunk_ptr);
-
-void*
-lx_grow_heap(heap_context_t* heap, size_t sz);
-
-void
-place_chunk(uint8_t* ptr, size_t size);
-
int
dmm_init(heap_context_t* heap)
{
heap->brk = heap->start;
- vmm_alloc_page(heap->brk, PG_PREM_RW);
-
- SW(heap->start, PACK(4, M_ALLOCATED));
- SW(heap->start + WSIZE, PACK(0, M_ALLOCATED));
- heap->brk += WSIZE;
-
- return lx_grow_heap(heap, HEAP_INIT_SIZE) != NULL;
+ return vmm_alloc_page(heap->brk, PG_PREM_RW) != NULL;
}
int
return heap->brk;
}
+ void* current_brk = heap->brk;
+
// The upper bound of our next brk of heap given the size.
// This will be used to calculate the page we need to allocate.
- // The "+ WSIZE" capture the overhead for epilogue marker
- void* next = heap->brk + ROUNDUP(size + WSIZE, WSIZE);
+ void* next = current_brk + ROUNDUP(size, BOUNDARY);
- if ((uintptr_t)next >= K_STACK_START) {
+    // reject invalid cases: brk beyond the heap limit, or address overflow
+ if (next >= heap->max_addr || next < current_brk) {
return NULL;
}
- uintptr_t heap_top_pg = PG_ALIGN(heap->brk);
- if (heap_top_pg != PG_ALIGN(next)) {
+ uintptr_t diff = PG_ALIGN(next) - PG_ALIGN(current_brk);
+ if (diff) {
// if next do require new pages to be allocated
- if (!vmm_alloc_pages((void*)(heap_top_pg + PG_SIZE),
- ROUNDUP(size, PG_SIZE),
+ if (!vmm_alloc_pages((void*)(PG_ALIGN(current_brk) + PG_SIZE),
+ diff,
PG_PREM_RW)) {
+ // for debugging
+ assert_msg(0, "unable to brk");
return NULL;
}
}
- void* old = heap->brk;
heap->brk += size;
- return old;
-}
-
-void*
-lx_grow_heap(heap_context_t* heap, size_t sz)
-{
- void* start;
-
- if (!(start = lxbrk(heap, sz))) {
- return NULL;
- }
- sz = ROUNDUP(sz, BOUNDARY);
-
- uint32_t old_marker = *((uint32_t*)start);
- uint32_t free_hdr = PACK(sz, CHUNK_PF(old_marker));
- SW(start, free_hdr);
- SW(FPTR(start, sz), free_hdr);
- SW(NEXT_CHK(start), PACK(0, M_ALLOCATED | M_PREV_FREE));
-
- return coalesce(start);
-}
-
-void*
-lx_malloc(heap_context_t* heap, size_t size)
-{
- // Simplest first fit approach.
-
- uint8_t* ptr = heap->start;
- // round to largest 4B aligned value
- // and space for header
- size = ROUNDUP(size, BOUNDARY) + WSIZE;
- while (ptr < (uint8_t*)heap->brk) {
- uint32_t header = *((uint32_t*)ptr);
- size_t chunk_size = CHUNK_S(header);
- if (chunk_size >= size && !CHUNK_A(header)) {
- // found!
- place_chunk(ptr, size);
- return BPTR(ptr);
- }
- ptr += chunk_size;
- }
-
- // if heap is full (seems to be!), then allocate more space (if it's
- // okay...)
- if ((ptr = lx_grow_heap(heap, size))) {
- place_chunk(ptr, size);
- return BPTR(ptr);
- }
-
- // Well, we are officially OOM!
- return NULL;
-}
-
-void
-place_chunk(uint8_t* ptr, size_t size)
-{
- uint32_t header = *((uint32_t*)ptr);
- size_t chunk_size = CHUNK_S(header);
- *((uint32_t*)ptr) = PACK(size, CHUNK_PF(header) | M_ALLOCATED);
- uint8_t* n_hdrptr = (uint8_t*)(ptr + size);
- uint32_t diff = chunk_size - size;
- if (!diff) {
- // if the current free block is fully occupied
- uint32_t n_hdr = LW(n_hdrptr);
- // notify the next block about our avaliability
- SW(n_hdrptr, n_hdr & ~0x2);
- } else {
- // if there is remaining free space left
- uint32_t remainder_hdr = PACK(diff, M_NOT_ALLOCATED | M_PREV_ALLOCATED);
- SW(n_hdrptr, remainder_hdr);
- SW(FPTR(n_hdrptr, diff), remainder_hdr);
-
- coalesce(n_hdrptr);
- }
-}
-
-void
-lx_free(void* ptr)
-{
- if (!ptr) {
- return;
- }
-
- uint8_t* chunk_ptr = (uint8_t*)ptr - WSIZE;
- uint32_t hdr = LW(chunk_ptr);
- size_t sz = CHUNK_S(hdr);
- uint8_t* next_hdr = chunk_ptr + sz;
-
- // make sure the ptr we are 'bout to free makes sense
- // the size trick comes from:
- // https://sourceware.org/git/?p=glibc.git;a=blob;f=malloc/malloc.c;h=1a1ac1d8f05b6f9bf295d7fdd0f12c2e4650a33c;hb=HEAD#l4437
- assert_msg(((uintptr_t)ptr < (uintptr_t)(-sz)) && !((uintptr_t)ptr & ~0x3),
- "free(): invalid pointer");
- assert_msg(sz > WSIZE && (sz & ~0x3),
- "free(): invalid size");
-
- SW(chunk_ptr, hdr & ~M_ALLOCATED);
- SW(FPTR(chunk_ptr, sz), hdr & ~M_ALLOCATED);
- SW(next_hdr, LW(next_hdr) | M_PREV_FREE);
-
- coalesce(chunk_ptr);
-}
-
-void*
-coalesce(uint8_t* chunk_ptr)
-{
- uint32_t hdr = LW(chunk_ptr);
- uint32_t pf = CHUNK_PF(hdr);
- uint32_t sz = CHUNK_S(hdr);
-
- uint32_t n_hdr = LW(chunk_ptr + sz);
-
- if (CHUNK_A(n_hdr) && pf) {
- // case 1: prev is free
- uint32_t prev_ftr = LW(chunk_ptr - WSIZE);
- size_t prev_chunk_sz = CHUNK_S(prev_ftr);
- uint32_t new_hdr = PACK(prev_chunk_sz + sz, CHUNK_PF(prev_ftr));
- SW(chunk_ptr - prev_chunk_sz, new_hdr);
- SW(FPTR(chunk_ptr, sz), new_hdr);
- chunk_ptr -= prev_chunk_sz;
- } else if (!CHUNK_A(n_hdr) && !pf) {
- // case 2: next is free
- size_t next_chunk_sz = CHUNK_S(n_hdr);
- uint32_t new_hdr = PACK(next_chunk_sz + sz, pf);
- SW(chunk_ptr, new_hdr);
- SW(FPTR(chunk_ptr, sz + next_chunk_sz), new_hdr);
- } else if (!CHUNK_A(n_hdr) && pf) {
- // case 3: both free
- uint32_t prev_ftr = LW(chunk_ptr - WSIZE);
- size_t next_chunk_sz = CHUNK_S(n_hdr);
- size_t prev_chunk_sz = CHUNK_S(prev_ftr);
- uint32_t new_hdr =
- PACK(next_chunk_sz + prev_chunk_sz + sz, CHUNK_PF(prev_ftr));
- SW(chunk_ptr - prev_chunk_sz, new_hdr);
- SW(FPTR(chunk_ptr, sz + next_chunk_sz), new_hdr);
- chunk_ptr -= prev_chunk_sz;
- }
-
- // case 4: prev and next are not free
- return chunk_ptr;
+ return current_brk;
}
\ No newline at end of file
+/**
+ * @file kalloc.c
+ * @author Lunaixsky
+ * @brief Implicit free list implementation of malloc family, for kernel use.
+ *
+ * This version of the code is, however, the simplest one (insecure and thread-unsafe);
+ * it exists just to demonstrate how malloc/free works behind the curtain.
+ * @version 0.1
+ * @date 2022-03-05
+ *
+ * @copyright Copyright (c) 2022
+ *
+ */
#include <lunaix/mm/kalloc.h>
#include <lunaix/mm/dmm.h>
+#include <lunaix/constants.h>
+#include <lunaix/spike.h>
+
#include <libc/string.h>
#include <stdint.h>
heap_context_t __kalloc_kheap;
+void*
+lx_malloc_internal(heap_context_t* heap, size_t size);
+
+void
+place_chunk(uint8_t* ptr, size_t size);
+
+void
+lx_free_internal(void* ptr);
+
+void*
+coalesce(uint8_t* chunk_ptr);
+
+void*
+lx_grow_heap(heap_context_t* heap, size_t sz);
+
+/*
+    At the beginning, we allocate an empty page and place the initial markers:
+
+ | 4/1 | 0/1 |
+ ^ ^ brk
+ start
+
+    Then, we expand the heap further by HEAP_INIT_SIZE (evaluated to 4096, i.e., 1 pg size).
+    This allocates as many pages as needed, overrides the old epilogue marker with a free
+    region hdr and puts a new epilogue marker. These steps are handled by lx_grow_heap,
+    which is used internally by the allocator to expand the heap whenever needed.
+
+ | 4/1 | 4096/0 | ....... | 4096/0 | 0/1 |
+ ^ ^ brk_old ^
+ start brk
+
+    Note: brk always points to the beginning of the epilogue marker.
+*/
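+
+/*
+ * In raw header words (illustrative; the values follow directly from the macros
+ * in dmm.h), the markers above read:
+ *
+ *   // right after kalloc_init writes the prologue/epilogue pair:
+ *   LW(__kalloc_kheap.start)         == PACK(4, M_ALLOCATED)  // 0x5, prologue
+ *   LW(__kalloc_kheap.start + WSIZE) == PACK(0, M_ALLOCATED)  // 0x1, initial epilogue
+ *   // after lx_grow_heap(HEAP_INIT_SIZE), the epilogue moves to brk:
+ *   LW(__kalloc_kheap.brk)           == PACK(0, M_ALLOCATED | M_PREV_FREE)  // 0x3
+ */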
+
int
kalloc_init() {
__kalloc_kheap.start = &__kernel_heap_start;
- __kalloc_kheap.brk = 0;
+ __kalloc_kheap.brk = NULL;
+ __kalloc_kheap.max_addr = (void*)K_STACK_START;
- return dmm_init(&__kalloc_kheap);
+ if (!dmm_init(&__kalloc_kheap)) {
+ return 0;
+ }
+
+ SW(__kalloc_kheap.start, PACK(4, M_ALLOCATED));
+ SW(__kalloc_kheap.start + WSIZE, PACK(0, M_ALLOCATED));
+ __kalloc_kheap.brk += WSIZE;
+
+ return lx_grow_heap(&__kalloc_kheap, HEAP_INIT_SIZE) != NULL;
}
void*
-kmalloc(size_t size) {
- return lx_malloc(&__kalloc_kheap, size);
+lxmalloc(size_t size) {
+ return lx_malloc_internal(&__kalloc_kheap, size);
}
void*
-kcalloc(size_t size) {
- void* ptr = kmalloc(size);
+lxcalloc(size_t size) {
+ void* ptr = lxmalloc(size);
if (!ptr) {
return NULL;
}
}
void
-kfree(void* ptr) {
- lx_free(ptr);
+lxfree(void* ptr) {
+ if (!ptr) {
+ return;
+ }
+
+ uint8_t* chunk_ptr = (uint8_t*)ptr - WSIZE;
+ uint32_t hdr = LW(chunk_ptr);
+ size_t sz = CHUNK_S(hdr);
+ uint8_t* next_hdr = chunk_ptr + sz;
+
+ // make sure the ptr we are 'bout to free makes sense
+ // the size trick is stolen from glibc's malloc/malloc.c:4437 ;P
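+    // ((uintptr_t)ptr < (uintptr_t)(-sz)) rejects pointers so high that ptr + sz
+    // would wrap around the address space; (ptr & 0x3) rejects misaligned pointers.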
+
+ assert_msg(((uintptr_t)ptr < (uintptr_t)(-sz)) && !((uintptr_t)ptr & 0x3),
+ "free(): invalid pointer");
+
+ assert_msg(sz > WSIZE,
+ "free(): invalid size");
+
+ SW(chunk_ptr, hdr & ~M_ALLOCATED);
+ SW(FPTR(chunk_ptr, sz), hdr & ~M_ALLOCATED);
+ SW(next_hdr, LW(next_hdr) | M_PREV_FREE);
+
+ coalesce(chunk_ptr);
+}
+
+
+void*
+lx_malloc_internal(heap_context_t* heap, size_t size)
+{
+ // Simplest first fit approach.
+
+ if (!size) {
+ return NULL;
+ }
+
+ uint8_t* ptr = heap->start;
+    // add space for the chunk header, then round the total up
+    // to the 4-byte alignment boundary
+ size = ROUNDUP(size + WSIZE, BOUNDARY);
+ while (ptr < (uint8_t*)heap->brk) {
+ uint32_t header = *((uint32_t*)ptr);
+ size_t chunk_size = CHUNK_S(header);
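+        // a zero-sized allocated chunk is the epilogue marker: stop scanning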
+ if (!chunk_size && CHUNK_A(header)) {
+ break;
+ }
+ if (chunk_size >= size && !CHUNK_A(header)) {
+ // found!
+ place_chunk(ptr, size);
+ return BPTR(ptr);
+ }
+ ptr += chunk_size;
+ }
+
+ // if heap is full (seems to be!), then allocate more space (if it's
+ // okay...)
+ if ((ptr = lx_grow_heap(heap, size))) {
+ place_chunk(ptr, size);
+ return BPTR(ptr);
+ }
+
+ // Well, we are officially OOM!
+ return NULL;
+}
+
+void
+place_chunk(uint8_t* ptr, size_t size)
+{
+ uint32_t header = *((uint32_t*)ptr);
+ size_t chunk_size = CHUNK_S(header);
+ *((uint32_t*)ptr) = PACK(size, CHUNK_PF(header) | M_ALLOCATED);
+ uint8_t* n_hdrptr = (uint8_t*)(ptr + size);
+ uint32_t diff = chunk_size - size;
+
+ if (!diff) {
+ // if the current free block is fully occupied
+ uint32_t n_hdr = LW(n_hdrptr);
+        // clear the next chunk's M_PREV_FREE bit: its predecessor is now allocated
+ SW(n_hdrptr, n_hdr & ~0x2);
+ } else {
+ // if there is remaining free space left
+ uint32_t remainder_hdr = PACK(diff, M_NOT_ALLOCATED | M_PREV_ALLOCATED);
+ SW(n_hdrptr, remainder_hdr);
+ SW(FPTR(n_hdrptr, diff), remainder_hdr);
+
+ /*
+ | xxxx | | |
+
+ |
+ v
+
+ | xxxx | |
+ */
+ coalesce(n_hdrptr);
+ }
+}
+
+void*
+coalesce(uint8_t* chunk_ptr)
+{
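+    // boundary-tag coalescing: the next chunk's header and, when M_PREV_FREE is
+    // set, the previous chunk's footer tell us which neighbours to merge with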
+ uint32_t hdr = LW(chunk_ptr);
+ uint32_t pf = CHUNK_PF(hdr);
+ uint32_t sz = CHUNK_S(hdr);
+
+ uint32_t n_hdr = LW(chunk_ptr + sz);
+
+ if (CHUNK_A(n_hdr) && pf) {
+ // case 1: prev is free
+ uint32_t prev_ftr = LW(chunk_ptr - WSIZE);
+ size_t prev_chunk_sz = CHUNK_S(prev_ftr);
+ uint32_t new_hdr = PACK(prev_chunk_sz + sz, CHUNK_PF(prev_ftr));
+ SW(chunk_ptr - prev_chunk_sz, new_hdr);
+ SW(FPTR(chunk_ptr, sz), new_hdr);
+ chunk_ptr -= prev_chunk_sz;
+ } else if (!CHUNK_A(n_hdr) && !pf) {
+ // case 2: next is free
+ size_t next_chunk_sz = CHUNK_S(n_hdr);
+ uint32_t new_hdr = PACK(next_chunk_sz + sz, pf);
+ SW(chunk_ptr, new_hdr);
+ SW(FPTR(chunk_ptr, sz + next_chunk_sz), new_hdr);
+ } else if (!CHUNK_A(n_hdr) && pf) {
+ // case 3: both free
+ uint32_t prev_ftr = LW(chunk_ptr - WSIZE);
+ size_t next_chunk_sz = CHUNK_S(n_hdr);
+ size_t prev_chunk_sz = CHUNK_S(prev_ftr);
+ uint32_t new_hdr =
+ PACK(next_chunk_sz + prev_chunk_sz + sz, CHUNK_PF(prev_ftr));
+ SW(chunk_ptr - prev_chunk_sz, new_hdr);
+ SW(FPTR(chunk_ptr, sz + next_chunk_sz), new_hdr);
+ chunk_ptr -= prev_chunk_sz;
+ }
+
+ // (fall through) case 4: prev and next are not free
+ return chunk_ptr;
+}
+
+
+void*
+lx_grow_heap(heap_context_t* heap, size_t sz)
+{
+ void* start;
+
+    // The "+ WSIZE" captures the overhead of the epilogue marker
+ if (!(start = lxbrk(heap, sz + WSIZE))) {
+ return NULL;
+ }
+ sz = ROUNDUP(sz, BOUNDARY);
+
+    // subtract the epilogue overhead so that brk keeps pointing at the epilogue marker
+ heap->brk -= WSIZE;
+
+ uint32_t old_marker = *((uint32_t*)start);
+ uint32_t free_hdr = PACK(sz, CHUNK_PF(old_marker));
+ SW(start, free_hdr);
+ SW(FPTR(start, sz), free_hdr);
+ SW(NEXT_CHK(start), PACK(0, M_ALLOCATED | M_PREV_FREE));
+
+ return coalesce(start);
}
\ No newline at end of file
memset((void*)L2_VADDR(l1_inx), 0, PG_SIZE);
}
- if (!forced && l2pt->entry[l2_inx]) {
- return 0;
+ x86_pte_t l2pte = l2pt->entry[l2_inx];
+ if (l2pte) {
+ if (!forced) {
+ return 0;
+ }
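+        // forced remap: if the old entry maps a present page, release its
+        // physical frame before the entry is overridden below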
+ if (HAS_FLAGS(l2pte, PG_PRESENT)) {
+ assert_msg(pmm_free_page(GET_PG_ADDR(l2pte)), "fail to release physical page");
+ }
}
l2pt->entry[l2_inx] = NEW_L2_ENTRY(attr, pa);
all: clean $(BUILD_DIR)/$(OS_ISO)
-all-debug: O := -Og
+all-debug: O := -O0
all-debug: CFLAGS := -g -std=gnu99 -ffreestanding $(O) $(W) $(ARCH_OPT) -D__LUNAIXOS_DEBUG__
all-debug: LDFLAGS := -g -ffreestanding $(O) -nostdlib -lgcc
all-debug: clean $(BUILD_DIR)/$(OS_ISO)
@$(QEMU_MON_TERM) -e "telnet 127.0.0.1 $(QEMU_MON_PORT)"
@gdb -s $(BUILD_DIR)/kernel.dbg -ex "target remote localhost:1234"
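+
+# VS Code debug flow: rebuild with debug flags, split the symbols out with
+# objcopy, start QEMU halted with its gdbstub on :1234 (-s -S), then attach to
+# the QEMU monitor over telnet; launch.json's gdb attach config connects afterwards.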
+debug-qemu-vscode: all-debug
+ @i686-elf-objcopy --only-keep-debug $(BIN_DIR)/$(OS_BIN) $(BUILD_DIR)/kernel.dbg
+ @qemu-system-i386 -s -S -cdrom $(BUILD_DIR)/$(OS_ISO) -monitor telnet::$(QEMU_MON_PORT),server,nowait &
+ @sleep 0.5
+ @telnet 127.0.0.1 $(QEMU_MON_PORT)
+
debug-bochs: all-debug
@bochs -q -f bochs.cfg