/**
 * @file dmm.c
 * @author Lunaixsky
 * @brief Dynamic memory manager for the user heap. This design does not
 * incorporate any specific implementation of the malloc family; the purpose
 * of these routines is to provide a handy way to initialize & grow the heap
 * as needed by an upstream implementation.
 *
 * This is designed to be portable, so it can serve as the syscall backend
 * for malloc/free in the C standard library.
 *
 * @version 0.2
 * @date 2022-03-03
 *
 * @copyright Copyright (c) Lunaixsky 2022
 *
 */

#include <lunaix/mm/mmap.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/process.h>
#include <lunaix/spike.h>
#include <lunaix/status.h>
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>
-extern void __kernel_heap_start;
-
-__DEFINE_LXSYSCALL1(int, sbrk, size_t, size)
-{
- heap_context_t* uheap = &__current->mm.u_heap;
- mutex_lock(&uheap->lock);
- void* r = lxsbrk(uheap, size, PG_ALLOW_USER);
- mutex_unlock(&uheap->lock);
- return r;
-}
-
-__DEFINE_LXSYSCALL1(void*, brk, void*, addr)
+void
+__heap_copied(struct mm_region* region)
{
- heap_context_t* uheap = &__current->mm.u_heap;
- mutex_lock(&uheap->lock);
- int r = lxbrk(uheap, addr, PG_ALLOW_USER);
- mutex_unlock(&uheap->lock);
- return r;
+ mm_index((void**)®ion->proc_vms->heap, region);
}
int
-dmm_init(heap_context_t* heap)
+create_heap(struct proc_mm* pvms, ptr_t addr)
{
- assert((uintptr_t)heap->start % BOUNDARY == 0);
-
- heap->brk = heap->start;
- mutex_init(&heap->lock);
-
- int perm = PG_ALLOW_USER;
- if (heap->brk >= &__kernel_heap_start) {
- perm = 0;
+ struct mmap_param map_param = { .pvms = pvms,
+ .vms_mnt = VMS_SELF,
+ .flags = MAP_ANON | MAP_PRIVATE,
+ .type = REGION_TYPE_HEAP,
+ .proct = PROT_READ | PROT_WRITE,
+ .mlen = PAGE_SIZE };
+ int status = 0;
+ struct mm_region* heap;
+ if ((status = mmap_user(NULL, &heap, addr, NULL, &map_param))) {
+ return status;
}
- return vmm_set_mapping(PD_REFERENCED, heap->brk, 0, PG_WRITE | perm) !=
- NULL;
-}
+ heap->region_copied = __heap_copied;
+ mm_index((void**)&pvms->heap, heap);
-int
-lxbrk(heap_context_t* heap, void* addr, int user)
-{
- return -(lxsbrk(heap, addr - heap->brk, user) == (void*)-1);
+ return status;
}
-void*
-lxsbrk(heap_context_t* heap, size_t size, int user)
+__DEFINE_LXSYSCALL1(void*, sbrk, ssize_t, incr)
{
- if (size == 0) {
- return heap->brk;
- }
-
- void* current_brk = heap->brk;
+ struct proc_mm* pvms = vmspace(__current);
+ struct mm_region* heap = pvms->heap;
- // The upper bound of our next brk of heap given the size.
- // This will be used to calculate the page we need to allocate.
- void* next = current_brk + ROUNDUP(size, BOUNDARY);
-
- // any invalid situations
- if (next >= heap->max_addr || next < current_brk) {
- __current->k_status = LXINVLDPTR;
- return (void*)-1;
+ assert(heap);
+ int err = mem_adjust_inplace(&pvms->regions, heap, heap->end + incr);
+ if (err) {
+ return (void*)DO_STATUS(err);
}
+ return (void*)heap->end;
+}
+
+__DEFINE_LXSYSCALL1(int, brk, void*, addr)
+{
+ struct proc_mm* pvms = vmspace(__current);
+ struct mm_region* heap = pvms->heap;
- uintptr_t diff = PG_ALIGN(next) - PG_ALIGN(current_brk);
- if (diff) {
- // if next do require new pages to be mapped
- for (size_t i = 0; i < diff; i += PG_SIZE) {
- vmm_set_mapping(PD_REFERENCED,
- PG_ALIGN(current_brk) + PG_SIZE + i,
- 0,
- PG_WRITE | user);
- }
+ if (!heap) {
+ return DO_STATUS(create_heap(pvms, (ptr_t)addr));
}
- heap->brk += size;
- return current_brk;
+ assert(heap);
+ int err = mem_adjust_inplace(&pvms->regions, heap, (ptr_t)addr);
+ return DO_STATUS(err);
}
\ No newline at end of file