#include <lunaix/mm/pmm.h>
+#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
-#define VMAP_START PG_MOUNT_BASE + MEM_4MB
-#define VMAP_END PD_REFERENCED
+#include <sys/mm/mempart.h>
-static uintptr_t start = VMAP_START;
+static ptr_t start = VMAP;
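+
+/*
+ Allocate `size` bytes of kernel virtual address space from the VMAP window
+ [VMAP, VMAP_END) using a next-fit scan over the current page directory.
+ With VMAP_NOMAP the range is only reserved (filled with non-present
+ placeholder entries); otherwise it is mapped to `paddr` with the given
+ attributes. Returns NULL if no fit can be found.
+*/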
void*
-vmm_vmap(uintptr_t paddr, size_t size, pt_attr attr)
+vmap(ptr_t paddr, size_t size, pt_attr attr, int flags)
{
// next fit
assert_msg((paddr & 0xfff) == 0, "vmap: bad alignment");
size = ROUNDUP(size, PG_SIZE);
- uintptr_t current_addr = start;
+ ptr_t current_addr = start;
size_t examed_size = 0, wrapped = 0;
x86_page_table* pd = (x86_page_table*)L1_BASE_VADDR;
- while (!wrapped || current_addr >= start) {
+ while (!wrapped || current_addr < start) {
size_t l1inx = L1_INDEX(current_addr);
if (!(pd->entry[l1inx])) {
// empty 4mb region
- examed_size += MEM_4MB;
- current_addr = (current_addr & 0xffc00000) + MEM_4MB;
+ examed_size += MEM_4M;
+ current_addr = (current_addr & 0xffc00000) + MEM_4M;
} else {
x86_page_table* ptd = (x86_page_table*)(L2_VADDR(l1inx));
- size_t i = L2_INDEX(current_addr);
- for (; i < 1024 && examed_size < size; i++) {
+ size_t i = L2_INDEX(current_addr), j = 0;
+ for (; i < PG_MAX_ENTRIES && examed_size < size; i++, j++) {
if (!ptd->entry[i]) {
examed_size += PG_SIZE;
} else if (examed_size) {
// found a discontinuity, start from beginning
examed_size = 0;
- i++;
+ j++;
break;
}
}
- current_addr += i << 12;
+ current_addr += j << 12;
}
        if (examed_size >= size) {
            goto done;
        }

        if (current_addr >= VMAP_END) {
            wrapped = 1;
-            current_addr = VMAP_START;
+            examed_size = 0;
+            current_addr = VMAP;
        }
    }
- panick("vmm: out of memory");
+
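+    // wrapped around the entire VMAP window without finding a fit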
+ return NULL;
done:
- uintptr_t alloc_begin = current_addr - examed_size;
+ ptr_t alloc_begin = current_addr - examed_size;
+ start = alloc_begin + size;
+
+ if ((flags & VMAP_NOMAP)) {
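+        // reservation only: fill the range with non-present placeholder
+        // entries so later next-fit scans treat it as occupied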
+ for (size_t i = 0; i < size; i += PG_SIZE) {
+ vmm_set_mapping(VMS_SELF, alloc_begin + i, -1, 0, 0);
+ }
+
+ return (void*)alloc_begin;
+ }
+
for (size_t i = 0; i < size; i += PG_SIZE) {
- vmm_set_mapping(
- PD_REFERENCED, alloc_begin + i, paddr + i, PG_PREM_RW, 0);
+ vmm_set_mapping(VMS_SELF, alloc_begin + i, paddr + i, attr, 0);
pmm_ref_page(KERNEL_PID, paddr + i);
}
- start = alloc_begin + size;
return (void*)alloc_begin;
+}
+
+/*
+ A vmap area is a kernel memory region covering a contiguous range of
+ virtual addresses, so that all allocations and deallocations within it are
+ concentrated into one big chunk. This helps mitigate external fragmentation
+ of the VMAP address domain, which matters especially when the allocation
+ granule is a single page, as well as in some other use cases.
+
+ XXX (vmap_area)
+ A potential performance improvement for pcache? (needs more analysis!)
+ -> in exchange for a fixed-size buffer pool. (is it worth it?)
+*/
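+
+/*
+ Illustrative usage sketch (not part of this file): a cache-like consumer
+ could reserve a window once, then attach and detach physical pages on
+ demand. `some_paddr` is a hypothetical page-aligned physical address.
+
+     struct vmap_area* win = vmap_varea(16 * PG_SIZE, PG_PREM_RW);
+     ptr_t va = vmap_area_page(win, some_paddr, PG_PREM_RW);
+     // ... access the page through va ...
+     vmap_area_rmpage(win, va);
+*/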
+
+struct vmap_area*
+vmap_varea(size_t size, pt_attr attr)
+{
+    ptr_t start = (ptr_t)vmap(0, size, attr & ~PG_PRESENT, VMAP_NOMAP);
+
+ if (!start) {
+ return NULL;
+ }
+
+ struct vmap_area* varea = valloc(sizeof(struct vmap_area));
+ *varea =
+ (struct vmap_area){ .start = start, .size = size, .area_attr = attr };
+
+ return varea;
+}
+
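+/*
+ Map a single physical page into the first free (non-present) slot of the
+ area and return the virtual address it was mapped at. If every slot is
+ taken, the returned address equals the area's end bound.
+*/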
+ptr_t
+vmap_area_page(struct vmap_area* area, ptr_t paddr, pt_attr attr)
+{
+ ptr_t current = area->start;
+    ptr_t bound = current + area->size;
+
+ while (current < bound) {
+        x86_page_table* ptd = (x86_page_table*)L2_VADDR(L1_INDEX(current));
+        x86_pte_t* pte = &ptd->entry[L2_INDEX(current)];
+ if (PG_IS_PRESENT(*pte)) {
+ current += PG_SIZE;
+ continue;
+ }
+
+ *pte = NEW_L2_ENTRY(attr | PG_PRESENT, paddr);
+ cpu_flush_page(current);
+ break;
+ }
+
+ return current;
+}
+
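+/*
+ Reverse of vmap_area_page: restore the non-present placeholder for the slot
+ backing `vaddr` and return the physical address previously mapped there,
+ or 0 if vaddr lies outside the area.
+*/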
+ptr_t
+vmap_area_rmpage(struct vmap_area* area, ptr_t vaddr)
+{
+ ptr_t current = area->start;
+    ptr_t bound = current + area->size;
+
+    if (current > vaddr || vaddr >= bound) {
+ return 0;
+ }
+
+    x86_page_table* ptd = (x86_page_table*)L2_VADDR(L1_INDEX(vaddr));
+    x86_pte_t* pte = &ptd->entry[L2_INDEX(vaddr)];
+ ptr_t pa = PG_ENTRY_ADDR(*pte);
+
+ *pte = NEW_L2_ENTRY(0, -1);
+    cpu_flush_page(vaddr);
+
+ return pa;
}
\ No newline at end of file