#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>

#include <sys/mm/mempart.h>

static ptr_t start = VMAP;

void*
vmap(ptr_t paddr, size_t size, pt_attr attr, int flags)
{
    // next-fit scan of the VMAP window for `size` bytes of unmapped pages
    assert_msg((paddr & 0xfff) == 0, "vmap: bad alignment");
    size = ROUNDUP(size, PG_SIZE);

    ptr_t current_addr = start;
    size_t examed_size = 0, wrapped = 0;
    x86_page_table* pd = (x86_page_table*)L1_BASE_VADDR;

    while (!wrapped || current_addr < start) {
        size_t l1inx = L1_INDEX(current_addr);
        if (!(pd->entry[l1inx])) {
            // the whole 4MiB behind this directory entry is unmapped
            examed_size += MEM_4M;
            current_addr = (current_addr & 0xffc00000) + MEM_4M;
        } else {
            x86_page_table* ptd = (x86_page_table*)(L2_VADDR(l1inx));
            size_t i = L2_INDEX(current_addr), j = 0;
            for (; i < PG_MAX_ENTRIES && examed_size < size; i++, j++) {
                if (!(ptd->entry[i])) {
                    examed_size += PG_SIZE;
                } else if (examed_size) {
                    // found a discontinuity, start from the beginning
                    examed_size = 0;
                }
            }
            current_addr += j << 12;
        }

        if (examed_size >= size) {
            break;
        }
        if (current_addr >= VMAP_END) {
            // window exhausted: wrap around and keep scanning
            current_addr = VMAP;
            examed_size = 0;
            wrapped = 1;
        }
    }

    if (examed_size < size) {
        return NULL;
    }

    ptr_t alloc_begin = current_addr - examed_size;
    start = alloc_begin + size;

    if ((flags & VMAP_NOMAP)) {
        // reserve the virtual range only; no physical page behind it yet
        for (size_t i = 0; i < size; i += PG_SIZE) {
            vmm_set_mapping(VMS_SELF, alloc_begin + i, -1, 0, 0);
        }
        return (void*)alloc_begin;
    }

    for (size_t i = 0; i < size; i += PG_SIZE) {
        vmm_set_mapping(VMS_SELF, alloc_begin + i, paddr + i, attr, 0);
        pmm_ref_page(KERNEL_PID, paddr + i);
    }

    return (void*)alloc_begin;
}
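
/*
 * Usage sketch (not part of this file): mapping a two-page MMIO window into
 * the VMAP region. The device address 0xfe000000 and the PG_PREM_RW
 * attribute are illustrative assumptions, not values taken from this kernel.
 *
 *     volatile unsigned int* regs =
 *       (volatile unsigned int*)vmap(0xfe000000, 2 * PG_SIZE, PG_PREM_RW, 0);
 *     if (!regs) {
 *         // VMAP window exhausted or too fragmented
 *     }
 */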

/*
    A vmap_area is a kernel memory region that reserves one contiguous range
    of virtual addresses, so that all allocation and deallocation within it is
    concentrated into a single big chunk. This helps mitigate external
    fragmentation of the VMAP address domain. It matters most when the
    allocation granule is a single page, as well as in some other use cases.

    A potential performance improvement for pcache? (needs more analysis!)
        -> in exchange for a fixed-size buffer pool. (is it worth it?)
*/
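
/*
 * struct vmap_area itself is declared in a header that is not shown here.
 * Judging from how it is used below, it is assumed to look roughly like:
 *
 *     struct vmap_area
 *     {
 *         ptr_t start;        // first virtual address of the reserved range
 *         size_t size;        // length of the range, page aligned
 *         pt_attr area_attr;  // default mapping attributes for the area
 *     };
 */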

struct vmap_area*
vmap_varea(size_t size, pt_attr attr)
{
    // reserve virtual space only (VMAP_NOMAP); pages are installed later
    ptr_t start = (ptr_t)vmap(0, size, attr & ~PG_PRESENT, VMAP_NOMAP);
    if (!start) {
        return NULL;
    }

    struct vmap_area* varea = valloc(sizeof(struct vmap_area));
    *varea =
      (struct vmap_area){ .start = start, .size = size, .area_attr = attr };

    return varea;
}
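
/*
 * Sketch of the intended use (hypothetical caller, e.g. a page cache that
 * backs its buffers lazily; PG_PREM_RW is the same illustrative attribute
 * assumed above):
 *
 *     struct vmap_area* area = vmap_varea(16 * PG_SIZE, PG_PREM_RW);
 *     if (!area) {
 *         // out of VMAP space
 *     }
 */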

ptr_t
vmap_area_page(struct vmap_area* area, ptr_t paddr, pt_attr attr)
{
    ptr_t current = area->start;
    size_t bound = current + area->size;

    // find the first unmapped slot within the area and map `paddr` there
    while (current < bound) {
        x86_pte_t* pte =
          (x86_pte_t*)(L2_VADDR(L1_INDEX(current)) | L2_INDEX(current));
        if (PG_IS_PRESENT(*pte)) {
            current += PG_SIZE;
            continue;
        }

        *pte = NEW_L2_ENTRY(attr | PG_PRESENT, paddr);
        cpu_flush_page(current);

        return current;
    }

    return 0;
}

ptr_t
vmap_area_rmpage(struct vmap_area* area, ptr_t vaddr)
{
    ptr_t current = area->start;
    size_t bound = current + area->size;

    if (current > vaddr || vaddr >= bound) {
        // the page does not belong to this area
        return 0;
    }

    x86_pte_t* pte =
      (x86_pte_t*)(L2_VADDR(L1_INDEX(vaddr)) | L2_INDEX(vaddr));
    ptr_t pa = PG_ENTRY_ADDR(*pte);

    *pte = NEW_L2_ENTRY(0, -1);
    cpu_flush_page(vaddr);

    return pa;
}
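
/*
 * Round-trip sketch for the two helpers above (hypothetical caller; the
 * exact pmm_alloc_page()/pmm_free_page() signatures are assumptions, based
 * only on how pmm_ref_page() is called earlier in this file):
 *
 *     ptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
 *     ptr_t va = vmap_area_page(area, pa, area->area_attr);  // 0 if area full
 *
 *     // ... use the page-sized buffer at `va` ...
 *
 *     ptr_t freed = vmap_area_rmpage(area, va);  // unmaps and returns pa
 *     pmm_free_page(KERNEL_PID, freed);
 */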