+}
+
+/*
+ A vmap_area is a kernel memory region that represents a contiguous range of
+ virtual addresses, so that all allocation and deallocation within it is
+ concentrated into a single big chunk. This helps mitigate external
+ fragmentation in the VMAP address domain, which matters most when the
+ allocation granule is a single page, as well as in some other use cases.
+
+ XXX (vmap_area)
+ A potential performance improvement for pcache? (needs more analysis!)
+ -> In exchange for a fixed-size buffer pool. (is it worth it?)
+*/
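+
+/*
+ For reference, the shape of struct vmap_area assumed by the code below
+ (a sketch only; the authoritative definition lives in the corresponding
+ header, not in this file):
+
+     struct vmap_area
+     {
+         ptr_t start;        // first virtual address of the reserved range
+         size_t size;        // length of the range, in bytes
+         pt_attr area_attr;  // paging attributes requested for the area
+     };
+*/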
+
+struct vmap_area*
+vmap_varea(size_t size, pt_attr attr)
+{
+    /* reserve the virtual range only: entries must not be marked present */
+    ptr_t start = (ptr_t)vmap(0, size, attr & ~PG_PRESENT, VMAP_NOMAP);
+
+    if (!start) {
+        return NULL;
+    }
+
+    struct vmap_area* varea = valloc(sizeof(struct vmap_area));
+    if (!varea) {
+        return NULL;
+    }
+
+    *varea =
+      (struct vmap_area){ .start = start, .size = size, .area_attr = attr };
+
+    return varea;
+}
+
+/*
+ Map the physical page at paddr into the first free slot of the area.
+ Returns the chosen virtual address, or 0 if the area is fully mapped.
+*/
+ptr_t
+vmap_area_page(struct vmap_area* area, ptr_t paddr, pt_attr attr)
+{
+    ptr_t current = area->start;
+    ptr_t bound = current + area->size;
+
+    /* linear scan for the first non-present entry within the area */
+    while (current < bound) {
+        x86_pte_t* pte =
+          (x86_pte_t*)(L2_VADDR(L1_INDEX(current)) | L2_INDEX(current));
+        if (PG_IS_PRESENT(*pte)) {
+            current += PG_SIZE;
+            continue;
+        }
+
+        /* free slot found: install the mapping, invalidate the stale TLB entry */
+        *pte = NEW_L2_ENTRY(attr | PG_PRESENT, paddr);
+        cpu_flush_page(current);
+        return current;
+    }
+
+    return 0;
+}
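+
+/*
+ Illustrative usage sketch (not part of this change; the attribute value,
+ page count and origin of `paddr` are assumptions made for the example):
+
+     // reserve 16 pages worth of contiguous virtual addresses, no backing yet
+     struct vmap_area* area = vmap_varea(16 * PG_SIZE, attr);
+
+     // commit one physical frame into the first free slot of the area
+     ptr_t va = vmap_area_page(area, paddr, attr);
+
+     // ... access the frame through va ...
+
+     // tear the mapping down again; the physical address is handed back
+     ptr_t pa = vmap_area_rmpage(area, va);
+*/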
+
+/*
+ Remove the page mapped at vaddr from the area and return the physical
+ address it was backed by, or 0 if vaddr does not fall within the area.
+*/
+ptr_t
+vmap_area_rmpage(struct vmap_area* area, ptr_t vaddr)
+{
+    ptr_t start = area->start;
+    ptr_t bound = start + area->size;
+
+    if (vaddr < start || vaddr >= bound) {
+        return 0;
+    }
+
+    /* locate the entry for vaddr itself, not the start of the area */
+    x86_pte_t* pte =
+      (x86_pte_t*)(L2_VADDR(L1_INDEX(vaddr)) | L2_INDEX(vaddr));
+    ptr_t pa = PG_ENTRY_ADDR(*pte);
+
+    *pte = NEW_L2_ENTRY(0, -1);
+    cpu_flush_page(vaddr);
+
+    return pa;