+ assert((uintptr_t)va % PG_SIZE == 0) assert(sz % PG_SIZE == 0);
+
+ void* va_ = va;
+ for (size_t i = 0; i < (sz >> PG_SIZE_BITS); i++, va_ += PG_SIZE) {
+ void* pp = pmm_alloc_page(pid, pattr);
+ uint32_t l1_index = L1_INDEX(va_);
+ uint32_t l2_index = L2_INDEX(va_);
+ if (!pp || !__vmm_map_internal(
+ pid,
+ l1_index, l2_index, (uintptr_t)pp, tattr, false)) {
+ // if one failed, release previous allocated pages.
+ va_ = va;
+ for (size_t j = 0; j < i; j++, va_ += PG_SIZE) {
+ vmm_unmap_page(pid, va_);
+ }
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int
+vmm_set_mapping(pid_t pid, void* va, void* pa, pt_attr attr) {
+    // Establish a single-page mapping va -> pa in the page table of
+    // process `pid` with attributes `attr`.
+    // Returns 1 on success, 0 on failure (recursive-mapping region, or
+    // the internal mapper could not install the entry).
+    // `va` must be page aligned; `pa` is presumably page aligned as
+    // well — TODO confirm whether __vmm_map_internal masks low bits.
+    assert(((uintptr_t)va & 0xFFFU) == 0);
+
+    uint32_t l1_index = L1_INDEX(va);
+    uint32_t l2_index = L2_INDEX(va);
+
+    // prevent map of recursive mapping region
+    if (l1_index == 1023) {
+        return 0;
+    }
+
+    // FIX: propagate the internal mapper's result instead of
+    // unconditionally reporting success — callers previously saw 1 even
+    // when the mapping was never installed (the allocation loop above
+    // checks this same return value, so failure is a real possibility).
+    if (!__vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, attr, false)) {
+        return 0;
+    }
+    return 1;
+}
+
+void
+__vmm_unmap_internal(pid_t pid, void* va, int free_ppage) {
+ assert(((uintptr_t)va & 0xFFFU) == 0);