+ for (u32_t i = 0; i < param->mlen; i += PG_SIZE) {
+ vmm_set_mapping(param->vms_mnt, found_loc + i, 0, attr, 0);
+ }
+
+ if (file) {
+ vfs_ref_file(file);
+ }
+
+ if (addr_out) {
+ *addr_out = (void*)found_loc;
+ }
+ if (created) {
+ *created = region;
+ }
+ return 0;
+}
+
+/*
+ * mem_remap: remap the region mapped at `addr` according to `param`,
+ * optionally backed by `file`.
+ *
+ * Currently an unimplemented stub: it ignores all arguments and always
+ * fails with EINVAL. No out-parameter is written.
+ *
+ * NOTE(review): once implemented, `addr_out`/`remapped` presumably
+ * receive the new location and region, mirroring the out-params of the
+ * mapping path above (L9-L14) — confirm against the eventual design.
+ */
+int
+mem_remap(void** addr_out,
+ struct mm_region** remapped,
+ void* addr,
+ struct v_file* file,
+ struct mmap_param* param)
+{
+ // TODO
+
+ return EINVAL;
+}
+
+void
+mem_sync_pages(ptr_t mnt,
+ struct mm_region* region,
+ ptr_t start,
+ ptr_t length,
+ int options)
+{
+ if (!region->mfile || !(region->attr & REGION_WSHARED)) {
+ return;
+ }
+
+ v_mapping mapping;
+ for (size_t i = 0; i < length; i += PG_SIZE) {
+ if (!vmm_lookupat(mnt, start + i, &mapping)) {
+ continue;
+ }
+
+ if (PG_IS_DIRTY(*mapping.pte)) {
+ size_t offset = mapping.va - region->start + region->foff;
+ struct v_inode* inode = region->mfile->inode;
+
+ region->mfile->ops->write_page(inode, (void*)mapping.va, offset);
+
+ *mapping.pte &= ~PG_DIRTY;
+
+ cpu_flush_page((ptr_t)mapping.pte);
+ } else if ((options & MS_INVALIDATE)) {
+ goto invalidate;
+ }
+
+ if (options & MS_INVALIDATE_ALL) {
+ goto invalidate;
+ }
+
+ continue;
+
+ invalidate:
+ *mapping.pte &= ~PG_PRESENT;
+ pmm_free_page(KERNEL_PID, mapping.pa);
+ cpu_flush_page((ptr_t)mapping.pte);