#include <lunaix/mm/mmap.h>
-#include <lunaix/mm/pmm.h>
+#include <lunaix/mm/page.h>
#include <lunaix/mm/valloc.h>
-#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
-
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>
-#include <sys/mm/mempart.h>
+#include <asm/mm_defs.h>
+
+#include <usr/lunaix/mann_flags.h>
// any size beyond this is bogus
-#define BS_SIZE (KERNEL_EXEC - USR_MMAP)
+#define BS_SIZE (KERNEL_RESIDENT - USR_MMAP)
int
mem_has_overlap(vm_regions_t* regions, ptr_t start, ptr_t end)
return 0;
}
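+/*
+ * Map into the user-addressable window: constrain the search
+ * range to [USR_EXEC, KERNEL_RESIDENT) before delegating to
+ * mem_map.
+ */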
+int
+mmap_user(void** addr_out,
+ struct mm_region** created,
+ ptr_t addr,
+ struct v_file* file,
+ struct mmap_param* param)
+{
+ param->range_end = KERNEL_RESIDENT;
+ param->range_start = USR_EXEC;
+
+ return mem_map(addr_out, created, addr, file, param);
+}
+
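+/*
+ * Tear down up to npages mappings starting at ptep, returning any
+ * backing leaflet to the allocator. A leaflet may back several
+ * consecutive ptes, so the loop skips the entries it covered.
+ */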
+static void
+__remove_ranged_mappings(pte_t* ptep, size_t npages)
+{
+ struct leaflet* leaflet;
+ pte_t pte;
+ for (size_t i = 0, n = 0; i < npages; i++, ptep++) {
+ pte = pte_at(ptep);
+
+ set_pte(ptep, null_pte);
+ if (!pte_isloaded(pte)) {
+ continue;
+ }
+
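+        // ptep_unmap_leaflet() clears every pte backed by this
+        // leaflet and returns the count; skip those entries here.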
+ leaflet = pte_leaflet_aligned(pte);
+ leaflet_return(leaflet);
+
+ n = ptep_unmap_leaflet(ptep, leaflet) - 1;
+ i += n;
+ ptep += n;
+ }
+}
+
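+/*
+ * Search from anchor towards higher addresses (via next_region)
+ * for the first inter-region gap that fits param->mlen bytes.
+ * Returns the gap's start, or 0 if nothing fits below range_end.
+ */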
+static ptr_t
+__mem_find_slot_backward(struct mm_region* lead, struct mmap_param* param, struct mm_region* anchor)
+{
+ ptr_t size = param->mlen;
+ struct mm_region *pos = anchor,
+ *n = next_region(pos);
+ while (pos != lead)
+ {
+ ptr_t end = n->start;
+ if (n == lead) {
+ end = param->range_end;
+ }
+
+ if (end - pos->end >= size) {
+ return pos->end;
+ }
+
+ pos = n;
+ n = next_region(pos);
+ }
+
+ return 0;
+}
+
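+/*
+ * Search from anchor towards lower addresses (via prev_region)
+ * for a gap that fits param->mlen bytes. Returns the highest
+ * fitting address, or 0 if nothing fits above range_start.
+ */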
+static ptr_t
+__mem_find_slot_forward(struct mm_region* lead, struct mmap_param* param, struct mm_region* anchor)
+{
+ ptr_t size = param->mlen;
+ struct mm_region *pos = anchor,
+ *prev = prev_region(pos);
+ while (lead != pos)
+ {
+ ptr_t end = prev->end;
+ if (prev == lead) {
+ end = param->range_start;
+ }
+
+ if (pos->start - end >= size) {
+ return pos->start - size;
+ }
+
+ pos = prev;
+ prev = prev_region(pos);
+ }
+
+ return 0;
+}
+
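+/*
+ * Find a free slot for the requested mapping, trying the
+ * higher-address side of anchor first, then the lower side.
+ */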
+static ptr_t
+__mem_find_slot(vm_regions_t* lead, struct mmap_param* param, struct mm_region* anchor)
+{
+ ptr_t result = 0;
+ struct mm_region* _lead = get_region(lead);
+ if ((result = __mem_find_slot_backward(_lead, param, anchor))) {
+ return result;
+ }
+
+ return __mem_find_slot_forward(_lead, param, anchor);
+}
+
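+/*
+ * Locate the region containing addr, or otherwise the region whose
+ * boundary is closest to addr, to anchor the slot search.
+ */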
+static struct mm_region*
+__mem_find_nearest(vm_regions_t* lead, ptr_t addr)
+{
+ ptr_t min_dist = (ptr_t)-1;
+ struct mm_region *pos, *n, *min = NULL;
+ llist_for_each(pos, n, lead, head) {
+ if (region_contains(pos, addr)) {
+ return pos;
+ }
+
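+        // addr is outside pos; take the distance to the nearer edge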
+ ptr_t dist = addr - pos->end;
+ if (addr < pos->start) {
+ dist = pos->start - addr;
+ }
+
+ if (dist < min_dist) {
+ min_dist = dist;
+ min = pos;
+ }
+ }
+
+ return min;
+}
+
int
mem_map(void** addr_out,
struct mm_region** created,
{
    assert_msg(addr, "addr cannot be NULL");
- ptr_t last_end = USR_EXEC, found_loc = addr;
+ ptr_t last_end = USR_EXEC, found_loc = page_aligned(addr);
struct mm_region *pos, *n;
vm_regions_t* vm_regions = ¶m->pvms->regions;
goto found;
}
- llist_for_each(pos, n, vm_regions, head)
- {
- if (last_end < found_loc) {
- size_t avail_space = pos->start - found_loc;
- if (pos->start > found_loc && avail_space > param->mlen) {
- goto found;
- }
- found_loc = pos->end + MEM_PAGE;
- }
+ if (llist_empty(vm_regions)) {
+ goto found;
+ }
- last_end = pos->end;
+ struct mm_region* anchor = __mem_find_nearest(vm_regions, found_loc);
+ if ((found_loc = __mem_find_slot(vm_regions, param, anchor))) {
+ goto found;
}
return ENOMEM;
found:
- if (found_loc >= KERNEL_EXEC || found_loc < USR_EXEC) {
+ if (found_loc >= param->range_end || found_loc < param->range_start) {
return ENOMEM;
}
((param->proct | param->flags) & 0x3f) | (param->type & ~0xffff));
region->mfile = file;
- region->foff = param->offset;
region->flen = param->flen;
+ region->foff = param->offset;
region->proc_vms = param->pvms;
region_add(vm_regions, region);
-
- u32_t attr = PG_ALLOW_USER;
- if ((param->proct & REGION_WRITE)) {
- attr |= PG_WRITE;
- }
-
- for (u32_t i = 0; i < param->mlen; i += PG_SIZE) {
- vmm_set_mapping(param->vms_mnt, found_loc + i, 0, attr, 0);
- }
-
+
if (file) {
vfs_ref_file(file);
}
if (!region->mfile || !(region->attr & REGION_WSHARED)) {
return;
}
+
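+    // walk ptes over [start, start + length): write dirty pages of a
+    // write-shared file mapping back through write_page, then clean the pte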
+ pte_t* ptep = mkptep_va(mnt, start);
+ ptr_t va = page_aligned(start);
- v_mapping mapping;
- for (size_t i = 0; i < length; i += PG_SIZE) {
- if (!vmm_lookupat(mnt, start + i, &mapping)) {
+ for (; va < start + length; va += PAGE_SIZE, ptep++) {
+ pte_t pte = vmm_tryptep(ptep, LFT_SIZE);
+ if (pte_isnull(pte)) {
continue;
}
- if (PG_IS_DIRTY(*mapping.pte)) {
- size_t offset = mapping.va - region->start + region->foff;
+ if (pte_dirty(pte)) {
+ size_t offset = va - region->start + region->foff;
struct v_inode* inode = region->mfile->inode;
- region->mfile->ops->write_page(inode, (void*)mapping.va, offset);
+ region->mfile->ops->write_page(inode, (void*)va, offset);
- *mapping.pte &= ~PG_DIRTY;
-
- cpu_flush_page((ptr_t)mapping.pte);
+ set_pte(ptep, pte_mkclean(pte));
+ tlb_flush_vmr(region, va);
+
} else if ((options & MS_INVALIDATE)) {
goto invalidate;
}
continue;
+    // FIXME: what if the mem_sync range is not aligned to
+    //        a leaflet with order > 1?
invalidate:
- *mapping.pte &= ~PG_PRESENT;
- pmm_free_page(KERNEL_PID, mapping.pa);
- cpu_flush_page((ptr_t)mapping.pte);
+ set_pte(ptep, null_pte);
+ leaflet_return(pte_leaflet(pte));
+ tlb_flush_vmr(region, va);
}
}
void
mem_unmap_region(ptr_t mnt, struct mm_region* region)
{
- size_t len = ROUNDUP(region->end - region->start, PG_SIZE);
- mem_sync_pages(mnt, region, region->start, len, 0);
-
- for (size_t i = region->start; i <= region->end; i += PG_SIZE) {
- ptr_t pa = vmm_del_mapping(mnt, i);
- if (pa) {
- pmm_free_page(__current->pid, pa);
- }
+ if (!region) {
+ return;
}
+
+ valloc_ensure_valid(region);
+
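+    // write back any dirty pages, then drop every mapping in the region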
+ pfn_t pglen = leaf_count(region->end - region->start);
+ mem_sync_pages(mnt, region, region->start, pglen * PAGE_SIZE, 0);
+
+ pte_t* ptep = mkptep_va(mnt, region->start);
+ __remove_ranged_mappings(ptep, pglen);
+
+ tlb_flush_vmr_all(region);
+
llist_delete(®ion->head);
region_release(region);
}
if (region->mfile) {
size_t f_shifted = new_start - region->start;
region->foff += f_shifted;
- region->flen = MAX(region->flen, f_shifted) - f_shifted;
}
region->start = new_start;
llist_insert_after(&vmr->head, ®ion->head);
shrink = vmr->end - seg_start;
umps_len = shrink;
umps_start = seg_start;
- } else if (CASE_HITE(vmr, seg_start, seg_len)) {
+ }
+ else if (CASE_HITE(vmr, seg_start, seg_len)) {
shrink = vmr->end - seg_start;
umps_len = shrink;
umps_start = seg_start;
- } else if (CASE_HETI(vmr, seg_start, seg_len)) {
+ }
+ else if (CASE_HETI(vmr, seg_start, seg_len)) {
displ = seg_len - (vmr->start - seg_start);
umps_len = displ;
umps_start = vmr->start;
- } else if (CASE_HETE(vmr, seg_start, seg_len)) {
+ }
+ else if (CASE_HETE(vmr, seg_start, seg_len)) {
shrink = vmr->end - vmr->start;
umps_len = shrink;
umps_start = vmr->start;
}
mem_sync_pages(mnt, vmr, vmr->start, umps_len, 0);
- for (size_t i = 0; i < umps_len; i += PG_SIZE) {
- ptr_t pa = vmm_del_mapping(mnt, vmr->start + i);
- if (pa) {
- pmm_free_page(vmr->proc_vms->pid, pa);
- }
- }
+
+ pte_t *ptep = mkptep_va(mnt, vmr->start);
+ __remove_ranged_mappings(ptep, leaf_count(umps_len));
+
+ tlb_flush_vmr_range(vmr, vmr->start, umps_len);
vmr->start += displ;
vmr->end -= shrink;
region_release(vmr);
} else if (vmr->mfile) {
vmr->foff += displ;
- vmr->flen = MAX(vmr->flen, displ) - displ;
}
*addr = umps_start + umps_len;
int
mem_unmap(ptr_t mnt, vm_regions_t* regions, ptr_t addr, size_t length)
{
- length = ROUNDUP(length, PG_SIZE);
- ptr_t cur_addr = PG_ALIGN(addr);
+ length = ROUNDUP(length, PAGE_SIZE);
+ ptr_t cur_addr = page_aligned(addr);
struct mm_region *pos, *n;
llist_for_each(pos, n, regions, head)
}
}
- while (&pos->head != regions && length) {
+ size_t remaining = length;
+ while (&pos->head != regions && remaining) {
n = container_of(pos->head.next, typeof(*pos), head);
- __unmap_overlapped_cases(mnt, pos, &cur_addr, &length);
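+        // regions are address-ordered: stop once the next region
+        // begins past the end of the requested range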
+        if (pos->start > cur_addr + length) {
+            break;
+        }
+
+ __unmap_overlapped_cases(mnt, pos, &cur_addr, &remaining);
pos = n;
}
return 0;
}
-__DEFINE_LXSYSCALL3(void*, sys_mmap, void*, addr, size_t, length, va_list, lst)
+__DEFINE_LXSYSCALL1(void*, sys_mmap, struct usr_mmap_param*, mparam)
{
- int proct = va_arg(lst, int);
- int fd = va_arg(lst, u32_t);
- off_t offset = va_arg(lst, off_t);
- int options = va_arg(lst, int);
- int errno = 0;
- void* result = (void*)-1;
-
- ptr_t addr_ptr = (ptr_t)addr;
-
- if (!length || length > BS_SIZE || !PG_ALIGNED(addr_ptr)) {
+ off_t offset;
+ size_t length;
+ int proct, fd, options;
+ int errno;
+ void* result;
+ ptr_t addr_ptr;
+
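+    // mmap parameters now arrive packed in a single user-space struct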
+ proct = mparam->proct;
+ fd = mparam->fd;
+ offset = mparam->offset;
+ options = mparam->flags;
+ addr_ptr = __ptr(mparam->addr);
+ length = mparam->length;
+
+ errno = 0;
+ result = (void*)-1;
+
+ if (!length || length > BS_SIZE || va_offset(addr_ptr)) {
errno = EINVAL;
goto done;
}
}
}
+ length = ROUNDUP(length, PAGE_SIZE);
struct mmap_param param = { .flags = options,
- .mlen = ROUNDUP(length, PG_SIZE),
+ .mlen = length,
.flen = length,
.offset = offset,
.type = REGION_TYPE_GENERAL,
.proct = proct,
- .pvms = (struct proc_mm*)&__current->mm,
+ .pvms = vmspace(__current),
.vms_mnt = VMS_SELF };
- errno = mem_map(&result, NULL, addr_ptr, file, ¶m);
+ errno = mmap_user(&result, NULL, addr_ptr, file, ¶m);
done:
- __current->k_status = errno;
+ syscall_result(errno);
return result;
}
__DEFINE_LXSYSCALL2(int, munmap, void*, addr, size_t, length)
{
return mem_unmap(
- VMS_SELF, (vm_regions_t*)&__current->mm.regions, (ptr_t)addr, length);
+ VMS_SELF, vmregions(__current), (ptr_t)addr, length);
}
__DEFINE_LXSYSCALL3(int, msync, void*, addr, size_t, length, int, flags)
{
- if (!PG_ALIGNED(addr) || ((flags & MS_ASYNC) && (flags & MS_SYNC))) {
+ if (va_offset((ptr_t)addr) || ((flags & MS_ASYNC) && (flags & MS_SYNC))) {
return DO_STATUS(EINVAL);
}
int status = mem_msync(VMS_SELF,
- (vm_regions_t*)&__current->mm.regions,
+ vmregions(__current),
(ptr_t)addr,
length,
flags);