#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
-
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>
+#include <sys/mm/mm_defs.h>
+
+#include <usr/lunaix/mann_flags.h>
+
// any size beyond this is bullshit
-#define BS_SIZE (KERNEL_MM_BASE - UMMAP_START)
+#define BS_SIZE (KERNEL_RESIDENT - USR_MMAP)
int
-mem_has_overlap(vm_regions_t* regions, ptr_t start, size_t len)
+mem_has_overlap(vm_regions_t* regions, ptr_t start, ptr_t end)
{
- ptr_t end = start + end - 1;
struct mm_region *pos, *n;
llist_for_each(pos, n, regions, head)
{
return 0;
}
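+
+// Resize a region in place by moving its end to `newend`, without relocating
+// it. Returns EINVAL if `newend` falls below the region start, ENOMEM if the
+// new extent would collide with another mapping, and 0 on success.
+// Illustrative use (hypothetical call site, not part of this change):
+//     mem_adjust_inplace(&vms->regions, heap_region, new_break);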
+int
+mem_adjust_inplace(vm_regions_t* regions,
+ struct mm_region* region,
+ ptr_t newend)
+{
+ ssize_t len = newend - region->start;
+ if (len == 0) {
+ return 0;
+ }
+
+ if (len < 0) {
+ return EINVAL;
+ }
+
+ if (mem_has_overlap(regions, region->start, newend)) {
+ return ENOMEM;
+ }
+
+ region->end = newend;
+
+ return 0;
+}
+
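+// Map on behalf of user space: constrain the placement window to the user
+// portion of the address space (USR_EXEC up to KERNEL_RESIDENT), then
+// delegate to mem_map.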
+int
+mmap_user(void** addr_out,
+ struct mm_region** created,
+ ptr_t addr,
+ struct v_file* file,
+ struct mmap_param* param)
+{
+ param->range_end = KERNEL_RESIDENT;
+ param->range_start = USR_EXEC;
+
+ return mem_map(addr_out, created, addr, file, param);
+}
+
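+// Starting from `anchor`, walk towards higher addresses and return the first
+// gap between two adjacent regions (or between the last region and
+// param->range_end) large enough for param->mlen bytes. The returned address
+// is the low end of that gap; 0 means no suitable gap was found.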
+static ptr_t
+__mem_find_slot_backward(struct mm_region* lead, struct mmap_param* param, struct mm_region* anchor)
+{
+ ptr_t size = param->mlen;
+ struct mm_region *pos = anchor,
+ *n = next_region(pos);
+    while (pos != lead)
+    {
+
+ ptr_t end = n->start;
+ if (n == lead) {
+ end = param->range_end;
+ }
+
+ if (end - pos->end >= size) {
+ return pos->end;
+ }
+
+ pos = n;
+ n = next_region(pos);
+ }
+
+ return 0;
+}
+
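+// Counterpart of the search above: walk from `anchor` towards lower
+// addresses and return the first gap below a region (bounded by the previous
+// region's end, or by param->range_start) large enough for param->mlen
+// bytes. The slot is placed at the high end of the gap, immediately below
+// that region; 0 means no suitable gap was found.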
+static ptr_t
+__mem_find_slot_forward(struct mm_region* lead, struct mmap_param* param, struct mm_region* anchor)
+{
+ ptr_t size = param->mlen;
+ struct mm_region *pos = anchor,
+ *prev = prev_region(pos);
+ while (lead != pos)
+ {
+ ptr_t end = prev->end;
+ if (prev == lead) {
+ end = param->range_start;
+ }
+
+ if (pos->start - end >= size) {
+ return pos->start - size;
+ }
+
+ pos = prev;
+ prev = prev_region(pos);
+ }
+
+ return 0;
+}
+
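+// Find a free slot for the requested mapping around `anchor`: try the
+// higher-address direction first, then fall back to searching downwards.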
+static ptr_t
+__mem_find_slot(vm_regions_t* lead, struct mmap_param* param, struct mm_region* anchor)
+{
+ ptr_t result = 0;
+ struct mm_region* _lead = get_region(lead);
+ if ((result = __mem_find_slot_backward(_lead, param, anchor))) {
+ return result;
+ }
+
+ return __mem_find_slot_forward(_lead, param, anchor);
+}
+
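+// Return the region containing `addr`, or, failing that, the region whose
+// boundary lies closest to it. The result seeds the slot search above.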
+static struct mm_region*
+__mem_find_nearest(vm_regions_t* lead, ptr_t addr)
+{
+ ptr_t min_dist = (ptr_t)-1;
+ struct mm_region *pos, *n, *min = NULL;
+ llist_for_each(pos, n, lead, head) {
+ if (region_contains(pos, addr)) {
+ return pos;
+ }
+
+ ptr_t dist = addr - pos->end;
+ if (addr < pos->start) {
+ dist = pos->start - addr;
+ }
+
+ if (dist < min_dist) {
+ min_dist = dist;
+ min = pos;
+ }
+ }
+
+ return min;
+}
+
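+// Create a mapping described by `param`. With MAP_FIXED_NOREPLACE the hinted
+// address is used as-is and the call fails with EEXIST if it overlaps an
+// existing region; otherwise the search starts from the region nearest to
+// the hint and settles on the closest gap that can hold the request.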
int
mem_map(void** addr_out,
struct mm_region** created,
- void* addr,
+ ptr_t addr,
struct v_file* file,
struct mmap_param* param)
{
assert_msg(addr, "addr can not be NULL");
- ptr_t last_end = USER_START, found_loc = (ptr_t)addr;
+ ptr_t last_end = USR_EXEC, found_loc = va_align(addr);
struct mm_region *pos, *n;
    vm_regions_t* vm_regions = &param->pvms->regions;
if ((param->flags & MAP_FIXED_NOREPLACE)) {
- if (mem_has_overlap(vm_regions, found_loc, param->mlen)) {
+ if (mem_has_overlap(vm_regions, found_loc, param->mlen + found_loc)) {
return EEXIST;
}
        goto found;
}
- llist_for_each(pos, n, vm_regions, head)
- {
- if (last_end < found_loc) {
- size_t avail_space = pos->start - found_loc;
- if (pos->start > found_loc && avail_space > param->mlen) {
- goto found;
- }
- found_loc = pos->end + PG_SIZE;
- }
+ if (llist_empty(vm_regions)) {
+ goto found;
+ }
- last_end = pos->end;
+ struct mm_region* anchor = __mem_find_nearest(vm_regions, found_loc);
+ if ((found_loc = __mem_find_slot(vm_regions, param, anchor))) {
+ goto found;
}
return ENOMEM;
found:
- if (found_loc >= KERNEL_MM_BASE || found_loc < USER_START) {
+ if (found_loc >= param->range_end || found_loc < param->range_start) {
return ENOMEM;
}
region->mfile = file;
region->foff = param->offset;
- region->flen = param->flen;
region->proc_vms = param->pvms;
region_add(vm_regions, region);
-
- u32_t attr = PG_ALLOW_USER;
- if ((param->proct & REGION_WRITE)) {
- attr |= PG_WRITE;
- }
-
- for (u32_t i = 0; i < param->mlen; i += PG_SIZE) {
- vmm_set_mapping(param->vms_mnt, found_loc + i, 0, attr, 0);
- }
-
+
if (file) {
vfs_ref_file(file);
}
if (addr_out) {
- *addr_out = found_loc;
+ *addr_out = (void*)found_loc;
}
if (created) {
*created = region;
return 0;
}
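+
+// Change the size or placement of an existing mapping (mremap-style
+// interface). Not implemented yet; always fails with EINVAL for now.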
+int
+mem_remap(void** addr_out,
+ struct mm_region** remapped,
+ void* addr,
+ struct v_file* file,
+ struct mmap_param* param)
+{
+ // TODO
+
+ return EINVAL;
+}
+
void
mem_sync_pages(ptr_t mnt,
struct mm_region* region,
if (!region->mfile || !(region->attr & REGION_WSHARED)) {
return;
}
+
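+    // Walk the leaf PTEs covering [start, start + length): pages of this
+    // shared, file-backed mapping that are dirty are written back and marked
+    // clean; with MS_INVALIDATE, pages that are present but not dirty are
+    // unmapped and their frames freed.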
+ pte_t* ptep = mkptep_va(mnt, start);
+ ptr_t va = va_align(start);
- v_mapping mapping;
- for (size_t i = 0; i < length; i += PG_SIZE) {
- if (!vmm_lookupat(mnt, start + i, &mapping)) {
+ for (; va < start + length; va += PAGE_SIZE, ptep++) {
+ pte_t pte = vmm_tryptep(ptep, LFT_SIZE);
+ if (pte_isnull(pte)) {
continue;
}
- if (PG_IS_DIRTY(*mapping.pte)) {
- size_t offset = mapping.va - region->start + region->foff;
+ if (pte_dirty(pte)) {
+ size_t offset = va - region->start + region->foff;
struct v_inode* inode = region->mfile->inode;
- region->mfile->ops->write_page(inode, mapping.va, PG_SIZE, offset);
- *mapping.pte &= ~PG_DIRTY;
- cpu_invplg(mapping.pte);
+
+ region->mfile->ops->write_page(inode, (void*)va, offset);
+
+ set_pte(ptep, pte_mkclean(pte));
+ cpu_flush_page(va);
} else if ((options & MS_INVALIDATE)) {
goto invalidate;
}
continue;
invalidate:
- *mapping.pte &= ~PG_PRESENT;
- pmm_free_page(KERNEL_PID, mapping.pa);
- cpu_invplg(mapping.pte);
+ set_pte(ptep, null_pte);
+ pmm_free_page(pte_paddr(pte));
+ cpu_flush_page(va);
}
}
void
mem_unmap_region(ptr_t mnt, struct mm_region* region)
{
- size_t len = ROUNDUP(region->end - region->start, PG_SIZE);
- mem_sync_pages(mnt, region, region->start, len, 0);
-
- for (size_t i = region->start; i <= region->end; i += PG_SIZE) {
- ptr_t pa = vmm_del_mapping(mnt, i);
- if (pa) {
- pmm_free_page(__current->pid, pa);
+ if (!region) {
+ return;
+ }
+
+ valloc_ensure_valid(region);
+
+ pfn_t pglen = leaf_count(region->end - region->start);
+ mem_sync_pages(mnt, region, region->start, pglen * PAGE_SIZE, 0);
+
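+    // Dirty shared pages were written back above; now clear every leaf PTE
+    // of the region and hand the loaded frames back to the physical
+    // allocator.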
+ pte_t* ptep = mkptep_va(mnt, region->start);
+ for (size_t i = 0; i < pglen; i++, ptep++) {
+ pte_t pte = pte_at(ptep);
+ ptr_t pa = pte_paddr(pte);
+
+ set_pte(ptep, null_pte);
+ if (pte_isloaded(pte)) {
+ pmm_free_page(pte_paddr(pte));
}
}
+
    llist_delete(&region->head);
region_release(region);
}
-int
-mem_unmap(ptr_t mnt, vm_regions_t* regions, void* addr, size_t length)
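+// The CASE_* macros classify how the requested unmap segment
+// [addr, addr + len) overlaps a region: "head" and "tail" are the two ends
+// of the segment, "inside" means that end falls within the region, and
+// "extruded" means it sticks out past the region's boundary.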
+// Case: head inside, tail inside
+#define CASE_HITI(vmr, addr, len) \
+ ((vmr)->start <= (addr) && ((addr) + (len)) <= (vmr)->end)
+
+// Case: head inside, tail extruded
+#define CASE_HITE(vmr, addr, len) \
+ ((vmr)->start <= (addr) && ((addr) + (len)) > (vmr)->end)
+
+// Case: head extruded, tail inside
+#define CASE_HETI(vmr, addr, len) \
+ ((vmr)->start > (addr) && ((addr) + (len)) <= (vmr)->end)
+
+// Case: head extruded, tail extruded
+#define CASE_HETE(vmr, addr, len) \
+ ((vmr)->start > (addr) && ((addr) + (len)) > (vmr)->end)
+
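+// Apply the unmap request [*addr, *addr + *length) to one overlapping
+// region: split it when the request punches a hole in the middle, otherwise
+// trim the affected end (releasing the region entirely when nothing is
+// left), sync and free the covered pages, then advance *addr and shrink
+// *length by the portion that has been handled.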
+static void
+__unmap_overlapped_cases(ptr_t mnt,
+ struct mm_region* vmr,
+ ptr_t* addr,
+ size_t* length)
{
- length = ROUNDUP(length, PG_SIZE);
- ptr_t cur_addr = PG_ALIGN(addr);
- struct mm_region *pos, *n;
+    // segment start, unmapped segment start
+ ptr_t seg_start = *addr, umps_start = 0;
- llist_for_each(pos, n, regions, head)
- {
- if (pos->start <= cur_addr && pos->end >= cur_addr) {
- break;
+    // segment length, unmapped segment length
+ size_t seg_len = *length, umps_len = 0;
+
+ size_t displ = 0, shrink = 0;
+
+ if (CASE_HITI(vmr, seg_start, seg_len)) {
+ size_t new_start = seg_start + seg_len;
+
+        // The request ends inside this region: split off the kept tail as a new region
+ if (new_start < vmr->end) {
+ struct mm_region* region = region_dup(vmr);
+ if (region->mfile) {
+ size_t f_shifted = new_start - region->start;
+ region->foff += f_shifted;
+ }
+ region->start = new_start;
+            llist_insert_after(&vmr->head, &region->head);
}
+
+ shrink = vmr->end - seg_start;
+ umps_len = shrink;
+ umps_start = seg_start;
+ }
+ else if (CASE_HITE(vmr, seg_start, seg_len)) {
+ shrink = vmr->end - seg_start;
+ umps_len = shrink;
+ umps_start = seg_start;
+ }
+ else if (CASE_HETI(vmr, seg_start, seg_len)) {
+ displ = seg_len - (vmr->start - seg_start);
+ umps_len = displ;
+ umps_start = vmr->start;
+ }
+ else if (CASE_HETE(vmr, seg_start, seg_len)) {
+ shrink = vmr->end - vmr->start;
+ umps_len = shrink;
+ umps_start = vmr->start;
}
- while (&pos->head != regions && cur_addr >= pos->start) {
- u32_t l = pos->end - cur_addr;
- pos->end = cur_addr;
-
- if (l > length) {
- // unmap cause discontinunity in a memory region - do split
- struct mm_region* region = valloc(sizeof(struct mm_region));
- *region = *pos;
- region->start = cur_addr + length;
-            llist_insert_after(&pos->head, &region->head);
- l = length;
+    mem_sync_pages(mnt, vmr, umps_start, umps_len, 0);
+    for (size_t i = 0; i < umps_len; i += PAGE_SIZE) {
+        ptr_t pa = vmm_del_mapping(mnt, umps_start + i);
+ if (pa) {
+ pmm_free_page(pa);
}
+ }
- mem_sync_pages(mnt, pos, cur_addr, l, 0);
+ vmr->start += displ;
+ vmr->end -= shrink;
- for (size_t i = 0; i < l; i += PG_SIZE) {
- ptr_t pa = vmm_del_mapping(mnt, cur_addr + i);
- if (pa) {
- pmm_free_page(pos->proc_vms->pid, pa);
- }
+ if (vmr->start >= vmr->end) {
+ llist_delete(&vmr->head);
+ region_release(vmr);
+ } else if (vmr->mfile) {
+ vmr->foff += displ;
+ }
+
+ *addr = umps_start + umps_len;
+
+ size_t ump_len = *addr - seg_start;
+ *length = MAX(seg_len, ump_len) - ump_len;
+}
+
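+// Unmap [addr, addr + length): locate the first region the range can touch,
+// then walk the list forward, trimming each overlapped region in turn until
+// the requested length is exhausted.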
+int
+mem_unmap(ptr_t mnt, vm_regions_t* regions, ptr_t addr, size_t length)
+{
+ length = ROUNDUP(length, PAGE_SIZE);
+ ptr_t cur_addr = va_align(addr);
+ struct mm_region *pos, *n;
+
+ llist_for_each(pos, n, regions, head)
+ {
+ u32_t l = pos->start - cur_addr;
+ if ((pos->start <= cur_addr && cur_addr < pos->end) || l <= length) {
+ break;
}
+ }
+ while (&pos->head != regions && length) {
n = container_of(pos->head.next, typeof(*pos), head);
- if (pos->end == pos->start) {
- llist_delete(&pos->head);
- region_release(pos);
- }
+ __unmap_overlapped_cases(mnt, pos, &cur_addr, &length);
pos = n;
- length -= l;
- cur_addr += length;
}
return 0;
int errno = 0;
void* result = (void*)-1;
- if (!length || length > BS_SIZE || !PG_ALIGNED(addr)) {
- errno = EINVAL;
- goto done;
- }
+ ptr_t addr_ptr = (ptr_t)addr;
- if (!addr) {
- addr = UMMAP_START;
- } else if (addr < UMMAP_START || addr + length >= UMMAP_END) {
- errno = ENOMEM;
+ if (!length || length > BS_SIZE || va_offset(addr_ptr)) {
+ errno = EINVAL;
goto done;
}
- struct v_fd* vfd;
- if ((errno = vfs_getfd(fd, &vfd))) {
- goto done;
+ if (!addr_ptr) {
+ addr_ptr = USR_MMAP;
+ } else if (addr_ptr < USR_MMAP || addr_ptr + length >= USR_MMAP_END) {
+ if (!(options & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
+ errno = ENOMEM;
+ goto done;
+ }
}
- struct v_file* file = vfd->file;
+ struct v_file* file = NULL;
if (!(options & MAP_ANON)) {
+ struct v_fd* vfd;
+ if ((errno = vfs_getfd(fd, &vfd))) {
+ goto done;
+ }
+
+ file = vfd->file;
if (!file->ops->read_page) {
errno = ENODEV;
goto done;
}
- } else {
- file = NULL;
}
struct mmap_param param = { .flags = options,
- .mlen = ROUNDUP(length, PG_SIZE),
+ .mlen = ROUNDUP(length, PAGE_SIZE),
.offset = offset,
.type = REGION_TYPE_GENERAL,
.proct = proct,
- .pvms = &__current->mm,
+ .pvms = vmspace(__current),
.vms_mnt = VMS_SELF };
-    errno = mem_map(&result, NULL, addr, file, &param);
+    errno = mmap_user(&result, NULL, addr_ptr, file, &param);
done:
- __current->k_status = errno;
+ syscall_result(errno);
return result;
}
-__DEFINE_LXSYSCALL2(void, munmap, void*, addr, size_t, length)
+__DEFINE_LXSYSCALL2(int, munmap, void*, addr, size_t, length)
{
- return mem_unmap(VMS_SELF, &__current->mm.regions, addr, length);
+ return mem_unmap(
+ VMS_SELF, vmregions(__current), (ptr_t)addr, length);
}
__DEFINE_LXSYSCALL3(int, msync, void*, addr, size_t, length, int, flags)
{
- if (!PG_ALIGNED(addr) || ((flags & MS_ASYNC) && (flags & MS_SYNC))) {
+ if (va_offset((ptr_t)addr) || ((flags & MS_ASYNC) && (flags & MS_SYNC))) {
return DO_STATUS(EINVAL);
}
- int status =
- mem_msync(VMS_SELF, &__current->mm.regions, addr, length, flags);
+ int status = mem_msync(VMS_SELF,
+ vmregions(__current),
+ (ptr_t)addr,
+ length,
+ flags);
return DO_STATUS(status);
}
\ No newline at end of file