#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>

#include <sys/mm/mempart.h>

#include <usr/lunaix/mann_flags.h>
// any size beyond this is nonsense
#define BS_SIZE (KERNEL_EXEC - USR_MMAP)
int
mem_has_overlap(vm_regions_t* regions, ptr_t start, ptr_t end)
{
    struct mm_region *pos, *n;
    llist_for_each(pos, n, regions, head)
    {
        // the new range clips the tail of an existing region
        if (pos->end >= start && pos->start < start) {
            return 1;
        }

        // an existing region sits entirely within the new range
        if (pos->end <= end && pos->start >= start) {
            return 1;
        }

        // the new range clips the head of an existing region
        if (pos->end >= end && pos->start < end) {
            return 1;
        }
    }

    return 0;
}
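/*
 * Note: up to boundary handling, the three cases above amount to the
 * classic interval-intersection test. A minimal sketch (hypothetical
 * helper, not part of this file):
 *
 *     static inline int
 *     ranges_intersect(ptr_t a0, ptr_t a1, ptr_t b0, ptr_t b1)
 *     {
 *         return a0 <= b1 && b0 <= a1;
 *     }
 */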
int
mem_adjust_inplace(vm_regions_t* regions,
                   struct mm_region* region,
                   ptr_t newend)
{
    ssize_t len = newend - region->start;
    /* ... */

    if (mem_has_overlap(regions, region->start, newend)) {
        return ENOMEM;
    }

    /* ... */
}
int
mmap_user(void** addr_out,
          struct mm_region** created,
          ptr_t addr,
          struct v_file* file,
          struct mmap_param* param)
{
    // user mappings must fall within [USR_EXEC, KERNEL_EXEC)
    param->range_end = KERNEL_EXEC;
    param->range_start = USR_EXEC;

    return mem_map(addr_out, created, addr, file, param);
}
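/*
 * Usage sketch (hypothetical in-kernel caller): map one anonymous,
 * writable page into the current process, with USR_MMAP as the hint
 * (mem_map asserts a non-NULL hint). Error handling elided.
 *
 *     struct mmap_param param = { .flags = MAP_ANON,
 *                                 .mlen = PG_SIZE,
 *                                 .proct = REGION_WRITE,
 *                                 .type = REGION_TYPE_GENERAL,
 *                                 .pvms = vmspace(__current),
 *                                 .vms_mnt = VMS_SELF };
 *     void* va;
 *     int errno = mmap_user(&va, NULL, USR_MMAP, NULL, &param);
 */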
static ptr_t
__mem_find_slot_backward(struct mm_region* lead, struct mmap_param* param, struct mm_region* anchor)
{
    ptr_t size = param->mlen;
    struct mm_region *pos = anchor,
                     *n = next_region(pos);

    // scan towards higher addresses for a hole big enough
    while (/* ... not yet wrapped around ... */) {
        ptr_t end = n->start;
        if (/* ... no successor above pos ... */) {
            end = param->range_end;
        }

        if (end - pos->end >= size) {
            return pos->end;
        }

        pos = n;
        n = next_region(pos);
    }

    return 0;
}
static ptr_t
__mem_find_slot_forward(struct mm_region* lead, struct mmap_param* param, struct mm_region* anchor)
{
    ptr_t size = param->mlen;
    struct mm_region *pos = anchor,
                     *prev = prev_region(pos);

    // scan towards lower addresses for a hole big enough
    while (/* ... not yet wrapped around ... */) {
        ptr_t end = prev->end;
        if (/* ... no predecessor below pos ... */) {
            end = param->range_start;
        }

        // a fitting hole below pos: place the mapping at its top
        if (pos->start - end >= size) {
            return pos->start - size;
        }

        pos = prev;
        prev = prev_region(pos);
    }

    return 0;
}
static ptr_t
__mem_find_slot(vm_regions_t* lead, struct mmap_param* param, struct mm_region* anchor)
{
    ptr_t result;
    struct mm_region* _lead = get_region(lead);

    // prefer a slot above the anchor; fall back to searching below it
    if ((result = __mem_find_slot_backward(_lead, param, anchor))) {
        return result;
    }

    return __mem_find_slot_forward(_lead, param, anchor);
}
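/*
 * Illustration (hypothetical layout): with regions [0x5000, 0x7000)
 * and [0x9000, 0xA000), a 0x2000-byte request anchored at the first
 * region fits backward into the hole at 0x7000, since
 * 0x9000 - 0x7000 >= 0x2000; were that hole too small, the forward
 * pass would try to place it top-aligned below 0x5000 instead.
 */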
static struct mm_region*
__mem_find_nearest(vm_regions_t* lead, ptr_t addr)
{
    ptr_t min_dist = (ptr_t)-1;
    struct mm_region *pos, *n, *min = NULL;
    llist_for_each(pos, n, lead, head) {
        if (region_contains(pos, addr)) {
            return pos;
        }

        // distance from addr to the nearer edge of pos
        ptr_t dist = addr - pos->end;
        if (addr < pos->start) {
            dist = pos->start - addr;
        }

        if (dist < min_dist) {
            min_dist = dist;
            min = pos;
        }
    }

    return min;
}
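/*
 * Example: for regions [0x1000, 0x2000) and [0x8000, 0x9000), an addr
 * of 0x3000 yields distances of 0x1000 and 0x5000 respectively, so the
 * first region becomes the anchor for the slot search.
 */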
int
mem_map(void** addr_out,
        struct mm_region** created,
        ptr_t addr,
        struct v_file* file,
        struct mmap_param* param)
{
    assert_msg(addr, "addr cannot be NULL");

    ptr_t last_end = USR_EXEC, found_loc = PG_ALIGN(addr);
    struct mm_region *pos, *n;

    vm_regions_t* vm_regions = &param->pvms->regions;

    if ((param->flags & MAP_FIXED_NOREPLACE)) {
        if (mem_has_overlap(vm_regions, found_loc, param->mlen + found_loc)) {
            return EEXIST;
        }
        goto found;
    }

    if ((param->flags & MAP_FIXED)) {
        // evict whatever currently occupies the requested window
        mem_unmap(param->vms_mnt, vm_regions, found_loc, param->mlen);
        goto found;
    }

    if (llist_empty(vm_regions)) {
        goto found;
    }

    struct mm_region* anchor = __mem_find_nearest(vm_regions, found_loc);
    if ((found_loc = __mem_find_slot(vm_regions, param, anchor))) {
        goto found;
    }

    return ENOMEM;

found:
    if (found_loc >= param->range_end || found_loc < param->range_start) {
        return ENOMEM;
    }

    struct mm_region* region = region_create_range(
      found_loc,
      param->mlen,
      ((param->proct | param->flags) & 0x3f) | (param->type & ~0xffff));

    region->mfile = file;
    region->foff = param->offset;
    region->proc_vms = param->pvms;

    region_add(vm_regions, region);

    int proct = param->proct;
    int attr = PG_ALLOW_USER;
    if ((proct & REGION_WRITE)) {
        attr |= PG_WRITE;
    }
    if ((proct & REGION_KERNEL)) {
        attr &= ~PG_ALLOW_USER;
    }

    for (size_t i = 0; i < param->mlen; i += PG_SIZE) {
        vmm_set_mapping(param->vms_mnt, found_loc + i, 0, attr, 0);
    }

    if (created) {
        *created = region;
    }

    *addr_out = (void*)found_loc;

    return 0;
}
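/*
 * Note: mem_map() installs every page with a physical address of zero,
 * i.e. the mapping is merely reserved in the page table; backing frames
 * are presumably allocated on first access by the page-fault handler
 * (and, for file-backed regions, filled through read_page()).
 */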
int
mem_remap(void** addr_out,
          struct mm_region** remapped,
          /* ... */
          struct mmap_param* param)
{
    /* ... */
}
void
mem_sync_pages(ptr_t mnt,
               struct mm_region* region,
               ptr_t start,
               size_t length,
               int options)
{
    // only shared, file-backed mappings have anything to write back
    if (!region->mfile || !(region->attr & REGION_WSHARED)) {
        return;
    }

    struct v_mapping mapping;
    for (size_t i = 0; i < length; i += PG_SIZE) {
        if (!vmm_lookupat(mnt, start + i, &mapping)) {
            continue;
        }

        if (PG_IS_DIRTY(*mapping.pte)) {
            size_t offset = mapping.va - region->start + region->foff;
            struct v_inode* inode = region->mfile->inode;

            region->mfile->ops->write_page(inode, (void*)mapping.va, offset);

            *mapping.pte &= ~PG_DIRTY;

            cpu_flush_page((ptr_t)mapping.pte);
        } else if ((options & MS_INVALIDATE)) {
            /* ... */
            if (options & MS_INVALIDATE_ALL) {
                /* ... */
                *mapping.pte &= ~PG_PRESENT;
                pmm_free_page(mapping.pa);
                cpu_flush_page((ptr_t)mapping.pte);
            }
        }
    }
}
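/*
 * Summary of the two paths above: dirty pages are written back through
 * write_page() and have their dirty bit cleared; clean pages are only
 * dropped (mapping removed, frame freed) when MS_INVALIDATE_ALL is
 * requested.
 */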
int
mem_msync(ptr_t mnt,
          vm_regions_t* regions,
          ptr_t addr,
          size_t length,
          int options)
{
    struct mm_region* pos = list_entry(regions->next, struct mm_region, head);
    while (length && (ptr_t)&pos->head != (ptr_t)regions) {
        if (pos->end >= addr && pos->start <= addr) {
            size_t l = MIN(length, pos->end - addr);
            mem_sync_pages(mnt, pos, addr, l, options);

            addr += l;
            length -= l;
        }

        pos = list_entry(pos->head.next, struct mm_region, head);
    }

    return 0;
}
void
mem_unmap_region(ptr_t mnt, struct mm_region* region)
{
    valloc_ensure_valid(region);

    size_t len = ROUNDUP(region->end - region->start, PG_SIZE);
    mem_sync_pages(mnt, region, region->start, len, 0);

    for (size_t i = region->start; i < region->end; i += PG_SIZE) {
        ptr_t pa = vmm_del_mapping(mnt, i);
        if (pa) {
            pmm_free_page(pa);
        }
    }

    llist_delete(&region->head);
    region_release(region);
}
// Case: head inside, tail inside (segment falls within the region)
#define CASE_HITI(vmr, addr, len) \
    ((vmr)->start <= (addr) && ((addr) + (len)) <= (vmr)->end)

// Case: head inside, tail extruded (segment overshoots the region end)
#define CASE_HITE(vmr, addr, len) \
    ((vmr)->start <= (addr) && ((addr) + (len)) > (vmr)->end)

// Case: head extruded, tail inside (segment starts below the region)
#define CASE_HETI(vmr, addr, len) \
    ((vmr)->start > (addr) && ((addr) + (len)) <= (vmr)->end)

// Case: head extruded, tail extruded (segment covers the region)
#define CASE_HETE(vmr, addr, len) \
    ((vmr)->start > (addr) && ((addr) + (len)) > (vmr)->end)
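/*
 * Illustration (hypothetical numbers): for a region [0x2000, 0x6000),
 * unmapping addr=0x3000, len=0x1000 is HITI (punches a hole),
 * addr=0x5000, len=0x2000 is HITE (trims the tail), addr=0x1000,
 * len=0x2000 is HETI (trims the head), and addr=0x1000, len=0x6000
 * is HETE (the region is swallowed whole).
 */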
static void
__unmap_overlapped_cases(ptr_t mnt,
                         struct mm_region* vmr,
                         ptr_t* addr,
                         size_t* length)
{
    // segment start; start of the span actually being unmapped
    ptr_t seg_start = *addr, umps_start = 0;

    // segment length; length of the span actually being unmapped
    size_t seg_len = *length, umps_len = 0;

    size_t displ = 0, shrink = 0;

    if (CASE_HITI(vmr, seg_start, seg_len)) {
        // punch a hole: keep the head, split the tail into a new region
        size_t new_start = seg_start + seg_len;

        if (new_start < vmr->end) {
            struct mm_region* region = region_dup(vmr);

            size_t f_shifted = new_start - region->start;
            region->foff += f_shifted;

            region->start = new_start;
            llist_insert_after(&vmr->head, &region->head);
        }

        shrink = vmr->end - seg_start;
        umps_len = seg_len;
        umps_start = seg_start;
    } else if (CASE_HITE(vmr, seg_start, seg_len)) {
        // trim the tail
        shrink = vmr->end - seg_start;
        umps_len = shrink;
        umps_start = seg_start;
    } else if (CASE_HETI(vmr, seg_start, seg_len)) {
        // trim the head
        displ = seg_len - (vmr->start - seg_start);
        umps_len = displ;
        umps_start = vmr->start;
    } else if (CASE_HETE(vmr, seg_start, seg_len)) {
        // the whole region goes away
        shrink = vmr->end - vmr->start;
        umps_len = shrink;
        umps_start = vmr->start;
    }

    mem_sync_pages(mnt, vmr, umps_start, umps_len, 0);
    for (size_t i = 0; i < umps_len; i += PG_SIZE) {
        ptr_t pa = vmm_del_mapping(mnt, umps_start + i);
        if (pa) {
            pmm_free_page(pa);
        }
    }

    vmr->start += displ;
    vmr->end -= shrink;

    if (vmr->start >= vmr->end) {
        llist_delete(&vmr->head);
        region_release(vmr);
    } else if (vmr->mfile) {
        vmr->foff += displ;
    }

    *addr = umps_start + umps_len;

    size_t ump_len = *addr - seg_start;
    *length = MAX(seg_len, ump_len) - ump_len;
}
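/*
 * Note: on return, *addr points just past the span handled in this
 * region and *length holds what remains of the request, so the caller
 * can keep walking the region list until the range is exhausted.
 */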
int
mem_unmap(ptr_t mnt, vm_regions_t* regions, ptr_t addr, size_t length)
{
    length = ROUNDUP(length, PG_SIZE);
    ptr_t cur_addr = PG_ALIGN(addr);
    struct mm_region *pos, *n;

    // find the first region the request could touch
    llist_for_each(pos, n, regions, head)
    {
        u32_t l = pos->start - cur_addr;
        if ((pos->start <= cur_addr && cur_addr < pos->end) || l <= length) {
            break;
        }
    }

    while (&pos->head != regions && length) {
        n = container_of(pos->head.next, typeof(*pos), head);
        __unmap_overlapped_cases(mnt, pos, &cur_addr, &length);
        pos = n;
    }

    return 0;
}
__DEFINE_LXSYSCALL3(void*, sys_mmap, void*, addr, size_t, length, va_list, lst)
{
    int proct = va_arg(lst, int);
    int fd = va_arg(lst, u32_t);
    off_t offset = va_arg(lst, off_t);
    int options = va_arg(lst, int);

    void* result = (void*)-1;
    int errno = 0;
    ptr_t addr_ptr = (ptr_t)addr;

    if (!length || length > BS_SIZE || !PG_ALIGNED(addr_ptr)) {
        errno = EINVAL;
        goto done;
    } else if (addr_ptr < USR_MMAP || addr_ptr + length >= USR_MMAP_END) {
        // a bad hint is only honoured for fixed mappings; otherwise
        // fall back to the start of the mmap window
        if (!(options & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
            addr_ptr = USR_MMAP;
        }
    }

    struct v_file* file = NULL;

    if (!(options & MAP_ANON)) {
        struct v_fd* vfd;
        if ((errno = vfs_getfd(fd, &vfd))) {
            goto done;
        }

        file = vfd->file;
        if (!file->ops->read_page) {
            errno = ENODEV;
            goto done;
        }
    }

    struct mmap_param param = { .flags = options,
                                .mlen = ROUNDUP(length, PG_SIZE),
                                .offset = offset,
                                .type = REGION_TYPE_GENERAL,
                                .proct = proct,
                                .pvms = vmspace(__current),
                                .vms_mnt = VMS_SELF };

    errno = mmap_user(&result, NULL, addr_ptr, file, &param);

done:
    syscall_result(errno);
    return result;
}
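/*
 * Call sketch (hypothetical; arguments follow the va_arg order above:
 * proct, fd, offset, options):
 *
 *     void* p = sys_mmap(NULL, PG_SIZE, REGION_WRITE, -1, 0, MAP_ANON);
 *
 * A NULL hint is page-aligned but below USR_MMAP, so it is relocated
 * to the start of the mmap window before mem_map() runs.
 */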
__DEFINE_LXSYSCALL2(int, munmap, void*, addr, size_t, length)
{
    return mem_unmap(
        VMS_SELF, vmregions(__current), (ptr_t)addr, length);
}
__DEFINE_LXSYSCALL3(int, msync, void*, addr, size_t, length, int, flags)
{
    // MS_ASYNC and MS_SYNC are mutually exclusive
    if (!PG_ALIGNED(addr) || ((flags & MS_ASYNC) && (flags & MS_SYNC))) {
        return DO_STATUS(EINVAL);
    }

    int status = mem_msync(VMS_SELF,
                           vmregions(__current),
                           (ptr_t)addr,
                           length,
                           flags);

    return DO_STATUS(status);
}
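/*
 * Example (hypothetical): msync(p, PG_SIZE, MS_SYNC) walks the regions
 * covering [p, p + PG_SIZE) and, via mem_sync_pages(), writes dirty
 * pages of shared file-backed mappings back through write_page().
 */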