#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>

#include <sys/mm/mempart.h>
// any size beyond this is bogus
#define BS_SIZE (KERNEL_EXEC - USR_MMAP)
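/*
 * Check whether [start, end] collides with any region on the list.
 * The three tests cover overlap across a region's tail, containment
 * of the range inside a region, and overlap across a region's head.
 */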
int
mem_has_overlap(vm_regions_t* regions, ptr_t start, ptr_t end)
{
    struct mm_region *pos, *n;
    llist_for_each(pos, n, regions, head)
    {
        if (pos->end >= start && pos->start < start) {
            return 1;
        }

        if (pos->end <= end && pos->start >= start) {
            return 1;
        }

        if (pos->end >= end && pos->start < end) {
            return 1;
        }
    }

    return 0;
}
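/*
 * Grow or shrink a region in place. The body is a minimal sketch,
 * assuming the natural check-then-commit flow: fail with ENOMEM when
 * the new extent would collide with a neighbour, otherwise just move
 * the region's end.
 */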
int
mem_adjust_inplace(vm_regions_t* regions,
                   struct mm_region* region,
                   ptr_t newend)
{
    ssize_t len = newend - region->start;
    assert(len > 0);

    if (mem_has_overlap(regions, region->start, newend)) {
        // assumed: refuse to grow over a neighbouring region
        return ENOMEM;
    }

    region->end = newend;
    return 0;
}
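/*
 * Allocate and install a new mapping. MAP_FIXED_NOREPLACE fails on any
 * overlap, MAP_FIXED evicts the overlapped range first; otherwise a
 * first-fit scan over the region list picks the spot. Pages are mapped
 * empty and populated on first fault. The goto-based control flow and
 * the errno choices (EEXIST, ENOMEM) are assumptions.
 */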
int
mem_map(void** addr_out,
        struct mm_region** created,
        ptr_t addr,
        struct v_file* file,
        struct mmap_param* param)
{
    assert_msg(addr, "addr can not be NULL");

    ptr_t last_end = USR_EXEC, found_loc = addr;
    struct mm_region *pos, *n;

    vm_regions_t* vm_regions = &param->pvms->regions;

    if ((param->flags & MAP_FIXED_NOREPLACE)) {
        if (mem_has_overlap(vm_regions, found_loc, param->mlen + found_loc)) {
            return EEXIST;
        }
        goto found;
    }

    if ((param->flags & MAP_FIXED)) {
        // caller insists on this address: evict whatever is mapped there
        mem_unmap(param->vms_mnt, vm_regions, found_loc, param->mlen);
        goto found;
    }

    // first-fit scan: bump found_loc past each region until a gap fits
    llist_for_each(pos, n, vm_regions, head)
    {
        if (last_end < found_loc) {
            size_t avail_space = pos->start - found_loc;
            if (pos->start > found_loc && avail_space > param->mlen) {
                break;
            }
            found_loc = pos->end + MEM_PAGE;
        }
        last_end = pos->end;
    }

found:
    if (found_loc >= KERNEL_EXEC || found_loc < USR_EXEC) {
        return ENOMEM;
    }

    struct mm_region* region = region_create_range(
      found_loc,
      param->mlen,
      ((param->proct | param->flags) & 0x3f) | (param->type & ~0xffff));

    region->mfile = file;
    region->foff = param->offset;
    region->proc_vms = param->pvms;

    region_add(vm_regions, region);

    u32_t attr = PG_ALLOW_USER;
    if ((param->proct & REGION_WRITE)) {
        attr |= PG_WRITE; // writable-page flag (name assumed)
    }

    // install empty mappings; physical pages arrive on first fault
    for (u32_t i = 0; i < param->mlen; i += PG_SIZE) {
        vmm_set_mapping(param->vms_mnt, found_loc + i, 0, attr, 0);
    }

    *addr_out = (void*)found_loc;
    if (created) {
        *created = region;
    }

    return 0;
}
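/*
 * Resize an existing mapping (the mremap-style counterpart of mem_map).
 * Everything below the signature is a hedged sketch, assuming the fast
 * path of growing the region in place through mem_adjust_inplace(); the
 * addr and len parameters are likewise assumed.
 */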
int
mem_remap(void** addr_out,
          struct mm_region** remapped,
          ptr_t addr,
          size_t len,
          struct mmap_param* param)
{
    struct mm_region *pos, *n;
    vm_regions_t* regions = &param->pvms->regions;

    // find the region holding addr and try to grow it in place
    llist_for_each(pos, n, regions, head)
    {
        if (pos->start <= addr && addr < pos->end) {
            int errno = mem_adjust_inplace(regions, pos, addr + len);
            if (!errno) {
                *addr_out = (void*)addr;
                if (remapped) {
                    *remapped = pos;
                }
            }
            return errno;
        }
    }

    return EINVAL;
}
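/*
 * Synchronise one region's pages with its backing file: dirty pages are
 * written back through write_page() and marked clean; clean pages are
 * dropped entirely when MS_INVALIDATE is requested.
 */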
void
mem_sync_pages(ptr_t mnt,
               struct mm_region* region,
               ptr_t start,
               size_t length,
               int options)
{
    // only write-shared, file-backed regions have anything to sync
    if (!region->mfile || !(region->attr & REGION_WSHARED)) {
        return;
    }

    v_mapping mapping; // descriptor filled by vmm_lookupat (type name assumed)
    for (size_t i = 0; i < length; i += PG_SIZE) {
        if (!vmm_lookupat(mnt, start + i, &mapping)) {
            continue;
        }

        if (PG_IS_DIRTY(*mapping.pte)) {
            size_t offset = mapping.va - region->start + region->foff;
            struct v_inode* inode = region->mfile->inode;

            region->mfile->ops->write_page(inode, (void*)mapping.va, offset);

            *mapping.pte &= ~PG_DIRTY;

            cpu_flush_page((ptr_t)mapping.pte);
        } else if ((options & MS_INVALIDATE)) {
            if (options & MS_INVALIDATE_ALL) {
                // presumably widens the invalidation to shared mappings
            }

            // page is clean: drop it so the next touch refetches from file
            *mapping.pte &= ~PG_PRESENT;
            pmm_free_page(KERNEL_PID, mapping.pa);
            cpu_flush_page((ptr_t)mapping.pte);
        }
    }
}
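/*
 * msync(2) backend: walk the region list and sync every region that
 * intersects [addr, addr + length).
 */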
int
mem_msync(ptr_t mnt,
          vm_regions_t* regions,
          ptr_t addr,
          size_t length,
          int options)
{
    struct mm_region* pos = list_entry(regions->next, struct mm_region, head);
    while (length && (ptr_t)&pos->head != (ptr_t)regions) {
        if (pos->end >= addr && pos->start <= addr) {
            size_t l = MIN(length, pos->end - addr);
            mem_sync_pages(mnt, pos, addr, l, options);

            addr += l;
            length -= l;
        }
        pos = list_entry(pos->head.next, struct mm_region, head);
    }

    return 0;
}
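/*
 * Destroy a whole region: flush it to its backing file (if any), free
 * every mapped physical page, then unlink and release the region.
 */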
void
mem_unmap_region(ptr_t mnt, struct mm_region* region)
{
    size_t len = ROUNDUP(region->end - region->start, PG_SIZE);
    mem_sync_pages(mnt, region, region->start, len, 0);

    for (size_t i = region->start; i < region->end; i += PG_SIZE) {
        ptr_t pa = vmm_del_mapping(mnt, i);
        if (pa) {
            pmm_free_page(__current->pid, pa);
        }
    }

    llist_delete(&region->head);
    region_release(region);
}
// Case: head inside, tail inside
#define CASE_HITI(vmr, addr, len)                                             \
    ((vmr)->start <= (addr) && ((addr) + (len)) <= (vmr)->end)

// Case: head inside, tail extruded
#define CASE_HITE(vmr, addr, len)                                             \
    ((vmr)->start <= (addr) && ((addr) + (len)) > (vmr)->end)

// Case: head extruded, tail inside
#define CASE_HETI(vmr, addr, len)                                             \
    ((vmr)->start > (addr) && ((addr) + (len)) <= (vmr)->end)

// Case: head extruded, tail extruded
#define CASE_HETE(vmr, addr, len)                                             \
    ((vmr)->start > (addr) && ((addr) + (len)) > (vmr)->end)
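/*
 * Clip one region against the unmap request [*addr, *addr + *length),
 * using the four cases above: HITI punches a hole (splitting the region
 * with region_dup() when the hole is strictly interior), HITE trims the
 * tail, HETI trims the head, HETE swallows the region whole. On return,
 * *addr and *length describe whatever part of the request remains.
 */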
static void
__unmap_overlapped_cases(ptr_t mnt,
                         struct mm_region* vmr,
                         ptr_t* addr,
                         size_t* length)
{
    // seg start, unmapped segment start
    ptr_t seg_start = *addr, umps_start = 0;

    // seg len, unmapped segment len
    size_t seg_len = *length, umps_len = 0;

    size_t displ = 0, shrink = 0;

    if (CASE_HITI(vmr, seg_start, seg_len)) {
        size_t new_start = seg_start + seg_len;

        if (new_start < vmr->end) {
            // the hole is strictly interior: clone the surviving tail
            struct mm_region* region = region_dup(vmr);

            size_t f_shifted = new_start - region->start;
            region->foff += f_shifted;

            region->start = new_start;
            llist_insert_after(&vmr->head, &region->head);
        }

        shrink = vmr->end - seg_start;
        umps_len = seg_len;
        umps_start = seg_start;
    } else if (CASE_HITE(vmr, seg_start, seg_len)) {
        shrink = vmr->end - seg_start;
        umps_len = shrink;
        umps_start = seg_start;
    } else if (CASE_HETI(vmr, seg_start, seg_len)) {
        displ = seg_len - (vmr->start - seg_start);
        umps_len = displ;
        umps_start = vmr->start;
    } else if (CASE_HETE(vmr, seg_start, seg_len)) {
        shrink = vmr->end - vmr->start;
        umps_len = shrink;
        umps_start = vmr->start;
    }

    mem_sync_pages(mnt, vmr, vmr->start, umps_len, 0);
    for (size_t i = 0; i < umps_len; i += PG_SIZE) {
        ptr_t pa = vmm_del_mapping(mnt, vmr->start + i);
        if (pa) {
            pmm_free_page(vmr->proc_vms->pid, pa);
        }
    }

    vmr->start += displ;
    vmr->end -= shrink;

    if (vmr->start >= vmr->end) {
        llist_delete(&vmr->head);
        region_release(vmr);
    } else if (vmr->mfile) {
        vmr->foff += displ;
    }

    *addr = umps_start + umps_len;

    size_t ump_len = *addr - seg_start;
    *length = MAX(seg_len, ump_len) - ump_len;
}
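/*
 * Unmap an arbitrary page-aligned range: locate the first region the
 * range can touch, then apply __unmap_overlapped_cases() region by
 * region until the requested length is fully consumed.
 */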
int
mem_unmap(ptr_t mnt, vm_regions_t* regions, ptr_t addr, size_t length)
{
    length = ROUNDUP(length, PG_SIZE);
    ptr_t cur_addr = PG_ALIGN(addr);
    struct mm_region *pos, *n;

    llist_for_each(pos, n, regions, head)
    {
        u32_t l = pos->start - cur_addr;
        if ((pos->start <= cur_addr && cur_addr < pos->end) || l <= length) {
            break;
        }
    }

    while (&pos->head != regions && length) {
        n = container_of(pos->head.next, typeof(*pos), head);
        __unmap_overlapped_cases(mnt, pos, &cur_addr, &length);
        pos = n;
    }

    return 0;
}
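/*
 * mmap(2) entry point. The first two arguments arrive directly; the
 * rest (protection, fd, offset, options) travel in a va_list. Anonymous
 * mappings skip the file lookup; file-backed ones require the
 * filesystem to implement read_page. The errno choices on the
 * validation paths (EINVAL, ENOMEM, ENODEV) are assumed.
 */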
__DEFINE_LXSYSCALL3(void*, sys_mmap, void*, addr, size_t, length, va_list, lst)
{
    int proct = va_arg(lst, int);
    int fd = va_arg(lst, u32_t);
    off_t offset = va_arg(lst, off_t);
    int options = va_arg(lst, int);

    int errno = 0;
    void* result = (void*)-1;
    ptr_t addr_ptr = (ptr_t)addr;

    if (!length || length > BS_SIZE || !PG_ALIGNED(addr_ptr)) {
        errno = EINVAL;
        goto done;
    } else if (addr_ptr < USR_MMAP || addr_ptr + length >= USR_MMAP_END) {
        if (!(options & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
            // no strict placement requested: fall back to the mmap arena
            addr_ptr = USR_MMAP;
        } else {
            errno = ENOMEM;
            goto done;
        }
    }

    struct v_file* file = NULL;

    if (!(options & MAP_ANON)) {
        struct v_fd* vfd;
        if ((errno = vfs_getfd(fd, &vfd))) {
            goto done;
        }

        file = vfd->file;
        if (!file->ops->read_page) {
            errno = ENODEV;
            goto done;
        }
    }

    struct mmap_param param = { .flags = options,
                                .mlen = ROUNDUP(length, PG_SIZE),
                                .offset = offset,
                                .proct = proct,
                                .type = REGION_TYPE_GENERAL,
                                .pvms = (struct proc_mm*)&__current->mm,
                                .vms_mnt = VMS_SELF };

    errno = mem_map(&result, NULL, addr_ptr, file, &param);

done:
    __current->k_status = errno;
    return result;
}
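/* munmap(2): delegate straight to mem_unmap on the current vm space. */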
__DEFINE_LXSYSCALL2(int, munmap, void*, addr, size_t, length)
{
    int status = mem_unmap(
      VMS_SELF, (vm_regions_t*)&__current->mm.regions, (ptr_t)addr, length);
    return DO_STATUS(status);
}
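/*
 * msync(2): MS_ASYNC and MS_SYNC are mutually exclusive and the address
 * must be page-aligned; everything else is handled by mem_msync.
 */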
__DEFINE_LXSYSCALL3(int, msync, void*, addr, size_t, length, int, flags)
{
    if (!PG_ALIGNED(addr) || ((flags & MS_ASYNC) && (flags & MS_SYNC))) {
        return DO_STATUS(EINVAL);
    }

    int status = mem_msync(VMS_SELF,
                           (vm_regions_t*)&__current->mm.regions,
                           (ptr_t)addr,
                           length,
                           flags);

    return DO_STATUS(status);
}