#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
#include <lunaix/syscall.h>
#include <lunaix/syscall_utils.h>

/* assumed headers for symbols used below (v_file, __current, E* codes) */
#include <lunaix/fs.h>
#include <lunaix/process.h>
#include <lunaix/status.h>

// reject any mapping larger than the entire user mmap window
#define BS_SIZE (KERNEL_MM_BASE - UMMAP_START)
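
/*
 * Check whether [start, end] intersects any region already present
 * in the address-ordered region list.
 */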
int
mem_has_overlap(vm_regions_t* regions, ptr_t start, ptr_t end)
{
    struct mm_region *pos, *n;
    llist_for_each(pos, n, regions, head)
    {
        // existing region straddles the start of the range
        if (pos->end >= start && pos->start < start) {
            return 1;
        }
        // existing region sits entirely inside the range
        if (pos->end <= end && pos->start >= start) {
            return 1;
        }
        // existing region straddles the end of the range
        if (pos->end >= end && pos->start < end) {
            return 1;
        }
    }
    return 0;
}
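
/*
 * Resize a region in place so that it ends at newend. Fails with
 * ENOMEM when the new extent would overlap a neighboring region.
 */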
int
mem_adjust_inplace(vm_regions_t* regions,
                   struct mm_region* region,
                   ptr_t newend)
{
    ssize_t len = newend - region->start;
    if (len <= 0) {
        return EINVAL;
    }

    // unlink while probing so the region does not collide with itself
    llist_delete(&region->head);

    if (mem_has_overlap(regions, region->start, newend)) {
        region_add(regions, region);
        return ENOMEM;
    }

    region->end = newend;
    region_add(regions, region);
    return 0;
}
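
/*
 * Establish a new mapping in the address space described by param.
 * MAP_FIXED_NOREPLACE and MAP_FIXED honor the requested address;
 * otherwise the address is a hint that seeds a first-fit search.
 * Only placeholder (pa = 0) entries are installed here, so pages can
 * be populated lazily on first access.
 */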
int
mem_map(void** addr_out,
        struct mm_region** created,
        void* addr,
        struct v_file* file,
        struct mmap_param* param)
{
    assert_msg(addr, "addr cannot be NULL");

    ptr_t last_end = USER_START, found_loc = (ptr_t)addr;
    struct mm_region *pos, *n;

    vm_regions_t* vm_regions = &param->pvms->regions;

    if ((param->flags & MAP_FIXED_NOREPLACE)) {
        // exact placement requested; refuse to disturb existing mappings
        if (mem_has_overlap(vm_regions, found_loc, param->mlen + found_loc)) {
            return EEXIST;
        }
    } else if ((param->flags & MAP_FIXED)) {
        // exact placement requested; evict whatever occupies the range
        mem_unmap(param->vms_mnt, vm_regions, (void*)found_loc, param->mlen);
    } else {
        // hint only: first-fit scan through the gaps between regions
        llist_for_each(pos, n, vm_regions, head)
        {
            if (last_end < found_loc) {
                size_t avail_space = pos->start - found_loc;
                if (pos->start > found_loc && avail_space > param->mlen) {
                    break;
                }
                found_loc = pos->end + PG_SIZE;
            }
            last_end = pos->end;
        }
    }

    if (found_loc >= KERNEL_MM_BASE || found_loc < USER_START) {
        return ENOMEM;
    }

    struct mm_region* region = region_create_range(
      found_loc,
      param->mlen,
      ((param->proct | param->flags) & 0x3f) | (param->type & ~0xffff));

    region->mfile = file;
    region->foff = param->offset;
    region->flen = param->flen;
    region->proc_vms = param->pvms;

    region_add(vm_regions, region);

    u32_t attr = PG_ALLOW_USER;
    if ((param->proct & REGION_WRITE)) {
        attr |= PG_WRITE;
    }

    // install empty (pa = 0) entries now; real frames are expected
    // to be demand-populated on first access
    for (u32_t i = 0; i < param->mlen; i += PG_SIZE) {
        vmm_set_mapping(param->vms_mnt, found_loc + i, 0, attr, 0);
    }

    if (created) {
        *created = region;
    }

    *addr_out = (void*)found_loc;
    return 0;
}
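
/*
 * Adjust an existing mapping according to param.
 * NOTE: the middle parameters are assumed to mirror mem_map, and the
 * stub body below is a placeholder.
 */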
int
mem_remap(void** addr_out,
          struct mm_region** remapped,
          void* addr,
          struct mmap_param* param)
{
    // TODO: grow, shrink or relocate an existing mapping
    return ENOSYS;
}
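
/*
 * Write a range of a region back to its file. Only write-shared,
 * file-backed regions participate: dirty pages are flushed through
 * write_page, and MS_INVALIDATE allows clean pages to be dropped so
 * they are re-read from the file on the next access.
 */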
void
mem_sync_pages(ptr_t mnt,
               struct mm_region* region,
               ptr_t start,
               size_t length,
               int options)
{
    if (!region->mfile || !(region->attr & REGION_WSHARED)) {
        return;
    }

    struct v_mapping mapping;
    for (size_t i = 0; i < length; i += PG_SIZE) {
        if (!vmm_lookupat(mnt, start + i, &mapping)) {
            continue;
        }

        if (PG_IS_DIRTY(*mapping.pte)) {
            // flush the dirty page, then clear the dirty bit
            size_t offset = mapping.va - region->start + region->foff;
            struct v_inode* inode = region->mfile->inode;
            region->mfile->ops->write_page(inode, mapping.va, PG_SIZE, offset);
            *mapping.pte &= ~PG_DIRTY;
            cpu_invplg(mapping.va);
        } else if ((options & MS_INVALIDATE)) {
            if ((options & MS_INVALIDATE_ALL)) {
                // drop the clean page so the next access re-reads it
                *mapping.pte &= ~PG_PRESENT;
                pmm_free_page(KERNEL_PID, mapping.pa);
                cpu_invplg(mapping.va);
            }
        }
    }
}
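
/*
 * Apply mem_sync_pages over every region intersecting
 * [addr, addr + length).
 */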
void
mem_msync(ptr_t mnt,
          vm_regions_t* regions,
          ptr_t addr,
          size_t length,
          int options)
{
    struct mm_region* pos = list_entry(regions->next, struct mm_region, head);
    while (length && (ptr_t)&pos->head != (ptr_t)regions) {
        if (pos->end >= addr && pos->start <= addr) {
            size_t l = MIN(length, pos->end - addr);
            mem_sync_pages(mnt, pos, addr, l, options);
            addr += l;
            length -= l;
        }
        pos = list_entry(pos->head.next, struct mm_region, head);
    }
}
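
/*
 * Tear down an entire region: sync it back to its file (if any),
 * release every backing frame, then unlink and free the region.
 */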
void
mem_unmap_region(ptr_t mnt, struct mm_region* region)
{
    size_t len = ROUNDUP(region->end - region->start, PG_SIZE);
    mem_sync_pages(mnt, region, region->start, len, 0);

    for (size_t i = region->start; i < region->end; i += PG_SIZE) {
        ptr_t pa = vmm_del_mapping(mnt, i);
        if (pa) {
            pmm_free_page(__current->pid, pa);
        }
    }

    llist_delete(&region->head);
    region_release(region);
}
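
/*
 * Unmap a page-aligned range. A range ending inside a region
 * truncates it; a range falling in the middle splits the region in
 * two and releases only the hole.
 */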
int
mem_unmap(ptr_t mnt, vm_regions_t* regions, void* addr, size_t length)
{
    length = ROUNDUP(length, PG_SIZE);
    ptr_t cur_addr = PG_ALIGN((ptr_t)addr);
    struct mm_region *pos, *n;

    // find the first region containing cur_addr
    llist_for_each(pos, n, regions, head)
    {
        if (pos->start <= cur_addr && pos->end >= cur_addr) {
            break;
        }
    }

    while ((ptr_t)&pos->head != (ptr_t)regions && cur_addr >= pos->start) {
        u32_t l = pos->end - cur_addr;
        if (l > length) {
            // unmapping causes a discontinuity in this memory region -
            // split off the surviving tail as a new region
            struct mm_region* region = valloc(sizeof(struct mm_region));
            *region = *pos;
            region->start = cur_addr + length;
            region->foff += region->start - pos->start;
            llist_insert_after(&pos->head, &region->head);
            l = length;
        }

        mem_sync_pages(mnt, pos, cur_addr, l, 0);

        for (size_t i = 0; i < l; i += PG_SIZE) {
            ptr_t pa = vmm_del_mapping(mnt, cur_addr + i);
            if (pa) {
                pmm_free_page(pos->proc_vms->pid, pa);
            }
        }

        // the surviving head part (if any) now ends where the hole begins
        pos->end = cur_addr;

        n = container_of(pos->head.next, typeof(*pos), head);
        if (pos->end == pos->start) {
            llist_delete(&pos->head);
            region_release(pos);
        }

        cur_addr += l;
        length -= l;
        pos = n;
    }

    return 0;
}
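
/*
 * mmap(2) entry: addr and length arrive as fixed arguments, while
 * proct, fd, offset and options are fetched from the variadic list.
 * On error, (void*)-1 is returned and the error code is deposited
 * in k_status.
 */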
__DEFINE_LXSYSCALL3(void*, sys_mmap, void*, addr, size_t, length, va_list, lst)
{
    int proct = va_arg(lst, int);
    int fd = va_arg(lst, u32_t);
    off_t offset = va_arg(lst, off_t);
    int options = va_arg(lst, int);

    int errno = 0;
    void* result = (void*)-1;

    if (!length || length > BS_SIZE || !PG_ALIGNED((ptr_t)addr)) {
        errno = EINVAL;
        goto done;
    } else if ((ptr_t)addr < UMMAP_START || (ptr_t)addr + length >= UMMAP_END) {
        if (!(options & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
            // the hint is only advisory - fall back into the mmap window
            addr = (void*)UMMAP_START;
        } else {
            errno = ENOMEM;
            goto done;
        }
    }

    struct v_file* file = NULL;

    if (!(options & MAP_ANON)) {
        struct v_fd* vfd;
        if ((errno = vfs_getfd(fd, &vfd))) {
            goto done;
        }
        file = vfd->file;
        // a mappable file must provide a pager
        if (!file->ops->read_page) {
            errno = ENODEV;
            goto done;
        }
    }

    struct mmap_param param = { .flags = options,
                                .mlen = ROUNDUP(length, PG_SIZE),
                                .flen = length,
                                .offset = offset,
                                .type = REGION_TYPE_GENERAL,
                                .proct = proct,
                                .pvms = &__current->mm,
                                .vms_mnt = VMS_SELF };

    errno = mem_map(&result, NULL, addr, file, &param);

done:
    __current->k_status = errno;
    return result;
}
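
/* munmap(2): delegate to mem_unmap on the caller's own VM space. */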
__DEFINE_LXSYSCALL2(int, munmap, void*, addr, size_t, length)
{
    return DO_STATUS(mem_unmap(VMS_SELF, &__current->mm.regions, addr, length));
}
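
/* msync(2): MS_ASYNC and MS_SYNC are mutually exclusive. */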
__DEFINE_LXSYSCALL3(int, msync, void*, addr, size_t, length, int, flags)
{
    if (!PG_ALIGNED((ptr_t)addr) || ((flags & MS_ASYNC) && (flags & MS_SYNC))) {
        return DO_STATUS(EINVAL);
    }

    int status = 0;
    mem_msync(VMS_SELF, &__current->mm.regions, (ptr_t)addr, length, flags);

    return DO_STATUS(status);
}