#define EV_CURRENT 1
+// [0x7f, 'E', 'L', 'F']
#define ELFMAGIC 0x464c457fU
#define ELFCLASS32 1
#define ELFCLASS64 2
static inline int
elf_check_exec(struct elf32_ehdr* ehdr)
{
- return (*(u32_t*)(ehdr->e_ident) == ELFMAGIC) ||
- ehdr->e_ident[EI_CLASS] == ELFCLASS32 ||
- ehdr->e_ident[EI_DATA] == ELFDATA2LSB || ehdr->e_type == ET_EXEC;
+ return *(u32_t*)(ehdr->e_ident) == ELFMAGIC &&
+ ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
+ ehdr->e_ident[EI_DATA] == ELFDATA2LSB && ehdr->e_type == ET_EXEC &&
+ ehdr->e_machine == EM_386;
}
#endif /* __LUNAIX_ELF_H */
#ifndef __LUNAIX_LOADER_H
#define __LUNAIX_LOADER_H
+#include <lunaix/elf.h>
#include <lunaix/process.h>
#include <lunaix/types.h>
#define LD_STAT_FKUP 0x1U
#define MAX_VAR_PAGES 8
+#define DEFAULT_HEAP_PAGES 16
+
+struct ld_info
+{
+ struct elf32_ehdr ehdr_out;
+ // base address of the loaded image
+ ptr_t base;
+ // page-aligned end of the highest loaded segment
+ ptr_t end;
+ // total in-memory size of all loaded segments
+ ptr_t mem_sz;
+};
struct ld_param
{
struct proc_info* proc;
ptr_t vms_mnt;
- struct elf32_ehdr ehdr_out;
+
+ struct ld_info info;
int status;
};
+struct usr_exec_param
+{
+ int argc;
+ char** argv;
+ int envc;
+ char** envp;
+ struct ld_info info;
+} PACKED;
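+
+/*
+ * Layout produced by exec_loadto() at the start of the VARS mapping:
+ *
+ *   mapped --> | struct usr_exec_param | argv block | envp block |
+ *
+ * A pointer to this block is left at the top of the initial user stack so
+ * that the user-space entry stub (_u_start) can locate it.
+ */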
+
+#ifndef __USR_WRAPPER__
int
elf_load(struct ld_param* ldparam, struct v_file* elfile);
void
ld_create_param(struct ld_param* param, struct proc_info* proc, ptr_t vms);
+#endif
#endif /* __LUNAIX_LOADER_H */
+++ /dev/null
-#ifndef __LUNAIX_DMM_H
-#define __LUNAIX_DMM_H
-// Dynamic Memory (i.e., heap) Manager
-
-#include <lunaix/mm/mm.h>
-#include <lunaix/process.h>
-#include <stddef.h>
-
-#define M_ALLOCATED 0x1
-#define M_PREV_FREE 0x2
-
-#define M_NOT_ALLOCATED 0x0
-#define M_PREV_ALLOCATED 0x0
-
-#define CHUNK_S(header) ((header) & ~0x3)
-#define CHUNK_PF(header) ((header)&M_PREV_FREE)
-#define CHUNK_A(header) ((header)&M_ALLOCATED)
-
-#define PACK(size, flags) (((size) & ~0x3) | (flags))
-
-#define SW(p, w) (*((u32_t*)(p)) = w)
-#define LW(p) (*((u32_t*)(p)))
-
-#define HPTR(bp) ((u32_t*)(bp)-1)
-#define BPTR(bp) ((uint8_t*)(bp) + WSIZE)
-#define FPTR(hp, size) ((u32_t*)(hp + size - WSIZE))
-#define NEXT_CHK(hp) ((uint8_t*)(hp) + CHUNK_S(LW(hp)))
-
-#define BOUNDARY 4
-#define WSIZE 4
-
-#define HEAP_INIT_SIZE 4096
-
-int
-dmm_init(heap_context_t* heap);
-
-int
-lxbrk(heap_context_t* heap, void* addr, int user);
-
-void*
-lxsbrk(heap_context_t* heap, size_t size, int user);
-
-void*
-lx_malloc_internal(heap_context_t* heap, size_t size);
-
-void
-lx_free_internal(void* ptr);
-
-#endif /* __LUNAIX_DMM_H */
+++ /dev/null
-#ifndef __LUNAIX_KALLOC_H
-#define __LUNAIX_KALLOC_H
-
-#include <stddef.h>
-
-int
-kalloc_init();
-
-/**
- * @brief Allocate a contiguous and un-initialized memory region in kernel heap.
- *
- * @remarks
- * This is NOT the same as kmalloc in Linux!
- * LunaixOS does NOT guarantee the continuity in physical pages.
- *
- * @param size
- * @return void*
- */
-void*
-lxmalloc(size_t size);
-
-/**
- * @brief Allocate a contiguous and initialized memory region in kernel heap.
- * @param size
- * @return void*
- */
-void*
-lxcalloc(size_t n, size_t elem);
-
-/**
- * @brief Free the memory region allocated by kmalloc
- *
- * @param size
- * @return void*
- */
-void
-lxfree(void* ptr);
-
-#endif /* __LUNAIX_KALLOC_H */
#include <usr/sys/mann_flags.h>
-typedef struct
-{
- void* start;
- void* brk;
- void* max_addr;
- mutex_t lock;
-} heap_context_t;
-
/**
* @brief Private region: pages in this region cannot be shared in any form.
*
#define REGION_TYPE_STACK (4 << 16)
#define REGION_TYPE_VARS (5 << 16)
+struct proc_mm;
+
struct mm_region
{
struct llist_header head; // must be first field!
+ struct proc_mm* proc_vms;
+
+ // file mapped to this region
struct v_file* mfile;
- u32_t offset;
+ // mapped file offset
+ off_t foff;
+ // mapped file length
+ u32_t flen;
+
ptr_t start;
ptr_t end;
u32_t attr;
+ void** index; // back-pointer to the fast-reference slot that refers to this region
+
void* data;
+ // called when a page in this region is mapped in and requires initialization
int (*init_page)(struct mm_region*, void*, off_t);
+ // when a region is copied
+ void (*region_copied)(struct mm_region*);
+ // when a region is unmapped
void (*destruct_region)(struct mm_region*);
};
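+
+/*
+ * Bind a fast-reference slot (e.g. proc_mm::heap or proc_mm::stack) to a
+ * region. The region remembers the slot through `index', so that
+ * region_release() can clear the slot once the region goes away.
+ */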
+static inline void
+mm_index(void** index, struct mm_region* target)
+{
+ *index = (void*)target;
+ target->index = index;
+}
+
+typedef struct llist_header vm_regions_t;
+
+struct proc_mm
+{
+ vm_regions_t regions;
+ // fast references to the heap and stack regions, maintained via mm_index()
+ struct mm_region* heap;
+ struct mm_region* stack;
+ pid_t pid;
+};
+
#endif /* __LUNAIX_MM_H */
struct mmap_param
{
- ptr_t vms_mnt;
- vm_regions_t* regions;
- off_t offset;
- size_t length;
- u32_t proct;
- u32_t flags;
- u32_t type;
+ ptr_t vms_mnt; // vm mount point
+ struct proc_mm* pvms; // process vm
+ off_t offset; // mapped file offset
+ size_t mlen; // mapped memory length
+ size_t flen; // mapped file length
+ u32_t proct; // protections
+ u32_t flags; // other options
+ u32_t type; // region type
};
int
int
mem_unmap(ptr_t mnt, vm_regions_t* regions, void* addr, size_t length);
+void
+mem_unmap_region(ptr_t mnt, struct mm_region* region);
+
void
mem_sync_pages(ptr_t mnt,
struct mm_region* region,
#include <lunaix/mm/mm.h>
-typedef struct llist_header vm_regions_t;
-
struct mm_region*
region_create(ptr_t start, ptr_t end, u32_t attr);
region_add(vm_regions_t* lead, struct mm_region* vmregion);
void
-region_release(pid_t pid, struct mm_region* region);
+region_release(struct mm_region* region);
void
-region_release_all(pid_t pid, vm_regions_t* lead);
+region_release_all(vm_regions_t* lead);
struct mm_region*
region_get(vm_regions_t* lead, unsigned long vaddr);
void
-region_copy(vm_regions_t* src, vm_regions_t* dest);
+region_copy(struct proc_mm* src, struct proc_mm* dest);
#endif /* __LUNAIX_REGION_H */
#define PROC_FINPAUSE 1
-struct proc_mm
-{
- heap_context_t u_heap;
- vm_regions_t regions;
-};
-
struct proc_sigstate
{
isr_param proc_regs;
if (hit_region->mfile && !PG_IS_PRESENT(*pte)) {
struct v_file* file = hit_region->mfile;
u32_t offset =
- (ptr - hit_region->start) & (PG_SIZE - 1) + hit_region->offset;
+ (ptr - hit_region->start) & (PG_SIZE - 1) + hit_region->foff;
uintptr_t pa = pmm_alloc_page(__current->pid, 0);
if (!pa) {
int
__elf_populate_mapped(struct mm_region* region, void* pg, off_t offset)
{
- struct elf32_phdr* phdr = (struct elf32_phdr*)region->data;
- size_t segsz = phdr->p_filesz;
- size_t segoff = offset - phdr->p_offset;
+ size_t segsz = region->flen;
+ size_t segoff = offset - region->foff;
if (segoff >= segsz) {
return 0;
}
}
-void
-__elf_destruct_mapped(struct mm_region* region)
-{
- vfree(region->data);
-}
-
int
elf_map_segment(struct ld_param* ldparam,
struct v_file* elfile,
struct mm_region* seg_reg;
struct mmap_param param = { .vms_mnt = ldparam->vms_mnt,
- .regions = &ldparam->proc->mm.regions,
+ .pvms = &ldparam->proc->mm,
.proct = proct,
.offset = phdr->p_offset,
- .length = ROUNDUP(phdr->p_memsz, PG_SIZE),
- .flags =
- MAP_FIXED | MAP_PRIVATE | REGION_TYPE_CODE };
+ .mlen = ROUNDUP(phdr->p_memsz, PG_SIZE),
+ .flen = phdr->p_filesz,
+ .flags = MAP_FIXED | MAP_PRIVATE,
+ .type = REGION_TYPE_CODE };
int status = mem_map(NULL, &seg_reg, PG_ALIGN(phdr->p_va), elfile, &param);
if (!status) {
- struct elf32_phdr* phdr_ = valloc(sizeof(SIZE_PHDR));
- *phdr_ = *phdr;
- seg_reg->data = phdr;
-
seg_reg->init_page = __elf_populate_mapped;
- seg_reg->destruct_region = __elf_destruct_mapped;
+
+ size_t next_addr = phdr->p_memsz + phdr->p_va;
+ ldparam->info.end = MAX(ldparam->info.end, ROUNDUP(next_addr, PG_SIZE));
+ ldparam->info.mem_sz += phdr->p_memsz;
}
return status;
tbl_sz = 1 << ILOG2(tbl_sz);
phdrs = elfile->ops->read(elfile->inode, phdrs, tbl_sz, ehdr->e_phoff);
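+ // require the image to be linked so that it starts exactly at USER_START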
+ if (PG_ALIGN(phdrs[0].p_va) != USER_START) {
+ status = ENOEXEC;
+ goto done;
+ }
+
size_t entries = tbl_sz / SIZE_PHDR;
for (size_t i = 0; i < entries; i++) {
struct elf32_phdr* phdr = &phdrs[i];
if (phdr->p_type == PT_LOAD) {
- status = elf_map_segment(ldparam, elfile, phdr);
+ if (phdr->p_align == PG_SIZE) {
+ status = elf_map_segment(ldparam, elfile, phdr);
+ } else {
+ // surprising alignment!
+ status = ENOEXEC;
+ }
}
// TODO process other types of segments
if (status) {
+ // an error occurred part-way through mapping; the address space is
+ // already partially restructured and impossible to recover
ldparam->status |= LD_STAT_FKUP;
goto done;
}
goto done;
}
- ldparam->ehdr_out = *ehdr;
+ ldparam->info.ehdr_out = *ehdr;
done:
vfree(ehdr);
return sz + 1;
}
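+
+/*
+ * region_copied hook of the heap region: when an address space is duplicated
+ * (fork), re-point the child's proc_mm::heap at its own copy of the region.
+ */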
+void
+__heap_copied(struct mm_region* region)
+{
+ mm_index((void**)&region->proc_vms->heap, region);
+}
+
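+/*
+ * Discard the old heap region (if any) and map a fresh, default-sized heap
+ * right after the end of the loaded image (param->info.end).
+ */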
+int
+__exec_remap_heap(struct ld_param* param, struct proc_mm* pvms)
+{
+ if (pvms->heap) {
+ mem_unmap_region(param->vms_mnt, pvms->heap);
+ }
+
+ struct mmap_param map_param = { .pvms = pvms,
+ .vms_mnt = param->vms_mnt,
+ .flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
+ .type = REGION_TYPE_HEAP,
+ .proct = PROT_READ | PROT_WRITE,
+ .mlen = DEFAULT_HEAP_PAGES * PG_SIZE };
+ int status = 0;
+ struct mm_region* heap;
+ if ((status = mem_map(NULL, &heap, param->info.end, NULL, &map_param))) {
+ param->status |= LD_STAT_FKUP;
+ return status;
+ }
+
+ heap->region_copied = __heap_copied;
+ mm_index((void**)&pvms->heap, heap);
+
+ return 0;
+}
+
int
exec_loadto(struct ld_param* param,
struct v_file* executable,
goto done;
}
- struct mmap_param map_param = { .regions = &param->proc->mm.regions,
- .vms_mnt = param->vms_mnt,
- .flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
- .type = REGION_TYPE_VARS,
- .proct = PROT_READ,
- .length = MAX_VAR_PAGES * PG_SIZE };
+ struct proc_mm* pvms = &param->proc->mm;
+ struct mmap_param map_vars = { .pvms = pvms,
+ .vms_mnt = param->vms_mnt,
+ .flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
+ .type = REGION_TYPE_VARS,
+ .proct = PROT_READ,
+ .mlen = MAX_VAR_PAGES * PG_SIZE };
void* mapped;
isr_param* intr_ctx = &param->proc->intr_ctx;
- // TODO reinitialize heap
+ if ((errno = __exec_remap_heap(param, pvms))) {
+ goto done;
+ }
+
+ if ((errno = mem_map(&mapped, NULL, UMMAP_END, NULL, &map_vars))) {
+ goto done;
+ }
if (param->vms_mnt == VMS_SELF) {
// we are loading executable into current addr space
- if ((errno = mem_map(&mapped, NULL, UMMAP_END, NULL, &map_param))) {
- goto done;
- }
-
- memcpy(mapped, (void*)argv, sz_argv);
- memcpy(mapped + sz_argv, (void*)envp, sz_envp);
-
- ptr_t* ustack = (void*)USTACK_TOP;
- size_t* argc = &((size_t*)&ustack[-1])[-1];
+ // make some handy info available to user space
+ ptr_t arg_start = (ptr_t)mapped + sizeof(struct usr_exec_param);
+ memcpy((void*)arg_start, (void*)argv, sz_argv);
+ memcpy((void*)(arg_start + sz_argv), (void*)envp, sz_envp);
+
+ struct usr_exec_param* exec_param = mapped;
+ *exec_param = (struct usr_exec_param){ .argc = argv_len,
+ .argv = (char**)arg_start,
+ .envc = envp_len,
+ .envp = (char**)(arg_start + sz_argv),
+ .info = param->info };
+ ptr_t* ustack = (ptr_t*)USTACK_TOP;
ustack[-1] = (ptr_t)mapped;
- *argc = argv_len;
-
- // TODO handle envp.
-
- intr_ctx->esp = argc;
+ intr_ctx->esp = &ustack[-1];
} else {
// TODO need to find a way to inject argv and envp remotely
fail("not implemented");
}
- intr_ctx->eip = param->ehdr_out.e_entry;
- // we will jump to new entry point upon syscall's return
- // so execve will not return from the perspective of it's invoker
+ intr_ctx->eip = param->info.ehdr_out.e_entry;
+ // we will jump to new entry point (_u_start) upon syscall's
+ // return so execve 'will not return' from the perspective of its invoker
done:
return errno;
#include <hal/cpu.h>
#include <lunaix/clock.h>
#include <lunaix/lxconsole.h>
-#include <lunaix/mm/kalloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
#include <lunaix/syslog.h>
-/**
- * @file dmm.c
- * @author Lunaixsky
- * @brief Dynamic memory manager for heap. This design do not incorporate any\
- * specific implementation of malloc family. The main purpose of this routines
- * is to provide handy method to initialize & grow the heap as needed by
- * upstream implementation.
- *
- * This is designed to be portable, so it can serve as syscalls to malloc/free
- * in the c std lib.
- *
- * @version 0.2
- * @date 2022-03-3
- *
- * @copyright Copyright (c) Lunaixsky 2022
- *
- */
-
-#include <lunaix/mm/dmm.h>
#include <lunaix/mm/page.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/status.h>
__DEFINE_LXSYSCALL1(int, sbrk, size_t, size)
{
- heap_context_t* uheap = &__current->mm.u_heap;
- mutex_lock(&uheap->lock);
- void* r = lxsbrk(uheap, size, PG_ALLOW_USER);
- mutex_unlock(&uheap->lock);
- return r;
+ // TODO mem_remap to expand heap region
+ return 0;
}
__DEFINE_LXSYSCALL1(void*, brk, void*, addr)
{
- heap_context_t* uheap = &__current->mm.u_heap;
- mutex_lock(&uheap->lock);
- int r = lxbrk(uheap, addr, PG_ALLOW_USER);
- mutex_unlock(&uheap->lock);
- return r;
-}
-
-int
-dmm_init(heap_context_t* heap)
-{
- assert((uintptr_t)heap->start % BOUNDARY == 0);
-
- heap->brk = heap->start;
- mutex_init(&heap->lock);
-
- return vmm_set_mapping(VMS_SELF,
- heap->brk,
- 0,
- PG_WRITE | PG_ALLOW_USER,
- VMAP_NULL) != NULL;
-}
-
-int
-lxbrk(heap_context_t* heap, void* addr, int user)
-{
- return -(lxsbrk(heap, addr - heap->brk, user) == (void*)-1);
-}
-
-void*
-lxsbrk(heap_context_t* heap, size_t size, int user)
-{
- if (size == 0) {
- return heap->brk;
- }
-
- void* current_brk = heap->brk;
-
- // The upper bound of our next brk of heap given the size.
- // This will be used to calculate the page we need to allocate.
- void* next = current_brk + ROUNDUP(size, BOUNDARY);
-
- // any invalid situations
- if (next >= heap->max_addr || next < current_brk) {
- __current->k_status = LXINVLDPTR;
- return (void*)-1;
- }
-
- uintptr_t diff = PG_ALIGN(next) - PG_ALIGN(current_brk);
- if (diff) {
- // if next do require new pages to be mapped
- for (size_t i = 0; i < diff; i += PG_SIZE) {
- vmm_set_mapping(VMS_SELF,
- PG_ALIGN(current_brk) + PG_SIZE + i,
- 0,
- PG_WRITE | user,
- VMAP_NULL);
- }
- }
-
- heap->brk += size;
- return current_brk;
+ // TODO mem_remap to expand heap region
+ return 0;
}
\ No newline at end of file
+++ /dev/null
-
-/**** DO NOT USE ****/
-
-/**
- * @file kalloc.c
- * @author Lunaixsky
- * @brief Implicit free list implementation of malloc family, for kernel use.
- *
- * This version of code is however the simplest and yet insecured, thread unsafe
- * it just to demonstrate how the malloc/free works behind the curtain
- * @version 0.1
- * @date 2022-03-05
- *
- * @copyright Copyright (c) 2022
- *
- */
-// #include <lunaix/mm/dmm.h>
-// #include <lunaix/mm/kalloc.h>
-// #include <lunaix/mm/vmm.h>
-
-// #include <lunaix/common.h>
-// #include <lunaix/spike.h>
-
-// #include <klibc/string.h>
-
-// #include <stdint.h>
-
-// extern uint8_t __kernel_heap_start;
-
-// void*
-// lx_malloc_internal(heap_context_t* heap, size_t size);
-
-// void
-// place_chunk(uint8_t* ptr, size_t size);
-
-// void
-// lx_free_internal(void* ptr);
-
-// void*
-// coalesce(uint8_t* chunk_ptr);
-
-// void*
-// lx_grow_heap(heap_context_t* heap, size_t sz);
-
-// /*
-// At the beginning, we allocate an empty page and put our initial marker
-
-// | 4/1 | 0/1 |
-// ^ ^ brk
-// start
-
-// Then, expand the heap further, with HEAP_INIT_SIZE (evaluated to 4096,
-// i.e.,
-// 1 pg size) This will allocate as much pages and override old epilogue
-// marker with a free region hdr and put new epilogue marker. These are
-// handled by lx_grow_heap which is internally used by alloc to expand the
-// heap at many moment when needed.
-
-// | 4/1 | 4096/0 | ....... | 4096/0 | 0/1 |
-// ^ ^ brk_old ^
-// start brk
-
-// Note: the brk always point to the beginning of epilogue.
-// */
-
-// static heap_context_t kheap;
-
-// int
-// kalloc_init()
-// {
-// kheap.start = KHEAP_START;
-// kheap.brk = NULL;
-// kheap.max_addr =
-// (void*)PROC_START; // in the new layout, the end of the heap is where the process table begins
-
-// for (size_t i = 0; i < KHEAP_SIZE_MB >> 2; i++) {
-// vmm_set_mapping(PD_REFERENCED,
-// (uintptr_t)kheap.start + (i << 22),
-// 0,
-// PG_PREM_RW,
-// VMAP_NOMAP);
-// }
-
-// if (!dmm_init(&kheap)) {
-// return 0;
-// }
-
-// SW(kheap.start, PACK(4, M_ALLOCATED));
-// SW(kheap.start + WSIZE, PACK(0, M_ALLOCATED));
-// kheap.brk += WSIZE;
-
-// return lx_grow_heap(&kheap, HEAP_INIT_SIZE) != NULL;
-// }
-
-// void*
-// lxmalloc(size_t size)
-// {
-// mutex_lock(&kheap.lock);
-// void* r = lx_malloc_internal(&kheap, size);
-// mutex_unlock(&kheap.lock);
-
-// return r;
-// }
-
-// void*
-// lxcalloc(size_t n, size_t elem)
-// {
-// size_t pd = n * elem;
-
-// // overflow detection
-// if (pd < elem || pd < n) {
-// return NULL;
-// }
-
-// void* ptr = lxmalloc(pd);
-// if (!ptr) {
-// return NULL;
-// }
-
-// return memset(ptr, 0, pd);
-// }
-
-// void
-// lxfree(void* ptr)
-// {
-// if (!ptr) {
-// return;
-// }
-// mutex_lock(&kheap.lock);
-
-// uint8_t* chunk_ptr = (uint8_t*)ptr - WSIZE;
-// u32_t hdr = LW(chunk_ptr);
-// size_t sz = CHUNK_S(hdr);
-// uint8_t* next_hdr = chunk_ptr + sz;
-
-// // make sure the ptr we are 'bout to free makes sense
-// // the size trick is stolen from glibc's malloc/malloc.c:4437 ;P
-
-// assert_msg(((uintptr_t)ptr < (uintptr_t)(-sz)) && !((uintptr_t)ptr &
-// 0x3),
-// "free(): invalid pointer");
-
-// assert_msg(sz > WSIZE, "free(): invalid size");
-
-// SW(chunk_ptr, hdr & ~M_ALLOCATED);
-// SW(FPTR(chunk_ptr, sz), hdr & ~M_ALLOCATED);
-// SW(next_hdr, LW(next_hdr) | M_PREV_FREE);
-
-// coalesce(chunk_ptr);
-
-// mutex_unlock(&kheap.lock);
-// }
-
-// void*
-// lx_malloc_internal(heap_context_t* heap, size_t size)
-// {
-// // Simplest first fit approach.
-
-// if (!size) {
-// return NULL;
-// }
-
-// uint8_t* ptr = heap->start;
-// // round to largest 4B aligned value
-// // and space for header
-// size = ROUNDUP(size + WSIZE, BOUNDARY);
-// while (ptr < (uint8_t*)heap->brk) {
-// u32_t header = *((u32_t*)ptr);
-// size_t chunk_size = CHUNK_S(header);
-// if (!chunk_size && CHUNK_A(header)) {
-// break;
-// }
-// if (chunk_size >= size && !CHUNK_A(header)) {
-// // found!
-// place_chunk(ptr, size);
-// return BPTR(ptr);
-// }
-// ptr += chunk_size;
-// }
-
-// // if heap is full (seems to be!), then allocate more space (if it's
-// // okay...)
-// if ((ptr = lx_grow_heap(heap, size))) {
-// place_chunk(ptr, size);
-// return BPTR(ptr);
-// }
-
-// // Well, we are officially OOM!
-// return NULL;
-// }
-
-// void
-// place_chunk(uint8_t* ptr, size_t size)
-// {
-// u32_t header = *((u32_t*)ptr);
-// size_t chunk_size = CHUNK_S(header);
-// *((u32_t*)ptr) = PACK(size, CHUNK_PF(header) | M_ALLOCATED);
-// uint8_t* n_hdrptr = (uint8_t*)(ptr + size);
-// u32_t diff = chunk_size - size;
-
-// if (!diff) {
-// // if the current free block is fully occupied
-// u32_t n_hdr = LW(n_hdrptr);
-// // notify the next block about our avaliability
-// SW(n_hdrptr, n_hdr & ~0x2);
-// } else {
-// // if there is remaining free space left
-// u32_t remainder_hdr = PACK(diff, M_NOT_ALLOCATED |
-// M_PREV_ALLOCATED); SW(n_hdrptr, remainder_hdr); SW(FPTR(n_hdrptr,
-// diff), remainder_hdr);
-
-// /*
-// | xxxx | | |
-
-// |
-// v
-
-// | xxxx | |
-// */
-// coalesce(n_hdrptr);
-// }
-// }
-
-// void*
-// coalesce(uint8_t* chunk_ptr)
-// {
-// u32_t hdr = LW(chunk_ptr);
-// u32_t pf = CHUNK_PF(hdr);
-// u32_t sz = CHUNK_S(hdr);
-
-// u32_t n_hdr = LW(chunk_ptr + sz);
-
-// if (CHUNK_A(n_hdr) && pf) {
-// // case 1: prev is free
-// u32_t prev_ftr = LW(chunk_ptr - WSIZE);
-// size_t prev_chunk_sz = CHUNK_S(prev_ftr);
-// u32_t new_hdr = PACK(prev_chunk_sz + sz, CHUNK_PF(prev_ftr));
-// SW(chunk_ptr - prev_chunk_sz, new_hdr);
-// SW(FPTR(chunk_ptr, sz), new_hdr);
-// chunk_ptr -= prev_chunk_sz;
-// } else if (!CHUNK_A(n_hdr) && !pf) {
-// // case 2: next is free
-// size_t next_chunk_sz = CHUNK_S(n_hdr);
-// u32_t new_hdr = PACK(next_chunk_sz + sz, pf);
-// SW(chunk_ptr, new_hdr);
-// SW(FPTR(chunk_ptr, sz + next_chunk_sz), new_hdr);
-// } else if (!CHUNK_A(n_hdr) && pf) {
-// // case 3: both free
-// u32_t prev_ftr = LW(chunk_ptr - WSIZE);
-// size_t next_chunk_sz = CHUNK_S(n_hdr);
-// size_t prev_chunk_sz = CHUNK_S(prev_ftr);
-// u32_t new_hdr =
-// PACK(next_chunk_sz + prev_chunk_sz + sz, CHUNK_PF(prev_ftr));
-// SW(chunk_ptr - prev_chunk_sz, new_hdr);
-// SW(FPTR(chunk_ptr, sz + next_chunk_sz), new_hdr);
-// chunk_ptr -= prev_chunk_sz;
-// }
-
-// // (fall through) case 4: prev and next are not free
-// return chunk_ptr;
-// }
-
-// void*
-// lx_grow_heap(heap_context_t* heap, size_t sz)
-// {
-// void* start;
-
-// // The "+ WSIZE" capture the overhead for epilogue marker
-// if (!(start = lxsbrk(heap, sz + WSIZE, 0))) {
-// return NULL;
-// }
-// sz = ROUNDUP(sz, BOUNDARY);
-
-// // minus the overhead for epilogue, keep the invariant.
-// heap->brk -= WSIZE;
-
-// u32_t old_marker = *((u32_t*)start);
-// u32_t free_hdr = PACK(sz, CHUNK_PF(old_marker));
-// SW(start, free_hdr);
-// SW(FPTR(start, sz), free_hdr);
-// SW(NEXT_CHK(start), PACK(0, M_ALLOCATED | M_PREV_FREE));
-
-// return coalesce(start);
-// }
\ No newline at end of file
struct v_file* file,
struct mmap_param* param)
{
- ptr_t last_end = USER_START;
+ assert_msg(addr, "addr cannot be NULL");
+
+ ptr_t last_end = USER_START, found_loc = (ptr_t)addr;
struct mm_region *pos, *n;
+ vm_regions_t* vm_regions = &param->pvms->regions;
+
if ((param->flags & MAP_FIXED_NOREPLACE)) {
- if (mem_has_overlap(param->regions, addr, param->length)) {
+ if (mem_has_overlap(vm_regions, found_loc, param->mlen)) {
return EEXIST;
}
- last_end = addr;
goto found;
}
if ((param->flags & MAP_FIXED)) {
int status =
- mem_unmap(param->vms_mnt, param->regions, addr, param->length);
+ mem_unmap(param->vms_mnt, vm_regions, found_loc, param->mlen);
if (status) {
return status;
}
- last_end = addr;
goto found;
}
- llist_for_each(pos, n, param->regions, head)
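+ // neither MAP_FIXED nor MAP_FIXED_NOREPLACE: treat addr as a hint and scan
+ // the region list for the first gap large enough to hold the mapping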
+ llist_for_each(pos, n, vm_regions, head)
{
- if (pos->start - last_end > param->length && last_end > addr) {
- last_end += 1;
- goto found;
+ if (last_end < found_loc) {
+ size_t avail_space = pos->start - found_loc;
+ if ((int)avail_space > 0 && avail_space > param->mlen) {
+ goto found;
+ }
+ found_loc = pos->end + PG_SIZE;
}
- last_end = pos->end;
+
+ last_end = pos->end + PG_SIZE;
}
return ENOMEM;
found:
- addr = last_end;
-
- if (addr >= KERNEL_MM_BASE || addr < USER_START) {
+ if (found_loc >= KERNEL_MM_BASE || found_loc < USER_START) {
return ENOMEM;
}
struct mm_region* region = region_create_range(
- addr,
- param->length,
- ((param->proct | param->flags) & 0x1f) | (param->type & ~0xffff));
+ found_loc,
+ param->mlen,
+ ((param->proct | param->flags) & 0x3f) | (param->type & ~0xffff));
region->mfile = file;
- region->offset = param->offset;
+ region->foff = param->offset;
+ region->flen = param->flen;
+ region->proc_vms = param->pvms;
- region_add(param->regions, region);
+ region_add(vm_regions, region);
u32_t attr = PG_ALLOW_USER;
if ((param->proct & REGION_WRITE)) {
attr |= PG_WRITE;
}
- for (u32_t i = 0; i < param->length; i += PG_SIZE) {
- vmm_set_mapping(param->vms_mnt, addr + i, 0, attr, 0);
+ for (u32_t i = 0; i < param->mlen; i += PG_SIZE) {
+ vmm_set_mapping(param->vms_mnt, found_loc + i, 0, attr, 0);
}
- vfs_ref_file(file);
+ if (file) {
+ vfs_ref_file(file);
+ }
if (addr_out) {
- *addr_out = addr;
+ *addr_out = found_loc;
}
if (created) {
*created = region;
}
if (PG_IS_DIRTY(*mapping.pte)) {
- size_t offset = mapping.va - region->start + region->offset;
+ size_t offset = mapping.va - region->start + region->foff;
struct v_inode* inode = region->mfile->inode;
region->mfile->ops->write_page(inode, mapping.va, PG_SIZE, offset);
*mapping.pte &= ~PG_DIRTY;
return 0;
}
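+
+/*
+ * Unmap a whole region: write back any dirty file-backed pages, free the
+ * underlying physical pages, then drop the region from its list and release it.
+ */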
+void
+mem_unmap_region(ptr_t mnt, struct mm_region* region)
+{
+ size_t len = ROUNDUP(region->end - region->start, PG_SIZE);
+ mem_sync_pages(mnt, region, region->start, len, 0);
+
+ for (size_t i = region->start; i <= region->end; i += PG_SIZE) {
+ ptr_t pa = vmm_del_mapping(mnt, i);
+ if (pa) {
+ pmm_free_page(__current->pid, pa);
+ }
+ }
+ llist_delete(&region->head);
+ region_release(region);
+}
+
int
mem_unmap(ptr_t mnt, vm_regions_t* regions, void* addr, size_t length)
{
length = ROUNDUP(length, PG_SIZE);
- ptr_t cur_addr = ROUNDDOWN((ptr_t)addr, PG_SIZE);
+ ptr_t cur_addr = PG_ALIGN(addr);
struct mm_region *pos, *n;
llist_for_each(pos, n, regions, head)
{
- if (pos->start <= cur_addr) {
+ if (pos->start <= cur_addr && pos->end >= cur_addr) {
break;
}
}
for (size_t i = 0; i < l; i += PG_SIZE) {
ptr_t pa = vmm_del_mapping(mnt, cur_addr + i);
if (pa) {
- pmm_free_page(__current->pid, pa);
+ pmm_free_page(pos->proc_vms->pid, pa);
}
}
n = container_of(pos->head.next, typeof(*pos), head);
if (pos->end == pos->start) {
llist_delete(&pos->head);
- region_release(__current->pid, pos);
+ region_release(pos);
}
pos = n;
}
struct mmap_param param = { .flags = options,
- .length = ROUNDUP(length, PG_SIZE),
+ .mlen = ROUNDUP(length, PG_SIZE),
.offset = offset,
.type = REGION_TYPE_GENERAL,
.proct = proct,
- .regions = &__current->mm.regions,
+ .pvms = &__current->mm,
.vms_mnt = VMS_SELF };
errno = mem_map(&result, NULL, addr, file, &param);
struct mm_region* region = valloc(sizeof(struct mm_region));
*region = (struct mm_region){ .attr = attr,
.start = start,
- .end = start + length - 1 };
+ .end = PG_ALIGN(start + length - 1) };
return region;
}
struct mm_region *pos = (struct mm_region*)lead,
*n = list_entry(lead->next, struct mm_region, head);
do {
- if (vmregion->start >= cur_end && vmregion->end <= n->start) {
+ if (vmregion->start > cur_end && vmregion->end < n->start) {
break;
}
cur_end = n->end;
}
void
-region_release(pid_t pid, struct mm_region* region)
+region_release(struct mm_region* region)
{
if (region->destruct_region) {
region->destruct_region(region);
}
if (region->mfile) {
- vfs_pclose(region->mfile, pid);
+ vfs_pclose(region->mfile, region->proc_vms->pid);
+ }
+
+ if (region->index) {
+ *region->index = NULL;
}
vfree(region);
}
void
-region_release_all(pid_t pid, vm_regions_t* lead)
+region_release_all(vm_regions_t* lead)
{
struct mm_region *pos, *n;
llist_for_each(pos, n, lead, head)
{
- region_release(pid, pos);
+ region_release(pos);
}
}
void
-region_copy(vm_regions_t* src, vm_regions_t* dest)
+region_copy(struct proc_mm* src, struct proc_mm* dest)
{
- if (!src) {
- return;
- }
-
struct mm_region *pos, *n, *dup;
- llist_for_each(pos, n, src, head)
+ llist_for_each(pos, n, &src->regions, head)
{
dup = valloc(sizeof(struct mm_region));
memcpy(dup, pos, sizeof(*pos));
- region_add(dest, dup);
+
+ dup->proc_vms = dest;
+
+ if (dup->mfile) {
+ vfs_ref_file(dup->mfile);
+ }
+
+ if (dup->region_copied) {
+ dup->region_copied(dup);
+ }
+
+ region_add(&dest->regions, dup);
}
}
struct mm_region *pos, *n;
+ vaddr = PG_ALIGN(vaddr);
+
llist_for_each(pos, n, lead, head)
{
- if (pos->start <= vaddr && vaddr < pos->end) {
+ if (pos->start <= vaddr && vaddr <= pos->end) {
return pos;
}
}
return 0;
}
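+
+/*
+ * region_copied hook of the stack region: re-point the child's
+ * proc_mm::stack at its own copy after fork.
+ */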
+void
+__stack_copied(struct mm_region* region)
+{
+ mm_index((void**)&region->proc_vms->stack, region);
+}
+
void
init_proc_user_space(struct proc_info* pcb)
{
struct mm_region* mapped;
struct mmap_param param = { .vms_mnt = VMS_MOUNT_1,
- .regions = &pcb->mm.regions,
- .length = USTACK_SIZE,
+ .pvms = &pcb->mm,
+ .mlen = USTACK_SIZE,
.proct = PROT_READ | PROT_WRITE,
.flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
.type = REGION_TYPE_STACK };
kprint_panic("fail to alloc user stack: %d", status);
}
+ mapped->region_copied = __stack_copied;
+ mm_index((void**)&pcb->mm.stack, mapped);
+
// TODO other uspace initialization stuff
vmm_unmount_pd(VMS_MOUNT_1);
dup_proc()
{
struct proc_info* pcb = alloc_process();
- pcb->mm.u_heap = __current->mm.u_heap;
pcb->intr_ctx = __current->intr_ctx;
pcb->parent = __current;
}
__copy_fdtable(pcb);
- region_copy(&__current->mm.regions, &pcb->mm.regions);
+ region_copy(&__current->mm, &pcb->mm);
setup_proc_mem(pcb, VMS_SELF);
#include <lunaix/fs/taskfs.h>
#include <lunaix/mm/cake.h>
-#include <lunaix/mm/kalloc.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/syscall.h>
#include <lunaix/syslog.h>
+#include <klibc/string.h>
+
volatile struct proc_info* __current;
static struct proc_info dummy_proc;
proc->state = PS_CREATED;
proc->pid = i;
+ proc->mm.pid = i;
proc->created = clock_systime();
proc->pgid = proc->pid;
proc->fdtable = vzalloc(sizeof(struct v_fdtable));
llist_for_each(pos, n, &proc->mm.regions, head)
{
mem_sync_pages(VMS_MOUNT_1, pos, pos->start, pos->end - pos->start, 0);
- region_release(pid, pos);
+ region_release(pos);
}
__del_pagetable(pid, VMS_MOUNT_1);