+ Memory management and demand paging
+ Keyboard input
+ Multi-processing
-+ 49 common Linux/POSIX system calls ([Appendix 1](#appendix1))
++ 50 common Linux/POSIX system calls ([Appendix 1](#appendix1))
+ User mode
+ Signals
+ PCI 3.0
2. `ioctl(2)`
2. `getpgid(2)`
2. `setpgid(2)`
-2. `mmap(2)`※
-2. `munmap(2)`※
+2. `mmap(2)`
+2. `munmap(2)`
+2. `execve(2)`※
**LunaixOS-specific**
+ Memory management & demand paging
+ PS/2 Keyboard support
+ Multi-tasking and task management
-+ 47 commonly used POSIX syscall([See Appendix 1](#appendix1))
++ 50 commonly used POSIX syscalls ([See Appendix 1](#appendix1))
+ User Space
+ Signal
+ PCI 3.0
2. `setxattr(2)`※
2. `fgetxattr(2)`※
2. `fsetxattr(2)`※
-2. `ioctl(2)`※
+2. `ioctl(2)`
2. `getpgid(2)`
2. `setpgid(2)`
+2. `mmap(2)`
+2. `munmap(2)`
+2. `execve(2)`※
**LunaixOS**
#define MEM_1MB 0x100000
#define MEM_4MB 0x400000
+#define USER_START 0x400000
+
#define KSTACK_SIZE MEM_1MB
-#define KSTACK_START ((0x3FFFFFU - KSTACK_SIZE) + 1)
-#define KSTACK_TOP 0x3FFFF0U
+#define KSTACK_START (USER_START - KSTACK_SIZE)
+#define KSTACK_TOP ((USER_START - 1) & ~0xf)
#define KERNEL_MM_BASE 0xC0000000
#define UDATA_SEG 0x23
#define TSS_SEG 0x28
-#define USER_START 0x400000
-#define USTACK_SIZE 0x100000
+#define USTACK_SIZE MEM_4MB
#define USTACK_TOP 0x9ffffff0
#define USTACK_END (0x9fffffff - USTACK_SIZE + 1)
-#define UMMAP_AREA 0x4D000000
+#define UMMAP_START 0x4D000000
+#define UMMAP_END (USTACK_END - MEM_4MB)
#ifndef __ASM__
#include <stddef.h>
--- /dev/null
+#ifndef __LUNAIX_ELF_H
+#define __LUNAIX_ELF_H
+
+#include <lunaix/types.h>
+
+typedef unsigned int elf32_ptr_t;
+typedef unsigned short elf32_hlf_t;
+typedef unsigned int elf32_off_t;
+typedef unsigned int elf32_swd_t;
+typedef unsigned int elf32_wrd_t;
+
+#define ET_NONE 0
+#define ET_EXEC 2
+
+#define PT_LOAD 1
+
+#define PF_X 0x1
+#define PF_W 0x2
+#define PF_R 0x4
+
+#define EM_NONE 0
+#define EM_386 3
+
+#define EV_CURRENT 1
+
+#define ELFMAGIC 0x464c457fU
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
+
+#define EI_CLASS 4
+#define EI_DATA 5
+
+struct elf32_ehdr
+{
+ u8_t e_ident[16];
+ elf32_hlf_t e_type;
+ elf32_hlf_t e_machine;
+ elf32_wrd_t e_version;
+ elf32_ptr_t e_entry;
+ elf32_off_t e_phoff;
+ elf32_off_t e_shoff;
+ elf32_wrd_t e_flags;
+ elf32_hlf_t e_ehsize;
+ elf32_hlf_t e_phentsize;
+ elf32_hlf_t e_phnum;
+ elf32_hlf_t e_shentsize;
+ elf32_hlf_t e_shnum;
+ elf32_hlf_t e_shstrndx;
+};
+
+struct elf32_phdr
+{
+ elf32_wrd_t p_type;
+ elf32_off_t p_offset;
+ elf32_ptr_t p_va;
+ elf32_ptr_t p_pa;
+ elf32_wrd_t p_filesz;
+ elf32_wrd_t p_memsz;
+ elf32_wrd_t p_flags;
+ elf32_wrd_t p_align;
+};
+
+#define SIZE_EHDR sizeof(struct elf32_ehdr)
+#define SIZE_PHDR sizeof(struct elf32_phdr)
+
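+// Accept only 32-bit, little-endian ELF executables carrying the ELF magic.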
+static inline int
+elf_check_exec(struct elf32_ehdr* ehdr)
+{
+    return (*(u32_t*)(ehdr->e_ident) == ELFMAGIC) &&
+           ehdr->e_ident[EI_CLASS] == ELFCLASS32 &&
+           ehdr->e_ident[EI_DATA] == ELFDATA2LSB && ehdr->e_type == ET_EXEC;
+}
+#endif /* __LUNAIX_ELF_H */
void
vfs_ref_dnode(struct v_dnode* dnode);
+void
+vfs_ref_file(struct v_file* file);
+
void
vfs_unref_dnode(struct v_dnode* dnode);
--- /dev/null
+#ifndef __LUNAIX_LOADER_H
+#define __LUNAIX_LOADER_H
+
+#include <lunaix/elf.h>
+#include <lunaix/process.h>
+#include <lunaix/types.h>
+
+#define LD_STAT_FKUP 0x1U
+
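+// number of pages reserved for the new program's argv/envp block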
+#define MAX_VAR_PAGES 8
+
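+// State shared across a single load: the target process, the mount point of
+// its virtual memory space, a copy of the parsed ELF header, and status flags.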
+struct ld_param
+{
+ struct proc_info* proc;
+ ptr_t vms_mnt;
+ struct elf32_ehdr ehdr_out;
+ int status;
+};
+
+int
+elf_load(struct ld_param* ldparam, struct v_file* elfile);
+
+void
+ld_create_param(struct ld_param* param, struct proc_info* proc, ptr_t vms);
+
+#endif /* __LUNAIX_LOADER_H */
#define REGION_ANON MAP_ANON
#define REGION_RW REGION_READ | REGION_WRITE
-#define REGION_TYPE_CODE (1 << 16);
-#define REGION_TYPE_GENERAL (2 << 16);
-#define REGION_TYPE_HEAP (3 << 16);
-#define REGION_TYPE_STACK (4 << 16);
+#define REGION_TYPE_CODE (1 << 16)
+#define REGION_TYPE_GENERAL (2 << 16)
+#define REGION_TYPE_HEAP (3 << 16)
+#define REGION_TYPE_STACK (4 << 16)
+#define REGION_TYPE_VARS (5 << 16)
struct mm_region
{
ptr_t start;
ptr_t end;
u32_t attr;
+
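+    // optional per-region hooks: `data` carries loader-specific state,
+    // `init_page` populates a freshly faulted page, and `destruct_region`
+    // releases that state when the region is torn down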
+ void* data;
+ int (*init_page)(struct mm_region*, void*, off_t);
+ void (*destruct_region)(struct mm_region*);
};
#endif /* __LUNAIX_MM_H */
#include <lunaix/mm/region.h>
#include <lunaix/types.h>
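+// Aggregates the arguments of mem_map() so new options can be added without
+// changing its signature.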
+struct mmap_param
+{
+ ptr_t vms_mnt;
+ vm_regions_t* regions;
+ off_t offset;
+ size_t length;
+ u32_t proct;
+ u32_t flags;
+ u32_t type;
+};
+
int
mem_map(void** addr_out,
- ptr_t mnt,
- vm_regions_t* regions,
+ struct mm_region** created,
void* addr,
struct v_file* file,
- off_t offset,
- size_t length,
- u32_t proct,
- u32_t options);
+ struct mmap_param* param);
int
mem_unmap(ptr_t mnt, vm_regions_t* regions, void* addr, size_t length);
region_add(vm_regions_t* lead, struct mm_region* vmregion);
void
-region_release_all(vm_regions_t* lead);
+region_release(pid_t pid, struct mm_region* region);
+
+void
+region_release_all(pid_t pid, vm_regions_t* lead);
struct mm_region*
region_get(vm_regions_t* lead, unsigned long vaddr);
if (!(cond)) { \
__assert_fail(msg, __FILE__, __LINE__); \
}
+
+#define fail(msg) __assert_fail(msg, __FILE__, __LINE__)
+
void
__assert_fail(const char* expr, const char* file, unsigned int line)
__attribute__((noinline, noreturn));
#define ENOTDEV -24
#define EOVERFLOW -25
#define ENOTBLK -26
+#define ENOEXEC -27
+#define E2BIG -28
#endif /* __LUNAIX_CODE_H */
#define __SYSCALL_sys_mmap 52
#define __SYSCALL_munmap 53
+#define __SYSCALL_execve 54
+
#define __SYSCALL_MAX 0x100
#ifndef __ASM__
// any other MAP_* flags should be defined beyond 0x20
#define MAP_FIXED 0x40
+#define MAP_FIXED_NOREPLACE 0x80
#define MS_ASYNC 0x1
#define MS_SYNC 0x2
#define MS_INVALIDATE 0x4
+#define MS_INVALIDATE_ALL 0x8
#endif /* __LUNAIX_MANN_FLAGS_H */
int
fsetxattr(int fd, const char* name, void* value, size_t len);
+int
+execve(const char* filename, const char* argv[], const char* envp[]);
+
#endif /* __LUNAIX_UNISTD_H */
ptr = PG_ALIGN(ptr);
memset(ptr, 0, PG_SIZE);
- int errno = file->ops->read_page(file->inode, ptr, PG_SIZE, offset);
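+    // regions with an init_page hook (e.g. ELF segments) populate the faulted
+    // page themselves; plain file mappings fall back to read_page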
+ int errno = 0;
+ if (hit_region->init_page) {
+ errno = hit_region->init_page(hit_region, ptr, offset);
+ } else {
+ errno = file->ops->read_page(file->inode, ptr, PG_SIZE, offset);
+ }
+
if (errno < 0) {
- kprintf(KERROR "fail to read page (%d)\n", errno);
+ kprintf(KERROR "fail to populate page (%d)\n", errno);
goto segv_term;
}
.long __lxsys_syslog
.long __lxsys_sys_mmap
.long __lxsys_munmap
+ .long __lxsys_execve
2:
.rept __SYSCALL_MAX - (2b - 1b)/4
.long 0
return DO_STATUS(errno);
}
+void
+vfs_ref_file(struct v_file* file)
+{
+ atomic_fetch_add(&file->ref_count, 1);
+}
+
void
vfs_ref_dnode(struct v_dnode* dnode)
{
--- /dev/null
+#include <lunaix/common.h>
+#include <lunaix/elf.h>
+#include <lunaix/fs.h>
+#include <lunaix/ld.h>
+#include <lunaix/mm/mmap.h>
+#include <lunaix/mm/valloc.h>
+#include <lunaix/mm/vmm.h>
+#include <lunaix/spike.h>
+
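+// init_page hook for ELF-backed regions: fill a freshly faulted page from the
+// segment's file content; anything beyond p_filesz stays zeroed (.bss).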
+int
+__elf_populate_mapped(struct mm_region* region, void* pg, off_t offset)
+{
+ struct elf32_phdr* phdr = (struct elf32_phdr*)region->data;
+ size_t segsz = phdr->p_filesz;
+ size_t segoff = offset - phdr->p_offset;
+
+ if (segoff >= segsz) {
+ return 0;
+ }
+
+ struct v_file* file = region->mfile;
+ size_t rdlen = MIN(segsz - segoff, PG_SIZE);
+
+ if (rdlen == PG_SIZE) {
+        // full pages go through read_page so the fs can apply any
+        // page-granular optimization
+ return file->ops->read_page(file->inode, pg, PG_SIZE, offset);
+ } else {
+ return file->ops->read(file->inode, pg, rdlen, offset);
+ }
+}
+
+void
+__elf_destruct_mapped(struct mm_region* region)
+{
+ vfree(region->data);
+}
+
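+// Map one PT_LOAD segment as a demand-paged, file-backed region in the target
+// address space, translating ELF segment flags into region protections.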
+int
+elf_map_segment(struct ld_param* ldparam,
+ struct v_file* elfile,
+ struct elf32_phdr* phdr)
+{
+ int proct = 0;
+ if ((phdr->p_flags & PF_R)) {
+ proct |= PROT_READ;
+ }
+ if ((phdr->p_flags & PF_W)) {
+ proct |= PROT_WRITE;
+ }
+ if ((phdr->p_flags & PF_X)) {
+ proct |= PROT_EXEC;
+ }
+
+ struct mm_region* seg_reg;
+ struct mmap_param param = { .vms_mnt = ldparam->vms_mnt,
+ .regions = &ldparam->proc->mm.regions,
+ .proct = proct,
+ .offset = phdr->p_offset,
+ .length = ROUNDUP(phdr->p_memsz, PG_SIZE),
+                                .type = REGION_TYPE_CODE,
+                                .flags = MAP_FIXED | MAP_PRIVATE };
+
+ int status = mem_map(NULL, &seg_reg, PG_ALIGN(phdr->p_va), elfile, ¶m);
+
+ if (!status) {
+        struct elf32_phdr* phdr_ = valloc(SIZE_PHDR);
+        *phdr_ = *phdr;
+        seg_reg->data = phdr_;
+
+ seg_reg->init_page = __elf_populate_mapped;
+ seg_reg->destruct_region = __elf_destruct_mapped;
+ }
+
+ return status;
+}
+
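+// Read the program header table and create a mapping for every PT_LOAD entry;
+// a failure here leaves the address space half-built, hence LD_STAT_FKUP.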
+int
+elf_setup_mapping(struct ld_param* ldparam,
+ struct v_file* elfile,
+ struct elf32_ehdr* ehdr)
+{
+ int status = 0;
+ size_t tbl_sz = ehdr->e_phnum * SIZE_PHDR;
+ struct elf32_phdr* phdrs = valloc(tbl_sz);
+
+ if (!phdrs) {
+ status = ENOMEM;
+ goto done;
+ }
+
+    int read_sz = elfile->ops->read(elfile->inode, phdrs, tbl_sz, ehdr->e_phoff);
+    if (read_sz < 0) {
+        status = read_sz;
+        goto done;
+    }
+
+    size_t entries = (size_t)read_sz / SIZE_PHDR;
+ for (size_t i = 0; i < entries; i++) {
+ struct elf32_phdr* phdr = &phdrs[i];
+
+ if (phdr->p_type == PT_LOAD) {
+ status = elf_map_segment(ldparam, elfile, phdr);
+ }
+ // TODO process other types of segments
+
+ if (status) {
+ ldparam->status |= LD_STAT_FKUP;
+ goto done;
+ }
+ }
+
+done:
+ vfree(phdrs);
+ return status;
+}
+
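+// Loader entry point: validate the ELF header, map all loadable segments, and
+// hand a copy of the header back through ldparam->ehdr_out.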
+int
+elf_load(struct ld_param* ldparam, struct v_file* elfile)
+{
+ struct elf32_ehdr* ehdr = valloc(SIZE_EHDR);
+ int status = elfile->ops->read(elfile->inode, ehdr, SIZE_EHDR, 0);
+
+    if (status < 0) {
+        goto done;
+    }
+
+    status = 0;
+
+ if (!elf_check_exec(ehdr)) {
+ status = ENOEXEC;
+ goto done;
+ }
+
+ if ((status = elf_setup_mapping(ldparam, elfile, ehdr))) {
+ goto done;
+ }
+
+ ldparam->ehdr_out = *ehdr;
+
+done:
+ vfree(ehdr);
+ return status;
+}
\ No newline at end of file
--- /dev/null
+#include <lunaix/elf.h>
+#include <lunaix/fs.h>
+#include <lunaix/ld.h>
+#include <lunaix/mm/mmap.h>
+#include <lunaix/mm/vmm.h>
+#include <lunaix/process.h>
+#include <lunaix/spike.h>
+#include <lunaix/status.h>
+#include <lunaix/syscall.h>
+
+#include <klibc/string.h>
+
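+// Walk a NULL-terminated string array (argv/envp), returning the combined
+// length of its strings and storing the element count in *length.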
+size_t
+exec_str_size(const char** str_arr, size_t* length)
+{
+ const char* chr = *str_arr;
+ size_t sz = 0, len = 0;
+
+ while (chr) {
+ sz += strlen(chr);
+ len++;
+
+        chr = *(str_arr + len);
+ }
+
+ *length = len;
+ return sz + 1;
+}
+
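+// Load the executable into the address space selected by param, place argc
+// and the argument block on the user stack (currently only for the calling
+// process's own address space), and point the saved user context at the new
+// entry point.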
+int
+exec_loadto(struct ld_param* param,
+ struct v_file* executable,
+ const char** argv,
+ const char** envp)
+{
+ int errno = 0;
+
+ size_t argv_len, envp_len;
+ size_t sz_argv = exec_str_size(argv, &argv_len);
+ size_t sz_envp = exec_str_size(envp, &envp_len);
+ size_t total_sz = ROUNDUP(sz_argv + sz_envp, PG_SIZE);
+
+ if (total_sz / PG_SIZE > MAX_VAR_PAGES) {
+ errno = E2BIG;
+ goto done;
+ }
+
+ if ((errno = elf_load(param, executable))) {
+ goto done;
+ }
+
+ struct mmap_param map_param = { .regions = ¶m->proc->mm.regions,
+ .vms_mnt = param->vms_mnt,
+ .flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
+ .type = REGION_TYPE_VARS,
+ .proct = PROT_READ,
+ .length = MAX_VAR_PAGES * PG_SIZE };
+
+ void* mapped;
+ isr_param* intr_ctx = ¶m->proc->intr_ctx;
+
+ // TODO reinitialize heap
+
+ if (param->vms_mnt == VMS_SELF) {
+ // we are loading executable into current addr space
+ if ((errno = mem_map(&mapped, NULL, UMMAP_END, NULL, &map_param))) {
+ goto done;
+ }
+
+ memcpy(mapped, (void*)argv, sz_argv);
+ memcpy(mapped + sz_argv, (void*)envp, sz_envp);
+
+ ptr_t* ustack = (void*)USTACK_TOP;
+ size_t* argc = &((size_t*)&ustack[-1])[-1];
+
+ ustack[-1] = (ptr_t)mapped;
+ *argc = argv_len;
+
+ // TODO handle envp.
+
+        intr_ctx->esp = (ptr_t)argc;
+ } else {
+ // TODO need to find a way to inject argv and envp remotely
+ fail("not implemented");
+ }
+
+ intr_ctx->eip = param->ehdr_out.e_entry;
+    // we will jump to the new entry point when this syscall returns, so from
+    // the invoker's perspective execve does not return
+
+done:
+ return errno;
+}
+
+__DEFINE_LXSYSCALL3(int,
+ execve,
+ const char*,
+ filename,
+ const char*,
+ argv[],
+ const char*,
+ envp[])
+{
+ int errno = 0;
+ struct v_dnode* dnode;
+ struct v_file* file;
+
+ if ((errno = vfs_walk_proc(filename, &dnode, NULL, 0))) {
+ goto done;
+ }
+
+ if ((errno = vfs_open(dnode, &file))) {
+ goto done;
+ }
+
+ struct ld_param ldparam;
+ ld_create_param(&ldparam, __current, VMS_SELF);
+
+ if ((errno = exec_loadto(&ldparam, file, argv, envp))) {
+ vfs_pclose(file, __current->pid);
+
+ if ((ldparam.status & LD_STAT_FKUP)) {
+ // we fucked up our address space.
+ terminate_proc(11451);
+ schedule();
+ fail("should not reach");
+ }
+ }
+
+done:
+ return errno;
+}
\ No newline at end of file
--- /dev/null
+#include <lunaix/ld.h>
+
+void
+ld_create_param(struct ld_param* param, struct proc_info* proc, ptr_t vms)
+{
+ *param = (struct ld_param){ .proc = proc, .vms_mnt = vms };
+}
\ No newline at end of file
#include <lunaix/syscall_utils.h>
// any size beyond this is bullshit
-#define BS_SIZE (2 << 30)
+#define BS_SIZE (KERNEL_MM_BASE - UMMAP_START)
+
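+// Check whether [start, start + len) intersects any region already in the list.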
+int
+mem_has_overlap(vm_regions_t* regions, ptr_t start, size_t len)
+{
+    ptr_t end = start + len - 1;
+ struct mm_region *pos, *n;
+ llist_for_each(pos, n, regions, head)
+ {
+ if (pos->end >= start && pos->start < start) {
+ return 1;
+ }
+
+ if (pos->end <= end && pos->start >= start) {
+ return 1;
+ }
+
+ if (pos->end >= end && pos->start < end) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
int
mem_map(void** addr_out,
- ptr_t mnt,
- vm_regions_t* regions,
+ struct mm_region** created,
void* addr,
struct v_file* file,
- off_t offset,
- size_t length,
- u32_t proct,
- u32_t options)
+ struct mmap_param* param)
{
ptr_t last_end = USER_START;
struct mm_region *pos, *n;
- if ((options & MAP_FIXED)) {
- pos = region_get(regions, addr);
- if (!pos) {
- last_end = addr;
- goto found;
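+    // MAP_FIXED_NOREPLACE: fail instead of clobbering an existing mapping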
+ if ((param->flags & MAP_FIXED_NOREPLACE)) {
+ if (mem_has_overlap(param->regions, addr, param->length)) {
+ return EEXIST;
}
- return EEXIST;
+ last_end = addr;
+ goto found;
}
- llist_for_each(pos, n, regions, head)
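+    // MAP_FIXED: evict whatever overlaps the requested range, then map there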
+ if ((param->flags & MAP_FIXED)) {
+ int status =
+ mem_unmap(param->vms_mnt, param->regions, addr, param->length);
+ if (status) {
+ return status;
+ }
+ last_end = addr;
+ goto found;
+ }
+
+ llist_for_each(pos, n, param->regions, head)
{
- if (pos->start - last_end >= length && last_end >= addr) {
+ if (pos->start - last_end > param->length && last_end > addr) {
+ last_end += 1;
goto found;
}
last_end = pos->end;
found:
addr = last_end;
- struct mm_region* region =
- region_create_range(addr, length, proct | (options & 0x1f));
+ if (addr >= KERNEL_MM_BASE || addr < USER_START) {
+ return ENOMEM;
+ }
+
+ struct mm_region* region = region_create_range(
+ addr,
+ param->length,
+ ((param->proct | param->flags) & 0x1f) | (param->type & ~0xffff));
+
region->mfile = file;
- region->offset = offset;
+ region->offset = param->offset;
- region_add(regions, region);
+ region_add(param->regions, region);
u32_t attr = PG_ALLOW_USER;
- if ((proct & REGION_WRITE)) {
+ if ((param->proct & REGION_WRITE)) {
attr |= PG_WRITE;
}
- for (u32_t i = 0; i < length; i += PG_SIZE) {
- vmm_set_mapping(mnt, addr + i, 0, attr, 0);
+ for (u32_t i = 0; i < param->length; i += PG_SIZE) {
+ vmm_set_mapping(param->vms_mnt, addr + i, 0, attr, 0);
}
- *addr_out = addr;
+    if (file) {
+        vfs_ref_file(file);
+    }
+
+ if (addr_out) {
+ *addr_out = addr;
+ }
+ if (created) {
+ *created = region;
+ }
return 0;
}
if (!vmm_lookupat(mnt, start + i, &mapping)) {
continue;
}
+
if (PG_IS_DIRTY(*mapping.pte)) {
size_t offset = mapping.va - region->start + region->offset;
struct v_inode* inode = region->mfile->inode;
region->mfile->ops->write_page(inode, mapping.va, PG_SIZE, offset);
*mapping.pte &= ~PG_DIRTY;
- cpu_invplg(mapping.va);
+ cpu_invplg(mapping.pte);
} else if ((options & MS_INVALIDATE)) {
- *mapping.pte &= ~PG_PRESENT;
- cpu_invplg(mapping.va);
+ goto invalidate;
}
+
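+        // MS_INVALIDATE_ALL drops every mapping in the range, even pages that
+        // were clean or have just been written back above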
+ if (options & MS_INVALIDATE_ALL) {
+ goto invalidate;
+ }
+
+ continue;
+
+ invalidate:
+ *mapping.pte &= ~PG_PRESENT;
+ pmm_free_page(KERNEL_PID, mapping.pa);
+ cpu_invplg(mapping.pte);
}
}
n = container_of(pos->head.next, typeof(*pos), head);
if (pos->end == pos->start) {
llist_delete(&pos->head);
- vfree(pos);
+ region_release(__current->pid, pos);
}
pos = n;
length -= l;
cur_addr += length;
}
+
+ return 0;
}
__DEFINE_LXSYSCALL3(void*, sys_mmap, void*, addr, size_t, length, va_list, lst)
goto done;
}
+ if (!addr) {
+ addr = UMMAP_START;
+ } else if (addr < UMMAP_START || addr + length >= UMMAP_END) {
+ errno = ENOMEM;
+ goto done;
+ }
+
struct v_fd* vfd;
if ((errno = vfs_getfd(fd, &vfd))) {
goto done;
file = NULL;
}
- length = ROUNDUP(length, PG_SIZE);
+ struct mmap_param param = { .flags = options,
+ .length = ROUNDUP(length, PG_SIZE),
+ .offset = offset,
+ .type = REGION_TYPE_GENERAL,
+ .proct = proct,
+ .regions = &__current->mm.regions,
+ .vms_mnt = VMS_SELF };
- errno = mem_map(&result,
- VMS_SELF,
- &__current->mm.regions,
- addr,
- file,
- offset,
- length,
- proct,
- options);
+ errno = mem_map(&result, NULL, addr, file, ¶m);
done:
__current->k_status = errno;
}
void
-region_release_all(vm_regions_t* lead)
+region_release(pid_t pid, struct mm_region* region)
+{
+ if (region->destruct_region) {
+ region->destruct_region(region);
+ }
+
+ if (region->mfile) {
+ vfs_pclose(region->mfile, pid);
+ }
+
+ vfree(region);
+}
+
+void
+region_release_all(pid_t pid, vm_regions_t* lead)
{
struct mm_region *pos, *n;
llist_for_each(pos, n, lead, head)
{
- vfree(pos);
+ region_release(pid, pos);
}
}
#include <klibc/string.h>
#include <lunaix/clock.h>
#include <lunaix/common.h>
+#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
#include <lunaix/mm/valloc.h>
    /*--- Allocate the user stack ---*/
- struct mm_region* stack_vm;
-
- stack_vm = region_create_range(
- USTACK_END, USTACK_SIZE, REGION_RW | REGION_RSHARED | REGION_ANON);
-    // Register the user stack region
- region_add(&pcb->mm.regions, stack_vm);
-
-    // Reserve the address space; physical pages will be allocated on demand by the page fault handler.
- for (uintptr_t i = PG_ALIGN(USTACK_END); i < USTACK_TOP; i += PG_SIZE) {
- vmm_set_mapping(VMS_MOUNT_1, i, 0, PG_ALLOW_USER | PG_WRITE, VMAP_NULL);
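+    // the user stack is now just another anonymous, fixed-placement region;
+    // its physical pages are faulted in on demand like any other mapping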
+ struct mm_region* mapped;
+ struct mmap_param param = { .vms_mnt = VMS_MOUNT_1,
+ .regions = &pcb->mm.regions,
+ .length = USTACK_SIZE,
+ .proct = PROT_READ | PROT_WRITE,
+ .flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
+ .type = REGION_TYPE_STACK };
+
+ int status = 0;
+ if ((status = mem_map(NULL, &mapped, USTACK_END, NULL, ¶m))) {
+ kprint_panic("fail to alloc user stack: %d", status);
}
// TODO other uspace initialization stuff
llist_for_each(pos, n, &proc->mm.regions, head)
{
mem_sync_pages(VMS_MOUNT_1, pos, pos->start, pos->end - pos->start, 0);
- vfree(pos);
+ region_release(pid, pos);
}
__del_pagetable(pid, VMS_MOUNT_1);
__LXSYSCALL2(int, unlinkat, int, fd, const char*, pathname)
-__LXSYSCALL1(int, mkdir, const char*, path)
\ No newline at end of file
+__LXSYSCALL1(int, mkdir, const char*, path)
+
+__LXSYSCALL3(int,
+ execve,
+ const char*,
+ filename,
+ const char**,
+ argv,
+ const char**,
+ envp)
\ No newline at end of file