 * @brief Private region: pages in this region cannot be shared in any form.
*
*/
-#define REGION_PRIVATE MAP_PRIVATE
+#define REGION_PRIVATE MAP_EXCLUSIVE
/**
 * @brief
 * Read-shared region: pages in this region may be read-shared between two
 * processes, but any write must apply Copy-On-Write.
+ * Equivalent to POSIX MAP_PRIVATE.
*
*/
-#define REGION_RSHARED MAP_RSHARED
+#define REGION_RSHARED MAP_PRIVATE
/**
 * @brief
 * Write-shared region: pages in this region may be read-shared between two
 * processes, and writes require no Copy-On-Write.
- *
+ * Equivalent to POSIX MAP_SHARED.
*/
-#define REGION_WSHARED MAP_WSHARED
+#define REGION_WSHARED MAP_SHARED
#define REGION_PERM_MASK 0x1c
#define REGION_MODE_MASK 0x3
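/*
 * A minimal sketch (hypothetical helper, not in the original source) of how
 * these mode bits are meant to be consumed: mask with REGION_MODE_MASK and
 * compare against a mode. A write into an RSHARED region is what demands
 * Copy-On-Write; a WSHARED region is written in place.
 */
static inline int
region_write_needs_cow(u32_t attr)
{
    // only read-shared regions duplicate the page on write
    return (attr & REGION_MODE_MASK) == REGION_RSHARED;
}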
#include <lunaix/mm/region.h>
#include <lunaix/types.h>
-void*
-mem_map(ptr_t pd_ref,
+int
+mem_map(void** addr_out,
+ ptr_t mnt,
vm_regions_t* regions,
void* addr,
struct v_file* file,
off_t offset,
size_t length,
- u32_t attrs,
+ u32_t proct,
u32_t options);
-void*
+int
mem_unmap(ptr_t mnt, vm_regions_t* regions, void* addr, size_t length);
+void
+mem_sync_pages(ptr_t mnt,
+ struct mm_region* region,
+ ptr_t start,
+ ptr_t length,
+ int options);
+
+int
+mem_msync(ptr_t mnt,
+ vm_regions_t* regions,
+ ptr_t addr,
+ size_t length,
+ int options);
+
#endif /* __LUNAIX_MMAP_H */
#define PTE_NULL 0
-#define P2V(paddr) ((uintptr_t)(paddr) + KERNEL_MM_BASE)
-#define V2P(vaddr) ((uintptr_t)(vaddr)-KERNEL_MM_BASE)
+#define P2V(paddr) ((ptr_t)(paddr) + KERNEL_MM_BASE)
+#define V2P(vaddr) ((ptr_t)(vaddr)-KERNEL_MM_BASE)
-#define PG_ALIGN(addr) ((uintptr_t)(addr)&0xFFFFF000UL)
+#define PG_ALIGN(addr) ((ptr_t)(addr)&0xFFFFF000UL)
+#define PG_ALIGNED(addr) (!((ptr_t)(addr)&0x00000FFFUL))
-#define L1_INDEX(vaddr) (u32_t)(((uintptr_t)(vaddr)&0xFFC00000UL) >> 22)
-#define L2_INDEX(vaddr) (u32_t)(((uintptr_t)(vaddr)&0x003FF000UL) >> 12)
-#define PG_OFFSET(vaddr) (u32_t)((uintptr_t)(vaddr)&0x00000FFFUL)
+#define L1_INDEX(vaddr) (u32_t)(((ptr_t)(vaddr)&0xFFC00000UL) >> 22)
+#define L2_INDEX(vaddr) (u32_t)(((ptr_t)(vaddr)&0x003FF000UL) >> 12)
+#define PG_OFFSET(vaddr) (u32_t)((ptr_t)(vaddr)&0x00000FFFUL)
#define GET_PT_ADDR(pde) PG_ALIGN(pde)
#define GET_PG_ADDR(pte) PG_ALIGN(pte)
-#define PG_DIRTY(pte) ((pte & (1 << 6)) >> 6)
-#define PG_ACCESSED(pte) ((pte & (1 << 5)) >> 5)
-#define PG_PRESENTED(pte) ((pte)&PG_PRESENT)
-
#define IS_CACHED(entry) ((entry & 0x1))
#define PG_PRESENT (0x1)
+#define PG_DIRTY (1 << 6)
+#define PG_ACCESSED (1 << 5)
#define PG_WRITE (0x1 << 1)
#define PG_ALLOW_USER (0x1 << 2)
#define PG_WRITE_THROUGH (1 << 3)
#define PG_DISABLE_CACHE (1 << 4)
#define PG_PDE_4MB (1 << 7)
+#define PG_IS_DIRTY(pte) ((pte)&PG_DIRTY)
+#define PG_IS_ACCESSED(pte) ((pte)&PG_ACCESSED)
+#define PG_IS_PRESENT(pte) ((pte)&PG_PRESENT)
+
#define NEW_L1_ENTRY(flags, pt_addr) \
(PG_ALIGN(pt_addr) | (((flags) | PG_WRITE_THROUGH) & 0xfff))
#define NEW_L2_ENTRY(flags, pg_addr) (PG_ALIGN(pg_addr) | ((flags)&0xfff))
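/*
 * Worked example (illustrative values): composing a present, writable,
 * user-accessible PTE for the physical page at 2MiB.
 *
 *   NEW_L2_ENTRY(PG_PRESENT | PG_WRITE | PG_ALLOW_USER, 0x200000)
 *       == 0x200000 | 0x007
 *       == 0x00200007
 */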
typedef struct
{
    // virtual page address
-    uintptr_t va;
+    ptr_t va;
    // physical page number (0 if the mapping does not exist)
    u32_t pn;
    // physical page address (0 if the mapping does not exist)
-    uintptr_t pa;
+    ptr_t pa;
    // flags of the mapping
    uint16_t flags;
    // address of the PTE
/* Four page mount points and two page-directory mount points: used to temporarily create & edit page tables */
#define PG_MOUNT_RANGE(l1_index) (701 <= l1_index && l1_index <= 703)
-#define PD_MOUNT_1 (KERNEL_MM_BASE + MEM_4MB)
-#define PG_MOUNT_BASE (PD_MOUNT_1 + MEM_4MB)
+#define VMS_MOUNT_1 (KERNEL_MM_BASE + MEM_4MB)
+#define PG_MOUNT_BASE (VMS_MOUNT_1 + MEM_4MB)
#define PG_MOUNT_1 (PG_MOUNT_BASE)
#define PG_MOUNT_2 (PG_MOUNT_BASE + 0x1000)
#define PG_MOUNT_3 (PG_MOUNT_BASE + 0x2000)
#define PG_MOUNT_4 (PG_MOUNT_BASE + 0x3000)
-#define PD_REFERENCED L2_BASE_VADDR
+#define VMS_SELF L2_BASE_VADDR
#define CURPROC_PTE(vpn) \
- (&((x86_page_table*)(PD_MOUNT_1 | (((vpn)&0xffc00) << 2))) \
+ (&((x86_page_table*)(VMS_MOUNT_1 | (((vpn)&0xffc00) << 2))) \
->entry[(vpn)&0x3ff])
#define PTE_MOUNTED(mnt, vpn) \
(((x86_page_table*)((mnt) | (((vpn)&0xffc00) << 2)))->entry[(vpn)&0x3ff])
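/*
 * Worked example of the recursive-mapping arithmetic above, with vpn = va >> 12:
 * ((vpn) & 0xffc00) << 2 turns the top ten bits of the address into the byte
 * offset of its page table inside the 4MiB mount window, while (vpn) & 0x3ff
 * picks the PTE within that table. For va = 0x00402000, i.e. vpn = 0x402:
 *
 *   PTE_MOUNTED(VMS_MOUNT_1, 0x402)
 *       -> table page at VMS_MOUNT_1 + 0x1000, entry 0x002
 */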
struct mm_region*
region_create(ptr_t start, ptr_t end, u32_t attr);
+struct mm_region*
+region_create_range(ptr_t start, size_t length, u32_t attr);
+
void
region_add(vm_regions_t* lead, struct mm_region* vmregion);
vmm_del_mapping(uintptr_t mnt, uintptr_t va);
/**
- * @brief Look up a mapping
+ * @brief Look up a mapping in the current virtual address space
 *
 * @param va virtual address
- * @return v_mapping attributes of the mapping
+ * @param mapping receives the attributes of the mapping
 */
int
vmm_lookup(uintptr_t va, v_mapping* mapping);
+/**
+ * @brief Look up a mapping in the given virtual address space
+ *
+ * @param mnt mount point of the address space
+ * @param va virtual address
+ * @param mapping receives the attributes of the mapping
+ * @return int
+ */
+int
+vmm_lookupat(ptr_t mnt, uintptr_t va, v_mapping* mapping);
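/*
 * Usage sketch (hypothetical caller): probe a page of a foreign address space
 * mounted at VMS_MOUNT_1 and test its dirty bit through the returned PTE:
 *
 *   v_mapping m;
 *   if (vmm_lookupat(VMS_MOUNT_1, va, &m) && PG_IS_DIRTY(*m.pte)) {
 *       // write the page back, then clear PG_DIRTY on *m.pte
 *   }
 */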
+
/**
 * @brief (COW) Create a copy of a virtual page.
*
void*
vmm_vmap(uintptr_t paddr, size_t size, pt_attr attr);
+/**
+ * @brief Translate a virtual address of the current address space into a physical address.
+ *
+ * @param va virtual address
+ * @return void*
+ */
void*
vmm_v2p(void* va);
+/**
+ * @brief Translate a virtual address of the given address space into a physical address.
+ *
+ * @param mnt mount point of the address space
+ * @param va virtual address
+ * @return void*
+ */
+void*
+vmm_v2pat(ptr_t mnt, void* va);
+
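/*
 * Usage sketch (illustrative; some_va is a placeholder): translate through a
 * mounted address space; a result of 0 means the address is not mapped there.
 *
 *   ptr_t pa = (ptr_t)vmm_v2pat(VMS_MOUNT_1, some_va);
 */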
#endif /* __LUNAIX_VMM_H */
#define PROT_EXEC (1 << 4)
// identity mapped to region attributes
+
#define MAP_WSHARED 0x2
#define MAP_RSHARED 0x1
-#define MAP_SHARED (MAP_WSHARED | MAP_RSHARED)
-#define MAP_PRIVATE 0x0
+#define MAP_SHARED MAP_WSHARED
+#define MAP_PRIVATE MAP_RSHARED
+#define MAP_EXCLUSIVE 0x0
#define MAP_ANON (1 << 5)
#define MAP_STACK 0 // no effect in Lunaix
+
// other MAP_* flags should go beyond 0x20
+#define MAP_FIXED 0x40
+
#define MS_ASYNC 0x1
#define MS_SYNC 0x2
#define MS_INVALIDATE 0x4
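/*
 * Sketch of the intended user-space flow (assumes POSIX-style libc wrappers
 * over the mmap/msync/munmap syscalls; fd and sizes are illustrative):
 *
 *   void* buf = mmap(NULL, 0x2000, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   ((char*)buf)[0] = 'x';          // the write marks the first page dirty
 *   msync(buf, 0x2000, MS_SYNC);    // mem_sync_pages writes it back
 *   munmap(buf, 0x2000);
 */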
goto segv_term;
}
- volatile x86_pte_t* pte = &PTE_MOUNTED(PD_REFERENCED, ptr >> 12);
- if (PG_PRESENTED(*pte)) {
+ volatile x86_pte_t* pte = &PTE_MOUNTED(VMS_SELF, ptr >> 12);
+ if (PG_IS_PRESENT(*pte)) {
if ((hit_region->attr & COW_MASK) == COW_MASK) {
// normal page fault, do COW
cpu_invplg(pte);
uintptr_t pa =
(uintptr_t)vmm_dup_page(__current->pid, PG_ENTRY_ADDR(*pte));
pmm_free_page(__current->pid, *pte & ~0xFFF);
- *pte = (*pte & 0xFFF) | pa | PG_WRITE;
+ *pte = (*pte & 0xFFF & ~PG_DIRTY) | pa | PG_WRITE;
goto resolved;
}
// impossible cases or accessing privileged page
// an anonymous page and not present
// -> a new page need to be alloc
if ((hit_region->attr & REGION_ANON)) {
- if (!PG_PRESENTED(*pte)) {
+ if (!PG_IS_PRESENT(*pte)) {
cpu_invplg(pte);
uintptr_t pa = pmm_alloc_page(__current->pid, 0);
if (!pa) {
*pte = *pte | pa | PG_PRESENT;
goto resolved;
}
- // permission denied on anon page
+ // permission denied on anon page (e.g., write on readonly page)
goto segv_term;
}
// if mfile is set (Non-anonymous), then it is a mem map
- if (hit_region->mfile && !PG_PRESENTED(*pte)) {
+ if (hit_region->mfile && !PG_IS_PRESENT(*pte)) {
struct v_file* file = hit_region->mfile;
        u32_t offset =
            (PG_ALIGN(ptr) - hit_region->start) + hit_region->offset;
cpu_invplg(pte);
*pte = (*pte & 0xFFF) | pa | PG_PRESENT;
- ptr = ptr & ~(PG_SIZE - 1);
+ ptr = PG_ALIGN(ptr);
        memset((void*)ptr, 0, PG_SIZE);
        int errno = file->ops->read_page(file->inode, (void*)ptr, PG_SIZE, offset);
goto segv_term;
}
+ *pte &= ~PG_DIRTY;
+
goto resolved;
}
#include <klibc/stdio.h>
#include <klibc/string.h>
-#define TWIMAP_BUFFER_SIZE 1024
+#define TWIMAP_BUFFER_SIZE 4096
void
__twimap_default_reset(struct twimap* map)
acc_size += rdlen;
}
+    if (acc_size <= len - 1) {
+        // null-terminate the assembled output
+        *(char*)(buffer + acc_size) = 0;
+    }
+
vfree(map->buffer);
return acc_size;
}
char* buf = mapping->buffer + mapping->size_acc;
mapping->size_acc +=
- __ksprintf_internal(buf, fmt, TWIMAP_BUFFER_SIZE, args);
+ __ksprintf_internal(buf, fmt, TWIMAP_BUFFER_SIZE, args) - 1;
va_end(args);
}
struct v_file_ops twimap_file_ops = { .close = default_file_close,
.read = __twimap_file_read,
+ .read_page = __twimap_file_read,
.readdir = default_file_readdir,
.seek = default_file_seek,
.write = default_file_write };
\ No newline at end of file
// Create a dedicated stack space for the kernel.
for (size_t i = 0; i < (KSTACK_SIZE >> PG_SIZE_BITS); i++) {
uintptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
- vmm_set_mapping(PD_REFERENCED,
+ vmm_set_mapping(VMS_SELF,
KSTACK_START + (i << PG_SIZE_BITS),
pa,
PG_PREM_RW,
pmm_mark_chunk_occupied(KERNEL_PID, 0, pg_count, PP_FGLOCKED);
for (uintptr_t i = &__usrtext_start; i < &__usrtext_end; i += PG_SIZE) {
- vmm_set_mapping(PD_REFERENCED, i, V2P(i), PG_PREM_UR, VMAP_NULL);
+ vmm_set_mapping(VMS_SELF, i, V2P(i), PG_PREM_UR, VMAP_NULL);
}
// reserve higher half
for (size_t i = L1_INDEX(KERNEL_MM_BASE); i < 1023; i++) {
- assert(vmm_set_mapping(PD_REFERENCED, i << 22, 0, 0, VMAP_NOMAP));
+ assert(vmm_set_mapping(VMS_SELF, i << 22, 0, 0, VMAP_NOMAP));
}
}
vmm_dup_page(pid_t pid, void* pa)
{
void* new_ppg = pmm_alloc_page(pid, 0);
- vmm_set_mapping(PD_REFERENCED, PG_MOUNT_3, new_ppg, PG_PREM_RW, VMAP_NULL);
- vmm_set_mapping(PD_REFERENCED, PG_MOUNT_4, pa, PG_PREM_RW, VMAP_NULL);
+ vmm_set_mapping(VMS_SELF, PG_MOUNT_3, new_ppg, PG_PREM_RW, VMAP_NULL);
+ vmm_set_mapping(VMS_SELF, PG_MOUNT_4, pa, PG_PREM_RW, VMAP_NULL);
asm volatile("movl %1, %%edi\n"
"movl %2, %%esi\n"
"r"(PG_MOUNT_4)
: "memory", "%edi", "%esi");
- vmm_del_mapping(PD_REFERENCED, PG_MOUNT_3);
- vmm_del_mapping(PD_REFERENCED, PG_MOUNT_4);
+ vmm_del_mapping(VMS_SELF, PG_MOUNT_3);
+ vmm_del_mapping(VMS_SELF, PG_MOUNT_4);
return new_ppg;
}
\ No newline at end of file
heap->brk = heap->start;
mutex_init(&heap->lock);
- return vmm_set_mapping(PD_REFERENCED,
+ return vmm_set_mapping(VMS_SELF,
heap->brk,
0,
PG_WRITE | PG_ALLOW_USER,
if (diff) {
// if next do require new pages to be mapped
for (size_t i = 0; i < diff; i += PG_SIZE) {
- vmm_set_mapping(PD_REFERENCED,
+ vmm_set_mapping(VMS_SELF,
PG_ALIGN(current_brk) + PG_SIZE + i,
0,
PG_WRITE | user,
#include <lunaix/spike.h>
#include <lunaix/syscall.h>
+#include <lunaix/syscall_utils.h>
-void*
-mem_map(ptr_t mnt,
+// any size beyond this is bullshit (2 << 30 == 2GiB)
+#define BS_SIZE (2 << 30)
+
+int
+mem_map(void** addr_out,
+ ptr_t mnt,
vm_regions_t* regions,
void* addr,
struct v_file* file,
u32_t proct,
u32_t options)
{
- if (!length || (length & (PG_SIZE - 1)) || (offset & (PG_SIZE - 1))) {
- __current->k_status = EINVAL;
- return (void*)-1;
- }
+ ptr_t last_end = USER_START;
+ struct mm_region *pos, *n;
- // read_page is not supported
- if (!file->ops->read_page) {
- __current->k_status = ENODEV;
- return (void*)-1;
+ if ((options & MAP_FIXED)) {
+ pos = region_get(regions, addr);
+ if (!pos) {
+ last_end = addr;
+ goto found;
+ }
+ return EEXIST;
}
- ptr_t last_end = USER_START;
- struct mm_region *pos, *n;
llist_for_each(pos, n, regions, head)
{
        if (pos->start - last_end >= length && last_end >= (ptr_t)addr) {
            goto found;
        }
        last_end = pos->end;
    }
- __current->k_status = ENOMEM;
- return (void*)-1;
+ return ENOMEM;
found:
addr = last_end;
- ptr_t end = addr + length;
struct mm_region* region =
- region_create(addr, end, proct | (options & 0x1f));
+ region_create_range(addr, length, proct | (options & 0x1f));
region->mfile = file;
region->offset = offset;
vmm_set_mapping(mnt, addr + i, 0, attr, 0);
}
- return addr;
+ *addr_out = addr;
+ return 0;
+}
+
+void
+mem_sync_pages(ptr_t mnt,
+ struct mm_region* region,
+ ptr_t start,
+ ptr_t length,
+ int options)
+{
+ if (!region->mfile || !(region->attr & REGION_WSHARED)) {
+ return;
+ }
+
+ v_mapping mapping;
+ for (size_t i = 0; i < length; i += PG_SIZE) {
+ if (!vmm_lookupat(mnt, start + i, &mapping)) {
+ continue;
+ }
+ if (PG_IS_DIRTY(*mapping.pte)) {
+ size_t offset = mapping.va - region->start + region->offset;
+ struct v_inode* inode = region->mfile->inode;
+ region->mfile->ops->write_page(inode, mapping.va, PG_SIZE, offset);
+ *mapping.pte &= ~PG_DIRTY;
+ cpu_invplg(mapping.va);
+ } else if ((options & MS_INVALIDATE)) {
+ *mapping.pte &= ~PG_PRESENT;
+ cpu_invplg(mapping.va);
+ }
+ }
+}
+
+int
+mem_msync(ptr_t mnt,
+ vm_regions_t* regions,
+ ptr_t addr,
+ size_t length,
+ int options)
+{
+ struct mm_region* pos = list_entry(regions->next, struct mm_region, head);
+ while (length && (ptr_t)&pos->head != (ptr_t)regions) {
+ if (pos->end >= addr && pos->start <= addr) {
+ size_t l = MIN(length, pos->end - addr);
+ mem_sync_pages(mnt, pos, addr, l, options);
+
+ addr += l;
+ length -= l;
+ }
+ pos = list_entry(pos->head.next, struct mm_region, head);
+ }
+
+ if (length) {
+ return ENOMEM;
+ }
+
+ return 0;
}
-void*
+int
mem_unmap(ptr_t mnt, vm_regions_t* regions, void* addr, size_t length)
{
length = ROUNDUP(length, PG_SIZE);
l = length;
}
- // TODO for shared mappings, sync page content if modified. (also
- // implement msync)
+ mem_sync_pages(mnt, pos, cur_addr, l, 0);
for (size_t i = 0; i < l; i += PG_SIZE) {
ptr_t pa = vmm_del_mapping(mnt, cur_addr + i);
int fd = va_arg(lst, u32_t);
off_t offset = va_arg(lst, off_t);
int options = va_arg(lst, int);
-
int errno = 0;
+ void* result = (void*)-1;
+
+ if (!length || length > BS_SIZE || !PG_ALIGNED(addr)) {
+ errno = EINVAL;
+ goto done;
+ }
+
struct v_fd* vfd;
if ((errno = vfs_getfd(fd, &vfd))) {
- __current->k_status = errno;
- return (void*)-1;
+ goto done;
+ }
+
+ struct v_file* file = vfd->file;
+
+ if (!(options & MAP_ANON)) {
+ if (!file->ops->read_page) {
+ errno = ENODEV;
+ goto done;
+ }
+ } else {
+ file = NULL;
}
length = ROUNDUP(length, PG_SIZE);
- return mem_map(PD_REFERENCED,
- &__current->mm.regions,
- addr,
- vfd->file,
- offset,
- length,
- proct,
- options);
+ errno = mem_map(&result,
+ VMS_SELF,
+ &__current->mm.regions,
+ addr,
+ file,
+ offset,
+ length,
+ proct,
+ options);
+
+done:
+ __current->k_status = errno;
+ return result;
}
__DEFINE_LXSYSCALL2(void, munmap, void*, addr, size_t, length)
{
- return mem_unmap(PD_REFERENCED, &__current->mm.regions, addr, length);
+ return mem_unmap(VMS_SELF, &__current->mm.regions, addr, length);
+}
+
+__DEFINE_LXSYSCALL3(int, msync, void*, addr, size_t, length, int, flags)
+{
+ if (!PG_ALIGNED(addr) || ((flags & MS_ASYNC) && (flags & MS_SYNC))) {
+ return DO_STATUS(EINVAL);
+ }
+
+ int status =
+ mem_msync(VMS_SELF, &__current->mm.regions, addr, length, flags);
+
+ return DO_STATUS(status);
}
\ No newline at end of file
iounmap(uintptr_t vaddr, u32_t size)
{
for (size_t i = 0; i < size; i += PG_SIZE) {
- uintptr_t paddr = vmm_del_mapping(PD_REFERENCED, vaddr + i);
+ uintptr_t paddr = vmm_del_mapping(VMS_SELF, vaddr + i);
pmm_free_page(KERNEL_PID, paddr);
}
}
\ No newline at end of file
+#include <lunaix/mm/page.h>
#include <lunaix/mm/region.h>
#include <lunaix/mm/valloc.h>
+#include <lunaix/spike.h>
#include <klibc/string.h>
struct mm_region*
region_create(ptr_t start, ptr_t end, u32_t attr)
{
- return valloc(sizeof(struct mm_region));
+ assert_msg(PG_ALIGNED(start), "not page aligned");
+ assert_msg(PG_ALIGNED(end), "not page aligned");
+ struct mm_region* region = valloc(sizeof(struct mm_region));
+ *region =
+ (struct mm_region){ .attr = attr, .start = start, .end = end - 1 };
+ return region;
+}
+
+struct mm_region*
+region_create_range(ptr_t start, size_t length, u32_t attr)
+{
+ assert_msg(PG_ALIGNED(start), "not page aligned");
+ assert_msg(PG_ALIGNED(length), "not page aligned");
+ struct mm_region* region = valloc(sizeof(struct mm_region));
+ *region = (struct mm_region){ .attr = attr,
+ .start = start,
+ .end = start + length - 1 };
+ return region;
}
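/*
 * Illustrative values: the stored end is inclusive, so a three-page range
 * starting at 0x40000000 gives:
 *
 *   struct mm_region* r = region_create_range(0x40000000, 0x3000, REGION_RW);
 *   // r->start == 0x40000000, r->end == 0x40002fff
 */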
void
{
if (llist_empty(lead)) {
llist_append(lead, &vmregion->head);
- return vmregion;
+ return;
}
ptr_t cur_end = 0;
#include <lunaix/spike.h>
#define VMAP_START (PG_MOUNT_BASE + MEM_4MB)
-#define VMAP_END PD_REFERENCED
+#define VMAP_END VMS_SELF
static uintptr_t start = VMAP_START;
done:
uintptr_t alloc_begin = current_addr - examed_size;
for (size_t i = 0; i < size; i += PG_SIZE) {
- vmm_set_mapping(
- PD_REFERENCED, alloc_begin + i, paddr + i, PG_PREM_RW, 0);
+ vmm_set_mapping(VMS_SELF, alloc_begin + i, paddr + i, PG_PREM_RW, 0);
pmm_ref_page(KERNEL_PID, paddr + i);
}
start = alloc_begin + size;
}
}
- if (mnt == PD_REFERENCED) {
+ if (mnt == VMS_SELF) {
cpu_invplg(va);
}
int
vmm_lookup(uintptr_t va, v_mapping* mapping)
+{
+ return vmm_lookupat(VMS_SELF, va, mapping);
+}
+
+int
+vmm_lookupat(ptr_t mnt, uintptr_t va, v_mapping* mapping)
{
u32_t l1_index = L1_INDEX(va);
u32_t l2_index = L2_INDEX(va);
- x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
+    x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
x86_pte_t l1pte = l1pt->entry[l1_index];
if (l1pte) {
x86_pte_t* l2pte =
- &((x86_page_table*)L2_VADDR(l1_index))->entry[l2_index];
+ &((x86_page_table*)(mnt | (l1_index << 12)))->entry[l2_index];
        if (*l2pte) {
mapping->flags = PG_ENTRY_FLAGS(*l2pte);
mapping->pa = PG_ENTRY_ADDR(*l2pte);
return 0;
}
+void*
+vmm_v2pat(ptr_t mnt, void* va)
+{
+ u32_t l1_index = L1_INDEX(va);
+ u32_t l2_index = L2_INDEX(va);
+
+ x86_page_table* l1pt = (x86_page_table*)(mnt | 1023 << 12);
+ x86_pte_t l1pte = l1pt->entry[l1_index];
+
+ if (l1pte) {
+ x86_pte_t* l2pte =
+ &((x86_page_table*)(mnt | (l1_index << 12)))->entry[l2_index];
+        if (*l2pte) {
+            return (void*)(PG_ENTRY_ADDR(*l2pte) | ((uintptr_t)va & 0xfff));
+ }
+ }
+ return 0;
+}
+
void*
vmm_mount_pd(uintptr_t mnt, void* pde)
{
// clean up
for (size_t i = 0; i < (uintptr_t)(&__init_hhk_end); i += PG_SIZE) {
- vmm_del_mapping(PD_REFERENCED, (void*)i);
+ vmm_del_mapping(VMS_SELF, (void*)i);
pmm_free_page(KERNEL_PID, (void*)i);
}
}
// Don't fuck up our kernel space!
break;
}
- vmm_set_mapping(PD_REFERENCED, _pa, _pa, PG_PREM_R, VMAP_NULL);
+ vmm_set_mapping(VMS_SELF, _pa, _pa, PG_PREM_R, VMAP_NULL);
pmm_mark_page_occupied(
KERNEL_PID, _pa >> PG_SIZE_BITS, PP_FGLOCKED);
}
mmap.type);
for (; j < pg_num; j++) {
uintptr_t _pa = pa + (j << PG_SIZE_BITS);
- vmm_del_mapping(PD_REFERENCED, _pa);
+ vmm_del_mapping(VMS_SELF, _pa);
if (mmap.type == MULTIBOOT_MEMORY_ACPI_RECLAIMABLE) {
pmm_mark_page_free(_pa >> PG_SIZE_BITS);
}
__dup_pagetable(pid_t pid, uintptr_t mount_point)
{
void* ptd_pp = pmm_alloc_page(pid, PP_FGPERSIST);
- vmm_set_mapping(PD_REFERENCED, PG_MOUNT_1, ptd_pp, PG_PREM_RW, VMAP_NULL);
+ vmm_set_mapping(VMS_SELF, PG_MOUNT_1, ptd_pp, PG_PREM_RW, VMAP_NULL);
x86_page_table* ptd = PG_MOUNT_1;
x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
        // copy the L2 page tables
void* pt_pp = pmm_alloc_page(pid, PP_FGPERSIST);
- vmm_set_mapping(
- PD_REFERENCED, PG_MOUNT_2, pt_pp, PG_PREM_RW, VMAP_NULL);
+ vmm_set_mapping(VMS_SELF, PG_MOUNT_2, pt_pp, PG_PREM_RW, VMAP_NULL);
x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
x86_page_table* pt = PG_MOUNT_2;
void*
vmm_dup_vmspace(pid_t pid)
{
- return __dup_pagetable(pid, PD_REFERENCED);
+ return __dup_pagetable(pid, VMS_SELF);
}
__DEFINE_LXSYSCALL(pid_t, fork)
void
init_proc_user_space(struct proc_info* pcb)
{
- vmm_mount_pd(PD_MOUNT_1, pcb->page_table);
+ vmm_mount_pd(VMS_MOUNT_1, pcb->page_table);
    /*--- Allocate the user stack ---*/
struct mm_region* stack_vm;
- stack_vm = region_create(
- USTACK_END, USTACK_TOP, REGION_RW | REGION_RSHARED | REGION_ANON);
+ stack_vm = region_create_range(
+ USTACK_END, USTACK_SIZE, REGION_RW | REGION_RSHARED | REGION_ANON);
    // register the user stack region
region_add(&pcb->mm.regions, stack_vm);
    // Reserve the address space; actual physical pages are allocated on demand by the page fault handler.
for (uintptr_t i = PG_ALIGN(USTACK_END); i < USTACK_TOP; i += PG_SIZE) {
- vmm_set_mapping(PD_MOUNT_1, i, 0, PG_ALLOW_USER | PG_WRITE, VMAP_NULL);
+ vmm_set_mapping(VMS_MOUNT_1, i, 0, PG_ALLOW_USER | PG_WRITE, VMAP_NULL);
}
// TODO other uspace initialization stuff
- vmm_unmount_pd(PD_MOUNT_1);
+ vmm_unmount_pd(VMS_MOUNT_1);
}
void
__mark_region(uintptr_t start_vpn, uintptr_t end_vpn, int attr)
{
for (size_t i = start_vpn; i <= end_vpn; i++) {
- x86_pte_t* curproc = &PTE_MOUNTED(PD_REFERENCED, i);
- x86_pte_t* newproc = &PTE_MOUNTED(PD_MOUNT_1, i);
+ x86_pte_t* curproc = &PTE_MOUNTED(VMS_SELF, i);
+ x86_pte_t* newproc = &PTE_MOUNTED(VMS_MOUNT_1, i);
cpu_invplg(newproc);
if ((attr & REGION_MODE_MASK) == REGION_RSHARED) {
__copy_fdtable(pcb);
region_copy(&__current->mm.regions, &pcb->mm.regions);
- setup_proc_mem(pcb, PD_REFERENCED);
+ setup_proc_mem(pcb, VMS_SELF);
        // further configure the page table according to each mm_region
__mark_region(start_vpn, end_vpn, pos->attr);
}
- vmm_unmount_pd(PD_MOUNT_1);
+ vmm_unmount_pd(VMS_MOUNT_1);
    // Just like fork, this returns twice.
pcb->intr_ctx.registers.eax = 0;
pid_t pid = proc->pid;
void* pt_copy = __dup_pagetable(pid, usedMnt);
-    vmm_mount_pd(PD_MOUNT_1, pt_copy); // mount the new process's page table
+    vmm_mount_pd(VMS_MOUNT_1, pt_copy); // mount the new process's page table
// copy the kernel stack
for (size_t i = KSTACK_START >> 12; i <= KSTACK_TOP >> 12; i++) {
- volatile x86_pte_t* ppte = &PTE_MOUNTED(PD_MOUNT_1, i);
+ volatile x86_pte_t* ppte = &PTE_MOUNTED(VMS_MOUNT_1, i);
/*
        This is a fucking nightmare, the TLB caching keeps the rewrite to the PTE
#include <lunaix/fs/taskfs.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/kalloc.h>
+#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
vfree(proc->fdtable);
vfree_dma(proc->fxstate);
- // TODO unmap all regions
+ vmm_mount_pd(VMS_MOUNT_1, proc->page_table);
+
struct mm_region *pos, *n;
llist_for_each(pos, n, &proc->mm.regions, head)
{
+ mem_sync_pages(VMS_MOUNT_1, pos, pos->start, pos->end - pos->start, 0);
vfree(pos);
}
- vmm_mount_pd(PD_MOUNT_1, proc->page_table);
-
- __del_pagetable(pid, PD_MOUNT_1);
+ __del_pagetable(pid, VMS_MOUNT_1);
- vmm_unmount_pd(PD_MOUNT_1);
+ vmm_unmount_pd(VMS_MOUNT_1);
cake_release(proc_pile, proc);