refactor: clean up code style and formatting
+++ /dev/null
-# bx_enh_dbg_ini
-SeeReg[0] = TRUE
-SeeReg[1] = TRUE
-SeeReg[2] = TRUE
-SeeReg[3] = TRUE
-SeeReg[4] = FALSE
-SeeReg[5] = FALSE
-SeeReg[6] = FALSE
-SeeReg[7] = FALSE
-SingleCPU = FALSE
-ShowIOWindows = TRUE
-ShowButtons = TRUE
-SeeRegColors = TRUE
-ignoreNxtT = TRUE
-ignSSDisasm = TRUE
-UprCase = 1
-DumpInAsciiMode = 3
-isLittleEndian = TRUE
-DefaultAsmLines = 512
-DumpWSIndex = 2
-DockOrder = 0x123
-ListWidthPix[0] = 203
-ListWidthPix[1] = 249
-ListWidthPix[2] = 287
-MainWindow = 2536, 358, 743, 500
-FontName = Normal
#define __LUNAIX_DMM_H
// Dynamic Memory (i.e., heap) Manager
-#include <stddef.h>
#include <lunaix/mm/mm.h>
#include <lunaix/process.h>
+#include <stddef.h>
#define M_ALLOCATED 0x1
#define M_PREV_FREE 0x2
#define HEAP_INIT_SIZE 4096
-
int
dmm_init(heap_context_t* heap);
int
-lxbrk(heap_context_t* heap, void* addr);
+lxbrk(heap_context_t* heap, void* addr, int user);
void*
-lxsbrk(heap_context_t* heap, size_t size);
+lxsbrk(heap_context_t* heap, size_t size, int user);
void*
lx_malloc_internal(heap_context_t* heap, size_t size);
#ifndef __LUNAIX_PAGE_H
#define __LUNAIX_PAGE_H
-#include <stdint.h>
#include <lunaix/common.h>
+#include <stdint.h>
-#define PG_SIZE_BITS 12
-#define PG_SIZE (1 << PG_SIZE_BITS)
-#define PG_INDEX_BITS 10
+#define PG_SIZE_BITS 12
+#define PG_SIZE (1 << PG_SIZE_BITS)
+#define PG_INDEX_BITS 10
-#define PG_MAX_ENTRIES 1024U
-#define PG_LAST_TABLE PG_MAX_ENTRIES - 1
-#define PG_FIRST_TABLE 0
+#define PG_MAX_ENTRIES 1024U
+#define PG_LAST_TABLE PG_MAX_ENTRIES - 1
+#define PG_FIRST_TABLE 0
-#define PTE_NULL 0
+#define PTE_NULL 0
-#define P2V(paddr) ((uintptr_t)(paddr) + HIGHER_HLF_BASE)
-#define V2P(vaddr) ((uintptr_t)(vaddr) - HIGHER_HLF_BASE)
+#define P2V(paddr) ((uintptr_t)(paddr) + HIGHER_HLF_BASE)
+#define V2P(vaddr) ((uintptr_t)(vaddr)-HIGHER_HLF_BASE)
-#define PG_ALIGN(addr) ((uintptr_t)(addr) & 0xFFFFF000UL)
+#define PG_ALIGN(addr) ((uintptr_t)(addr)&0xFFFFF000UL)
-#define L1_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr) & 0xFFC00000UL) >> 22)
-#define L2_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr) & 0x003FF000UL) >> 12)
-#define PG_OFFSET(vaddr) (uint32_t)((uintptr_t)(vaddr) & 0x00000FFFUL)
+#define L1_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr)&0xFFC00000UL) >> 22)
+#define L2_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr)&0x003FF000UL) >> 12)
+#define PG_OFFSET(vaddr) (uint32_t)((uintptr_t)(vaddr)&0x00000FFFUL)
-#define GET_PT_ADDR(pde) PG_ALIGN(pde)
-#define GET_PG_ADDR(pte) PG_ALIGN(pte)
+#define GET_PT_ADDR(pde) PG_ALIGN(pde)
+#define GET_PG_ADDR(pte) PG_ALIGN(pte)
-#define PG_DIRTY(pte) ((pte & (1 << 6)) >> 6)
-#define PG_ACCESSED(pte) ((pte & (1 << 5)) >> 5)
+#define PG_DIRTY(pte) ((pte & (1 << 6)) >> 6)
+#define PG_ACCESSED(pte) ((pte & (1 << 5)) >> 5)
-#define IS_CACHED(entry) ((entry & 0x1))
+#define IS_CACHED(entry) ((entry & 0x1))
-#define PG_PRESENT (0x1)
-#define PG_WRITE (0x1 << 1)
-#define PG_ALLOW_USER (0x1 << 2)
-#define PG_WRITE_THROUGH (1 << 3)
-#define PG_DISABLE_CACHE (1 << 4)
-#define PG_PDE_4MB (1 << 7)
+#define PG_PRESENT (0x1)
+#define PG_WRITE (0x1 << 1)
+#define PG_ALLOW_USER (0x1 << 2)
+#define PG_WRITE_THROUGH (1 << 3)
+#define PG_DISABLE_CACHE (1 << 4)
+#define PG_PDE_4MB (1 << 7)
-#define NEW_L1_ENTRY(flags, pt_addr) (PG_ALIGN(pt_addr) | (((flags) | PG_WRITE_THROUGH) & 0xfff))
-#define NEW_L2_ENTRY(flags, pg_addr) (PG_ALIGN(pg_addr) | ((flags) & 0xfff))
+#define NEW_L1_ENTRY(flags, pt_addr) \
+ (PG_ALIGN(pt_addr) | (((flags) | PG_WRITE_THROUGH) & 0xfff))
+#define NEW_L2_ENTRY(flags, pg_addr) (PG_ALIGN(pg_addr) | ((flags)&0xfff))
-#define V_ADDR(pd, pt, offset) ((pd) << 22 | (pt) << 12 | (offset))
-#define P_ADDR(ppn, offset) ((ppn << 12) | (offset))
+#define V_ADDR(pd, pt, offset) ((pd) << 22 | (pt) << 12 | (offset))
+#define P_ADDR(ppn, offset) ((ppn << 12) | (offset))
-#define PG_ENTRY_FLAGS(entry) ((entry) & 0xFFFU)
-#define PG_ENTRY_ADDR(entry) ((entry) & ~0xFFFU)
+#define PG_ENTRY_FLAGS(entry) ((entry)&0xFFFU)
+#define PG_ENTRY_ADDR(entry) ((entry) & ~0xFFFU)
-#define HAS_FLAGS(entry, flags) ((PG_ENTRY_FLAGS(entry) & (flags)) == flags)
-#define CONTAINS_FLAGS(entry, flags) (PG_ENTRY_FLAGS(entry) & (flags))
+#define HAS_FLAGS(entry, flags) ((PG_ENTRY_FLAGS(entry) & (flags)) == flags)
+#define CONTAINS_FLAGS(entry, flags) (PG_ENTRY_FLAGS(entry) & (flags))
-#define PG_PREM_R PG_PRESENT
-#define PG_PREM_RW PG_PRESENT | PG_WRITE
-#define PG_PREM_UR PG_PRESENT | PG_ALLOW_USER
-#define PG_PREM_URW PG_PRESENT | PG_WRITE | PG_ALLOW_USER
+#define PG_PREM_R PG_PRESENT
+#define PG_PREM_RW PG_PRESENT | PG_WRITE
+#define PG_PREM_UR PG_PRESENT | PG_ALLOW_USER
+#define PG_PREM_URW PG_PRESENT | PG_WRITE | PG_ALLOW_USER
// 用于对PD进行循环映射,因为我们可能需要对PD进行频繁操作,我们在这里禁用TLB缓存
-#define T_SELF_REF_PERM PG_PREM_RW | PG_DISABLE_CACHE | PG_WRITE_THROUGH
-
+#define T_SELF_REF_PERM PG_PREM_RW | PG_DISABLE_CACHE | PG_WRITE_THROUGH
// 页目录的虚拟基地址,可以用来访问到各个PDE
-#define L1_BASE_VADDR 0xFFFFF000U
+#define L1_BASE_VADDR 0xFFFFF000U
// 页表的虚拟基地址,可以用来访问到各个PTE
-#define L2_BASE_VADDR 0xFFC00000U
+#define L2_BASE_VADDR 0xFFC00000U
// 用来获取特定的页表的虚拟地址
-#define L2_VADDR(pd_offset) (L2_BASE_VADDR | (pd_offset << 12))
+#define L2_VADDR(pd_offset) (L2_BASE_VADDR | (pd_offset << 12))
typedef unsigned long ptd_t;
typedef unsigned long pt_t;
/**
* @brief 虚拟映射属性
- *
+ *
*/
-typedef struct {
+typedef struct
+{
+ // 虚拟页地址
+ uintptr_t va;
// 物理页码(如果不存在映射,则为0)
uint32_t pn;
// 物理页地址(如果不存在映射,则为0)
// 映射的flags
uint16_t flags;
// PTE地址
- x86_pte_t *pte;
+ x86_pte_t* pte;
} v_mapping;
typedef struct
x86_pte_t entry[PG_MAX_ENTRIES];
} __attribute__((packed)) x86_page_table;
-
extern void __pg_mount_point;
/* 四个页挂载点,两个页目录挂载点: 用于临时创建&编辑页表 */
-#define PD_MOUNT_1 0xAFC00000
-#define PD_MOUNT_2 0xAF800000
-#define PG_MOUNT_BASE 0xAF7FF000
-#define PG_MOUNT_1 (PG_MOUNT_BASE)
-#define PG_MOUNT_2 (PG_MOUNT_BASE - 0x1000)
-#define PG_MOUNT_3 (PG_MOUNT_BASE - 0x2000)
-#define PG_MOUNT_4 (PG_MOUNT_BASE - 0x3000)
-#define PD_REFERENCED L2_BASE_VADDR
-
-#define CURPROC_PTE(vpn) (&((x86_page_table*)(PD_MOUNT_1 | (((vpn) & 0xffc00) << 2)))->entry[(vpn) & 0x3ff])
-#define PTE_MOUNTED(mnt, vpn) (((x86_page_table*)((mnt) | (((vpn) & 0xffc00) << 2)))->entry[(vpn) & 0x3ff])
+#define PD_MOUNT_1 0xAFC00000
+#define PD_MOUNT_2 0xAF800000
+#define PG_MOUNT_BASE 0xAF7FF000
+#define PG_MOUNT_1 (PG_MOUNT_BASE)
+#define PG_MOUNT_2 (PG_MOUNT_BASE - 0x1000)
+#define PG_MOUNT_3 (PG_MOUNT_BASE - 0x2000)
+#define PG_MOUNT_4 (PG_MOUNT_BASE - 0x3000)
+#define PD_REFERENCED L2_BASE_VADDR
+
+#define CURPROC_PTE(vpn) \
+ (&((x86_page_table*)(PD_MOUNT_1 | (((vpn)&0xffc00) << 2))) \
+ ->entry[(vpn)&0x3ff])
+#define PTE_MOUNTED(mnt, vpn) \
+ (((x86_page_table*)((mnt) | (((vpn)&0xffc00) << 2)))->entry[(vpn)&0x3ff])
#endif /* __LUNAIX_PAGE_H */
#include <lunaix/process.h>
#include <stddef.h>
#include <stdint.h>
-#include <lunaix/mm/pmm.h>
// Virtual memory manager
/**
vmm_init_pd();
/**
- * @brief 尝试建立一个映射关系。映射指定的物理页地址至虚拟页地址,如果指定的虚拟页地址已被占用
- * 则尝试寻找新的可用地址(该地址总是大于指定的地址)。
+ * @brief 在指定地址空间中,添加一个映射
*
- * @param vpn 虚拟页地址
- * @param pa 物理页地址
- * @param dattr PDE 的属性
- * @param tattr PTE 的属性
- * @return 虚拟页地址,如不成功,则为 NULL
- */
-void*
-vmm_map_page(pid_t pid, void* va, void* pa, pt_attr tattr);
-
-/**
- * @brief 建立一个映射关系,映射指定的物理页地址至虚拟页地址。如果指定的虚拟页地址已被占用,
- * 则覆盖。
- *
- * @param va 虚拟页地址
- * @param pa 物理页地址
- * @param dattr PDE 的属性
- * @param tattr PTE 的属性
- * @return 虚拟页地址
- */
-void*
-vmm_fmap_page(pid_t pid, void* va, void* pa, pt_attr tattr);
-
-/**
- * @brief 尝试为一个虚拟页地址创建一个可用的物理页映射
- *
- * @param va 虚拟页地址
- * @return 虚拟页地址,如不成功,则为 NULL
- */
-void*
-vmm_alloc_page(pid_t pid, void* va, void** pa, pt_attr tattr, pp_attr_t pattr);
-
-
-/**
- * @brief 尝试分配多个连续的虚拟页
- *
- * @param va 起始虚拟地址
- * @param sz 大小(必须为4K对齐)
- * @param tattr 属性
- * @return int 是否成功
- */
-int
-vmm_alloc_pages(pid_t pid, void* va, size_t sz, pt_attr tattr, pp_attr_t pattr);
-
-/**
- * @brief 设置一个映射,如果映射已存在,则忽略。
- *
- * @param va
- * @param pa
- * @param attr
+ * @param mnt 地址空间挂载点
+ * @param va 虚拟地址
+ * @param pa 物理地址
+ * @param attr 映射属性
+ * @return int
*/
int
-vmm_set_mapping(pid_t pid, void* va, void* pa, pt_attr attr);
-
-/**
- * @brief 删除并释放一个映射
- *
- * @param vpn
- */
-void
-vmm_unmap_page(pid_t pid, void* va);
+vmm_set_mapping(uintptr_t mnt, uintptr_t va, uintptr_t pa, pt_attr attr);
/**
* @brief 删除一个映射
*
- * @param vpn
+ * @param mnt
+ * @param pid
+ * @param va
+ * @return int
*/
-void
-vmm_unset_mapping(void* va);
-
-/**
- * @brief 将虚拟地址翻译为其对应的物理映射
- *
- * @param va 虚拟地址
- * @return void* 物理地址,如映射不存在,则为NULL
- */
-void*
-vmm_v2p(void* va);
+int
+vmm_del_mapping(uintptr_t mnt, uintptr_t va);
/**
* @brief 查找一个映射
* @param va 虚拟地址
* @return v_mapping 映射相关属性
*/
-v_mapping
-vmm_lookup(void* va);
+int
+vmm_lookup(uintptr_t va, v_mapping* mapping);
/**
* @brief (COW) 为虚拟页创建副本。
- *
+ *
* @return void* 包含虚拟页副本的物理页地址。
- *
+ *
*/
-void* vmm_dup_page(pid_t pid, void* pa);
+void*
+vmm_dup_page(pid_t pid, void* pa);
/**
* @brief 挂载另一个虚拟地址空间至当前虚拟地址空间
- *
+ *
* @param pde 页目录的物理地址
- * @return void*
+ * @return void*
*/
void*
vmm_mount_pd(uintptr_t mnt, void* pde);
/**
* @brief 卸载已挂载的虚拟地址空间
- *
+ *
*/
void*
vmm_unmount_pd(uintptr_t mnt);
extern void
__print_panic_msg(const char* msg, const isr_param* param);
+extern void __kernel_heap_start;
+
void
intr_routine_page_fault(const isr_param* param)
{
goto segv_term;
}
+ v_mapping mapping;
+ if (!vmm_lookup(ptr, &mapping)) {
+ goto segv_term;
+ }
+
+ if (!SEL_RPL(param->cs)) {
+ // 如果是内核页错误……
+ if (do_kernel(&mapping)) {
+ return;
+ }
+ goto segv_term;
+ }
+
struct mm_region* hit_region = region_get(__current, ptr);
if (!hit_region) {
param->eip);
terminate_proc(LXSEGFAULT);
// should not reach
+}
+
+int
+do_kernel(v_mapping* mapping)
+{
+ uintptr_t addr = mapping->va;
+ if (addr >= &__kernel_heap_start && addr < L2_BASE_VADDR) {
+ // This is kernel heap page
+ uintptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
+ *mapping->pte = (*mapping->pte & 0xfff) | pa | PG_PRESENT;
+ cpu_invplg(mapping->pte);
+ cpu_invplg(addr);
+ goto done;
+ }
+
+ return 0;
+done:
+ return 1;
}
\ No newline at end of file
// 为内核创建一个专属栈空间。
for (size_t i = 0; i < (KSTACK_SIZE >> PG_SIZE_BITS); i++) {
- vmm_alloc_page(KERNEL_PID,
- (void*)(KSTACK_START + (i << PG_SIZE_BITS)),
- NULL,
- PG_PREM_RW,
- 0);
+ uintptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
+ vmm_set_mapping(
+ PD_REFERENCED, KSTACK_START + (i << PG_SIZE_BITS), pa, PG_PREM_RW);
}
kprintf(KINFO "[MM] Allocated %d pages for stack start at %p\n",
KSTACK_SIZE >> PG_SIZE_BITS,
// 重映射VGA文本缓冲区(以后会变成显存,i.e., framebuffer)
for (size_t i = 0; i < vga_buf_pgs; i++) {
- vmm_map_page(KERNEL_PID,
- (void*)(VGA_BUFFER_VADDR + (i << PG_SIZE_BITS)),
- (void*)(VGA_BUFFER_PADDR + (i << PG_SIZE_BITS)),
- PG_PREM_URW);
+ vmm_set_mapping(PD_REFERENCED,
+ VGA_BUFFER_VADDR + (i << PG_SIZE_BITS),
+ VGA_BUFFER_PADDR + (i << PG_SIZE_BITS),
+ PG_PREM_URW);
}
// 更新VGA缓冲区位置至虚拟地址
#include <lunaix/mm/vmm.h>
-void* vmm_dup_page(pid_t pid, void* pa) {
+void*
+vmm_dup_page(pid_t pid, void* pa)
+{
void* new_ppg = pmm_alloc_page(pid, 0);
- vmm_fmap_page(pid, PG_MOUNT_3, new_ppg, PG_PREM_RW);
- vmm_fmap_page(pid, PG_MOUNT_4, pa, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_3, new_ppg, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_4, pa, PG_PREM_RW);
- asm volatile (
- "movl %1, %%edi\n"
- "movl %2, %%esi\n"
- "rep movsl\n"
- :: "c"(1024), "r"(PG_MOUNT_3), "r"(PG_MOUNT_4)
- : "memory", "%edi", "%esi");
+ asm volatile("movl %1, %%edi\n"
+ "movl %2, %%esi\n"
+ "rep movsl\n" ::"c"(1024),
+ "r"(PG_MOUNT_3),
+ "r"(PG_MOUNT_4)
+ : "memory", "%edi", "%esi");
- vmm_unset_mapping(PG_MOUNT_3);
- vmm_unset_mapping(PG_MOUNT_4);
+ vmm_del_mapping(PD_REFERENCED, PG_MOUNT_3);
+ vmm_del_mapping(PD_REFERENCED, PG_MOUNT_4);
return new_ppg;
}
\ No newline at end of file
* @file dmm.c
* @author Lunaixsky
* @brief Dynamic memory manager for heap. This design do not incorporate any\
- * specific implementation of malloc family. The main purpose of this routines is to
- * provide handy method to initialize & grow the heap as needed by upstream implementation.
- *
- * This is designed to be portable, so it can serve as syscalls to malloc/free in the c std lib.
- *
+ * specific implementation of malloc family. The main purpose of these
+ * routines is to provide handy methods to initialize & grow the heap as
+ * needed by the upstream implementation.
+ *
+ * This is designed to be portable, so it can serve as syscalls to malloc/free
+ * in the c std lib.
+ *
* @version 0.2
* @date 2022-03-3
*
*/
#include <lunaix/mm/dmm.h>
-#include <lunaix/mm/vmm.h>
#include <lunaix/mm/page.h>
+#include <lunaix/mm/vmm.h>
#include <lunaix/status.h>
#include <lunaix/spike.h>
#include <lunaix/syscall.h>
+extern void __kernel_heap_start;
-__DEFINE_LXSYSCALL1(int, sbrk, size_t, size) {
+__DEFINE_LXSYSCALL1(int, sbrk, size_t, size)
+{
heap_context_t* uheap = &__current->mm.u_heap;
mutex_lock(&uheap->lock);
- void* r = lxsbrk(uheap, size);
+ void* r = lxsbrk(uheap, size, PG_ALLOW_USER);
mutex_unlock(&uheap->lock);
return r;
}
-__DEFINE_LXSYSCALL1(void*, brk, void*, addr) {
+__DEFINE_LXSYSCALL1(void*, brk, void*, addr)
+{
heap_context_t* uheap = &__current->mm.u_heap;
mutex_lock(&uheap->lock);
- int r = lxbrk(uheap, addr);
+ int r = lxbrk(uheap, addr, PG_ALLOW_USER);
mutex_unlock(&uheap->lock);
return r;
}
heap->brk = heap->start;
mutex_init(&heap->lock);
- return vmm_alloc_page(__current->pid, heap->brk, NULL, PG_PREM_RW, 0) != NULL;
+ int perm = PG_ALLOW_USER;
+ if (heap->brk >= &__kernel_heap_start) {
+ perm = 0;
+ }
+
+ return vmm_set_mapping(PD_REFERENCED, heap->brk, 0, PG_WRITE | perm) !=
+ NULL;
}
int
-lxbrk(heap_context_t* heap, void* addr)
+lxbrk(heap_context_t* heap, void* addr, int user)
{
- return -(lxsbrk(heap, addr - heap->brk) == (void*)-1);
+ return -(lxsbrk(heap, addr - heap->brk, user) == (void*)-1);
}
void*
-lxsbrk(heap_context_t* heap, size_t size)
+lxsbrk(heap_context_t* heap, size_t size, int user)
{
if (size == 0) {
return heap->brk;
uintptr_t diff = PG_ALIGN(next) - PG_ALIGN(current_brk);
if (diff) {
- // if next do require new pages to be allocated
- if (!vmm_alloc_pages(__current->pid, (void*)(PG_ALIGN(current_brk) + PG_SIZE),
- diff,
- PG_PREM_RW, 0)) {
- __current->k_status = LXHEAPFULL;
- return NULL;
+        // if next does require new pages to be mapped
+ for (size_t i = 0; i < diff; i += PG_SIZE) {
+ vmm_set_mapping(PD_REFERENCED,
+ PG_ALIGN(current_brk) + PG_SIZE + i,
+ 0,
+ PG_WRITE | user);
}
}
* @file kalloc.c
* @author Lunaixsky
* @brief Implicit free list implementation of malloc family, for kernel use.
- *
+ *
* This version of code is however the simplest and yet insecured, thread unsafe
* it just to demonstrate how the malloc/free works behind the curtain
* @version 0.1
* @date 2022-03-05
- *
+ *
* @copyright Copyright (c) 2022
- *
+ *
*/
-#include <lunaix/mm/kalloc.h>
#include <lunaix/mm/dmm.h>
+#include <lunaix/mm/kalloc.h>
#include <lunaix/common.h>
#include <lunaix/spike.h>
/*
At the beginning, we allocate an empty page and put our initial marker
-
+
| 4/1 | 0/1 |
^ ^ brk
start
- Then, expand the heap further, with HEAP_INIT_SIZE (evaluated to 4096, i.e., 1 pg size)
- This will allocate as much pages and override old epilogue marker with a free region hdr
- and put new epilogue marker. These are handled by lx_grow_heap which is internally used
- by alloc to expand the heap at many moment when needed.
-
+ Then, expand the heap further, with HEAP_INIT_SIZE (evaluated to 4096, i.e.,
+ 1 pg size) This will allocate as much pages and override old epilogue marker
+ with a free region hdr and put new epilogue marker. These are handled by
+ lx_grow_heap which is internally used by alloc to expand the heap at many
+ moment when needed.
+
| 4/1 | 4096/0 | ....... | 4096/0 | 0/1 |
^ ^ brk_old ^
start brk
Note: the brk always point to the beginning of epilogue.
*/
+// FIXME: This should be per-process but not global!
static heap_context_t kheap;
int
-kalloc_init() {
+kalloc_init()
+{
kheap.start = &__kernel_heap_start;
kheap.brk = NULL;
kheap.max_addr = (void*)KSTACK_START;
}
void*
-lxmalloc(size_t size) {
+lxmalloc(size_t size)
+{
mutex_lock(&kheap.lock);
void* r = lx_malloc_internal(&kheap, size);
mutex_unlock(&kheap.lock);
}
void*
-lxcalloc(size_t n, size_t elem) {
+lxcalloc(size_t n, size_t elem)
+{
size_t pd = n * elem;
// overflow detection
}
void
-lxfree(void* ptr) {
+lxfree(void* ptr)
+{
if (!ptr) {
return;
}
// make sure the ptr we are 'bout to free makes sense
// the size trick is stolen from glibc's malloc/malloc.c:4437 ;P
-
+
assert_msg(((uintptr_t)ptr < (uintptr_t)(-sz)) && !((uintptr_t)ptr & 0x3),
"free(): invalid pointer");
-
- assert_msg(sz > WSIZE,
- "free(): invalid size");
+
+ assert_msg(sz > WSIZE, "free(): invalid size");
SW(chunk_ptr, hdr & ~M_ALLOCATED);
SW(FPTR(chunk_ptr, sz), hdr & ~M_ALLOCATED);
SW(next_hdr, LW(next_hdr) | M_PREV_FREE);
-
+
coalesce(chunk_ptr);
mutex_unlock(&kheap.lock);
}
-
void*
lx_malloc_internal(heap_context_t* heap, size_t size)
{
|
v
-
+
| xxxx | |
*/
coalesce(n_hdrptr);
return chunk_ptr;
}
-
void*
lx_grow_heap(heap_context_t* heap, size_t sz)
{
void* start;
// The "+ WSIZE" capture the overhead for epilogue marker
- if (!(start = lxsbrk(heap, sz + WSIZE))) {
+ if (!(start = lxsbrk(heap, sz + WSIZE, 0))) {
return NULL;
}
sz = ROUNDUP(sz, BOUNDARY);
#include <hal/cpu.h>
#include <klibc/string.h>
+#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
x86_page_table*
vmm_init_pd()
{
- x86_page_table* dir = (x86_page_table*)pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
+ x86_page_table* dir =
+ (x86_page_table*)pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
for (size_t i = 0; i < PG_MAX_ENTRIES; i++) {
dir->entry[i] = PTE_NULL;
}
}
int
-__vmm_map_internal(pid_t pid,
- uint32_t l1_inx,
- uint32_t l2_inx,
- uintptr_t pa,
- pt_attr attr,
- int forced)
+vmm_set_mapping(uintptr_t mnt, uintptr_t va, uintptr_t pa, pt_attr attr)
{
- x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
- x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_inx);
+ assert((uintptr_t)va % PG_SIZE == 0);
+
+ uintptr_t l1_inx = L1_INDEX(va);
+ uintptr_t l2_inx = L2_INDEX(va);
+ x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
+ x86_page_table* l2pt = (x86_page_table*)(mnt | (l1_inx << 12));
// See if attr make sense
assert(attr <= 128);
if (!l1pt->entry[l1_inx]) {
- x86_page_table* new_l1pt_pa = pmm_alloc_page(pid, PP_FGPERSIST);
+ x86_page_table* new_l1pt_pa = pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
// 物理内存已满!
if (!new_l1pt_pa) {
// This must be writable
l1pt->entry[l1_inx] = NEW_L1_ENTRY(attr | PG_WRITE, new_l1pt_pa);
- memset((void*)L2_VADDR(l1_inx), 0, PG_SIZE);
- }
-
- x86_pte_t l2pte = l2pt->entry[l2_inx];
- if (l2pte) {
- if (!forced) {
- return 0;
- }
+ memset((void*)l2pt, 0, PG_SIZE);
}
- if ((HAS_FLAGS(attr, PG_PRESENT))) {
- // add one on reference count, regardless of existence.
- pmm_ref_page(pid, pa);
+ if (mnt == PD_REFERENCED) {
+ cpu_invplg(va);
}
l2pt->entry[l2_inx] = NEW_L2_ENTRY(attr, pa);
-
return 1;
}
-void*
-vmm_map_page(pid_t pid, void* va, void* pa, pt_attr tattr)
-{
- // 显然,对空指针进行映射没有意义。
- if (!pa || !va) {
- return NULL;
- }
-
- assert(((uintptr_t)va & 0xFFFU) == 0) assert(((uintptr_t)pa & 0xFFFU) == 0);
-
- uint32_t l1_index = L1_INDEX(va);
- uint32_t l2_index = L2_INDEX(va);
- x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
-
- // 在页表与页目录中找到一个可用的空位进行映射(位于va或其附近)
- x86_pte_t l1pte = l1pt->entry[l1_index];
- x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_index);
- while (l1pte && l1_index < PG_MAX_ENTRIES) {
- if (l2_index == PG_MAX_ENTRIES) {
- l1_index++;
- l2_index = 0;
- l1pte = l1pt->entry[l1_index];
- l2pt = (x86_page_table*)L2_VADDR(l1_index);
- }
- // 页表有空位,只需要开辟一个新的 PTE (Level 2)
- if (__vmm_map_internal(pid, l1_index, l2_index, pa, tattr, false)) {
- return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
- }
- l2_index++;
- }
-
- // 页目录与所有页表已满!
- if (l1_index > PG_MAX_ENTRIES) {
- return NULL;
- }
-
- if (!__vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, tattr, false)) {
- return NULL;
- }
-
- return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
-}
-
-void*
-vmm_fmap_page(pid_t pid, void* va, void* pa, pt_attr tattr)
-{
- if (!pa || !va) {
- return NULL;
- }
-
- assert(((uintptr_t)va & 0xFFFU) == 0) assert(((uintptr_t)pa & 0xFFFU) == 0);
-
- uint32_t l1_index = L1_INDEX(va);
- uint32_t l2_index = L2_INDEX(va);
-
- if (!__vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, tattr, true)) {
- return NULL;
- }
-
- cpu_invplg(va);
-
- return va;
-}
-
-void*
-vmm_alloc_page(pid_t pid, void* vpn, void** pa, pt_attr tattr, pp_attr_t pattr)
-{
- void* pp = pmm_alloc_page(pid, pattr);
- void* result = vmm_map_page(pid, vpn, pp, tattr);
- if (!result) {
- pmm_free_page(pp, pid);
- }
- pa ? (*pa = pp) : 0;
- return result;
-}
-
int
-vmm_alloc_pages(pid_t pid, void* va, size_t sz, pt_attr tattr, pp_attr_t pattr)
+vmm_del_mapping(uintptr_t mnt, uintptr_t va)
{
- assert((uintptr_t)va % PG_SIZE == 0) assert(sz % PG_SIZE == 0);
-
- void* va_ = va;
- for (size_t i = 0; i < (sz >> PG_SIZE_BITS); i++, va_ += PG_SIZE) {
- void* pp = pmm_alloc_page(pid, pattr);
- uint32_t l1_index = L1_INDEX(va_);
- uint32_t l2_index = L2_INDEX(va_);
- if (!pp || !__vmm_map_internal(
- pid,
- l1_index, l2_index, (uintptr_t)pp, tattr, false)) {
- // if one failed, release previous allocated pages.
- va_ = va;
- for (size_t j = 0; j < i; j++, va_ += PG_SIZE) {
- vmm_unmap_page(pid, va_);
- }
-
- return false;
- }
- }
-
- return true;
-}
-
-int
-vmm_set_mapping(pid_t pid, void* va, void* pa, pt_attr attr) {
- assert(((uintptr_t)va & 0xFFFU) == 0);
-
- uint32_t l1_index = L1_INDEX(va);
- uint32_t l2_index = L2_INDEX(va);
-
- // prevent map of recursive mapping region
- if (l1_index == 1023) {
- return 0;
- }
-
- __vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, attr, false);
- return 1;
-}
-
-void
-__vmm_unmap_internal(pid_t pid, void* va, int free_ppage) {
assert(((uintptr_t)va & 0xFFFU) == 0);
uint32_t l1_index = L1_INDEX(va);
// prevent unmap of recursive mapping region
if (l1_index == 1023) {
- return;
+ return 0;
}
- x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
+ x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
x86_pte_t l1pte = l1pt->entry[l1_index];
if (l1pte) {
- x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_index);
+ x86_page_table* l2pt = (x86_page_table*)(mnt | (l1_index << 12));
x86_pte_t l2pte = l2pt->entry[l2_index];
- if (IS_CACHED(l2pte) && free_ppage) {
- pmm_free_page(pid, (void*)l2pte);
- }
+
cpu_invplg(va);
l2pt->entry[l2_index] = PTE_NULL;
}
}
-void
-vmm_unset_mapping(void* va) {
- __vmm_unmap_internal(0, va, false);
-}
-
-void
-vmm_unmap_page(pid_t pid, void* va)
-{
- __vmm_unmap_internal(pid, va, true);
-}
-
-v_mapping
-vmm_lookup(void* va)
+int
+vmm_lookup(uintptr_t va, v_mapping* mapping)
{
- assert(((uintptr_t)va & 0xFFFU) == 0);
+ // va = va & ~0xfff;
uint32_t l1_index = L1_INDEX(va);
uint32_t l2_index = L2_INDEX(va);
x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
x86_pte_t l1pte = l1pt->entry[l1_index];
- v_mapping mapping = { .flags = 0, .pa = 0, .pn = 0 };
if (l1pte) {
x86_pte_t* l2pte =
&((x86_page_table*)L2_VADDR(l1_index))->entry[l2_index];
if (l2pte) {
- mapping.flags = PG_ENTRY_FLAGS(*l2pte);
- mapping.pa = PG_ENTRY_ADDR(*l2pte);
- mapping.pn = mapping.pa >> PG_SIZE_BITS;
- mapping.pte = l2pte;
+ mapping->flags = PG_ENTRY_FLAGS(*l2pte);
+ mapping->pa = PG_ENTRY_ADDR(*l2pte);
+ mapping->pn = mapping->pa >> PG_SIZE_BITS;
+ mapping->pte = l2pte;
+ mapping->va = va;
+ return 1;
}
}
-
- return mapping;
+ return 0;
}
void*
-vmm_v2p(void* va)
+vmm_mount_pd(uintptr_t mnt, void* pde)
{
- return (void*)vmm_lookup(va).pa;
-}
-
-void*
-vmm_mount_pd(uintptr_t mnt, void* pde) {
x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
l1pt->entry[(mnt >> 22)] = NEW_L1_ENTRY(T_SELF_REF_PERM, pde);
cpu_invplg(mnt);
}
void*
-vmm_unmount_pd(uintptr_t mnt) {
+vmm_unmount_pd(uintptr_t mnt)
+{
x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
l1pt->entry[(mnt >> 22)] = 0;
cpu_invplg(mnt);
#include <arch/x86/boot/multiboot.h>
#include <lunaix/common.h>
#include <lunaix/lunistd.h>
+#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/peripheral/ps2kbd.h>
#include <lunaix/proc.h>
// Fuck it, I will no longer bother this little 1MiB
// I just release 4 pages for my APIC & IOAPIC remappings
for (size_t i = 0; i < 3; i++) {
- vmm_unmap_page(KERNEL_PID, (void*)(i << PG_SIZE_BITS));
+ vmm_del_mapping(PD_REFERENCED, (void*)(i << PG_SIZE_BITS));
}
// 锁定所有系统预留页(内存映射IO,ACPI之类的),并且进行1:1映射
KERNEL_PID, FLOOR(__APIC_BASE_PADDR, PG_SIZE_BITS), 0);
pmm_mark_page_occupied(KERNEL_PID, FLOOR(ioapic_addr, PG_SIZE_BITS), 0);
- vmm_set_mapping(KERNEL_PID, APIC_BASE_VADDR, __APIC_BASE_PADDR, PG_PREM_RW);
- vmm_set_mapping(KERNEL_PID, IOAPIC_BASE_VADDR, ioapic_addr, PG_PREM_RW);
+ vmm_set_mapping(
+ PD_REFERENCED, APIC_BASE_VADDR, __APIC_BASE_PADDR, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED, IOAPIC_BASE_VADDR, ioapic_addr, PG_PREM_RW);
apic_init();
ioapic_init();
syscall_install();
for (size_t i = 256; i < hhk_init_pg_count; i++) {
- vmm_unmap_page(KERNEL_PID, (void*)(i << PG_SIZE_BITS));
+ vmm_del_mapping(PD_REFERENCED, (void*)(i << PG_SIZE_BITS));
}
}
multiboot_memory_map_t* mmaps = _k_init_mb_info->mmap_addr;
size_t map_size =
_k_init_mb_info->mmap_length / sizeof(multiboot_memory_map_t);
+ v_mapping mapping;
for (unsigned int i = 0; i < map_size; i++) {
multiboot_memory_map_t mmap = mmaps[i];
if (mmap.type == MULTIBOOT_MEMORY_AVAILABLE) {
uint8_t* pa = PG_ALIGN(mmap.addr_low);
size_t pg_num = CEIL(mmap.len_low, PG_SIZE_BITS);
for (size_t j = 0; j < pg_num; j++) {
- vmm_set_mapping(KERNEL_PID,
- (pa + (j << PG_SIZE_BITS)),
- (pa + (j << PG_SIZE_BITS)),
- PG_PREM_R);
+ uintptr_t _pa = pa + (j << PG_SIZE_BITS);
+ if (vmm_lookup(_pa, &mapping) && *mapping.pte) {
+ continue;
+ }
+ vmm_set_mapping(PD_REFERENCED, _pa, _pa, PG_PREM_R);
+ pmm_mark_page_occupied(KERNEL_PID, _pa >> 12, 0);
}
}
}
\ No newline at end of file
#include <klibc/string.h>
#include <lunaix/clock.h>
#include <lunaix/common.h>
+#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/process.h>
__dup_pagetable(pid_t pid, uintptr_t mount_point)
{
void* ptd_pp = pmm_alloc_page(pid, PP_FGPERSIST);
- x86_page_table* ptd = vmm_fmap_page(pid, PG_MOUNT_1, ptd_pp, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_1, ptd_pp, PG_PREM_RW);
+
+ x86_page_table* ptd = PG_MOUNT_1;
x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
for (size_t i = 0; i < PG_MAX_ENTRIES - 1; i++) {
continue;
}
- x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
void* pt_pp = pmm_alloc_page(pid, PP_FGPERSIST);
- x86_page_table* pt = vmm_fmap_page(pid, PG_MOUNT_2, pt_pp, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_2, pt_pp, PG_PREM_RW);
+
+ x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
+ x86_page_table* pt = PG_MOUNT_2;
for (size_t j = 0; j < PG_MAX_ENTRIES; j++) {
x86_pte_t pte = ppt->entry[j];
#include <hal/cpu.h>
#include <lunaix/mm/kalloc.h>
+#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/process.h>
#include <lunaix/sched.h>
sched_init()
{
size_t pg_size = ROUNDUP(sizeof(struct proc_info) * MAX_PROCESS, 0x1000);
- assert_msg(vmm_alloc_pages(
- KERNEL_PID, &__proc_table, pg_size, PG_PREM_RW, PP_FGPERSIST),
- "Fail to allocate proc table");
+
+ for (size_t i = 0; i <= pg_size; i += 4096) {
+ uintptr_t pa = pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
+ vmm_set_mapping(PD_REFERENCED, &__proc_table + i, pa, PG_PREM_RW);
+ }
sched_ctx = (struct scheduler){ ._procs = (struct proc_info*)&__proc_table,
.ptable_len = 0,