fix: PDE permissions override the PTE's when the PTE has a lower access level.
feat: template generator now performs unbounded look-ahead when inferring a memory region's base and size
#define __BOOT_CODE__
-#include <lunaix/common.h>
#include <lunaix/mm/page.h>
#include <sys/boot/bstage.h>
#include <hal/acpi/acpi.h>
#include <hal/intc.h>
-#include <lunaix/common.h>
#include <lunaix/mm/mmio.h>
#include <sys/interrupts.h>
#define __LUNAIX_APIC_H
#include <hal/intc.h>
-#include <lunaix/common.h>
#include <lunaix/types.h>
#define __APIC_BASE_PADDR 0xFEE00000
#define KERNEL_STACK_SIZE 0x100000UL
#define KERNEL_STACK_END 0x3ffff0UL
+#define USR_EXEC 0x400000UL
+#define USR_EXEC_SIZE 0x20000000UL
+#define USR_EXEC_END 0x203fffffUL
+
+#define USR_MMAP 0x20400000UL
+#define USR_MMAP_SIZE 0x9f800000UL
+#define USR_MMAP_END 0xbfbfffffUL
+
+#define USR_STACK 0xbfc00000UL
+#define USR_STACK_SIZE 0x400000UL
+#define USR_STACK_END 0xbffffff0UL
+
#define KERNEL_EXEC 0xc0000000UL
#define KERNEL_EXEC_SIZE 0x4000000UL
#define KERNEL_EXEC_END 0xc3ffffffUL
-#include <lunaix/common.h>
#include <lunaix/mm/mm.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
+++ /dev/null
-#ifndef __LUNAIX_CONSTANTS_H
-#define __LUNAIX_CONSTANTS_H
-
-#define MEM_1MB 0x100000
-#define MEM_4MB 0x400000
-
-#define USER_START 0x400000
-
-#define KSTACK_SIZE MEM_1MB
-#define KSTACK_START (USER_START - KSTACK_SIZE)
-#define KSTACK_TOP ((USER_START - 1) & ~0xf)
-#define within_kstack(addr) (KSTACK_START <= (addr) && (addr) <= KSTACK_TOP)
-
-#define VGA_FRAMEBUFFER 0xB8000
-
-#define KCODE_SEG 0x08
-#define KDATA_SEG 0x10
-
-#define KSIZE (MEM_4MB * 16)
-
-#define USTACK_SIZE MEM_4MB
-#define USTACK_TOP 0x9ffffff0
-#define USTACK_END (0x9fffffff - USTACK_SIZE + 1)
-#define UMMAP_START 0x4D000000
-#define UMMAP_END (USTACK_END - MEM_4MB)
-
-#ifndef __ASM__
-#include <lunaix/types.h>
-// From Linux kernel v2.6.0 <kernel.h:194>
-/**
- * container_of - cast a member of a structure out to the containing structure
- *
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) \
- ({ \
- const typeof(((type*)0)->member)* __mptr = (ptr); \
- (ptr) ? (type*)((char*)__mptr - offsetof(type, member)) : 0; \
- })
-
-#endif
-#endif /* __LUNAIX_CONSTANTS_H */
#ifndef __LUNAIX_LLIST_H
#define __LUNAIX_LLIST_H
-#include <lunaix/common.h>
+#include <lunaix/types.h>
struct llist_header
{
#ifndef __LUNAIX_PAGE_H
#define __LUNAIX_PAGE_H
-#include <lunaix/common.h>
#include <lunaix/types.h>
#define PG_SIZE_BITS 12
#define __LUNAIX_VMM_H
#include <lunaix/mm/page.h>
#include <lunaix/process.h>
-#include <stddef.h>
-#include <stdint.h>
+#include <lunaix/types.h>
// Virtual memory manager
#define VMAP_NULL 0
void*
vmm_next_free(ptr_t start, int options);
-/**
- * @brief 将连续的物理地址空间映射到内核虚拟地址空间
- *
- * @param paddr 物理地址空间的基地址
- * @param size 物理地址空间的大小
- * @return void*
- */
-void*
-vmm_vmap(ptr_t paddr, size_t size, pt_attr attr);
-
/**
* @brief 将当前地址空间的虚拟地址转译为物理地址。
*
ptr_t
vmm_v2pat(ptr_t mnt, ptr_t va);
+/*
+ 表示一个 vmap 区域
+ (One must not get confused with vmap_area in Linux!)
+*/
+struct vmap_area
+{
+ ptr_t start;
+ size_t size;
+ pt_attr area_attr;
+};
+
+/**
+ * @brief 将连续的物理地址空间映射到内核虚拟地址空间
+ *
+ * @param paddr 物理地址空间的基地址
+ * @param size 物理地址空间的大小
+ * @return void*
+ */
+void*
+vmap(ptr_t paddr, size_t size, pt_attr attr, int flags);
+
+/**
+ * @brief 创建一个 vmap 区域
+ *
+ * @param paddr
+ * @param attr
+ * @return ptr_t
+ */
+struct vmap_area*
+vmap_varea(size_t size, pt_attr attr);
+
+/**
+ * @brief 在 vmap区域内映射一个单页
+ *
+ * @param paddr
+ * @param attr
+ * @return ptr_t
+ */
+ptr_t
+vmap_area_page(struct vmap_area* area, ptr_t paddr, pt_attr attr);
+
+/**
+ * @brief 在 vmap区域删除一个已映射的页
+ *
+ * @param paddr
+ * @return ptr_t
+ */
+ptr_t
+vmap_area_rmpage(struct vmap_area* area, ptr_t vaddr);
+
#endif /* __LUNAIX_VMM_H */
typedef u64_t lba_t;
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ *
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) \
+ ({ \
+ const typeof(((type*)0)->member)* __mptr = (ptr); \
+ (ptr) ? (type*)((char*)__mptr - offsetof(type, member)) : 0; \
+ })
+
#endif /* __LUNAIX_TYPES_H */
ptr_t end = ROUNDUP(mod->end, PG_SIZE);
ptr_t ksym_va =
- (ptr_t)vmm_vmap(mod->start, (end - mod->start), PG_PREM_R);
+ (ptr_t)vmap(mod->start, (end - mod->start), PG_PREM_R, 0);
assert(ksym_va);
trace_ctx.ksym_table = (struct ksyms*)ksym_va;
-#include <lunaix/common.h>
#include <lunaix/exebi/elf32.h>
#include <lunaix/fs.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/spike.h>
+#include <sys/mm/mempart.h>
+
int
elf32_smap(struct load_context* ldctx,
const struct elf32* elf,
goto done_close_elf32;
}
- load_base = UMMAP_START;
+ load_base = USR_MMAP;
}
context->entry = elf.eheader.e_entry + load_base;
#include <lunaix/syscall_utils.h>
#include <sys/abi.h>
+#include <sys/mm/mempart.h>
#include <klibc/string.h>
if (container->vms_mnt == VMS_SELF) {
// we are loading executable into current addr space
- ptr_t ustack = USTACK_TOP;
+ ptr_t ustack = USR_STACK_END;
size_t argv_len = 0, envp_len = 0;
ptr_t argv_ptr = 0, envp_ptr = 0;
#include <lunaix/block.h>
#include <lunaix/boot_generic.h>
-#include <lunaix/common.h>
#include <lunaix/device.h>
#include <lunaix/foptions.h>
#include <lunaix/fs/twifs.h>
trace_modksyms_init(bhctx);
// crt
- tty_init(ioremap(VGA_FRAMEBUFFER, PG_SIZE));
+ tty_init(ioremap(0xB8000, PG_SIZE));
tty_set_theme(VGA_COLOR_WHITE, VGA_COLOR_BLACK);
lxconsole_init();
cpu_chvmspace(proc0->page_table);
// 为内核创建一个专属栈空间。
- for (size_t i = 0; i < (KSTACK_SIZE >> PG_SIZE_BITS); i++) {
+ for (size_t i = 0; i < KERNEL_STACK_SIZE; i += PG_SIZE) {
ptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
- vmm_set_mapping(VMS_SELF,
- KSTACK_START + (i << PG_SIZE_BITS),
- pa,
- PG_PREM_RW,
- VMAP_NULL);
+ vmm_set_mapping(VMS_SELF, KERNEL_STACK + i, pa, PG_PREM_RW, VMAP_NULL);
}
- proc_init_transfer(proc0, KSTACK_TOP, (ptr_t)__proc0, 0);
+ proc_init_transfer(proc0, KERNEL_STACK_END, (ptr_t)__proc0, 0);
// 向调度器注册进程。
commit_process(proc0);
if (!pa) {
return NULL;
}
- return vmm_vmap(pa, cake_pg * PG_SIZE, PG_PREM_RW);
+ return vmap(pa, cake_pg * PG_SIZE, PG_PREM_RW, 0);
}
struct cake_s*
#include <sys/mm/mempart.h>
// any size beyond this is bullshit
-#define BS_SIZE (KERNEL_EXEC - UMMAP_START)
+#define BS_SIZE (KERNEL_EXEC - USR_MMAP)
int
mem_has_overlap(vm_regions_t* regions, ptr_t start, ptr_t end)
{
assert_msg(addr, "addr can not be NULL");
- ptr_t last_end = USER_START, found_loc = addr;
+ ptr_t last_end = USR_EXEC, found_loc = addr;
struct mm_region *pos, *n;
vm_regions_t* vm_regions = ¶m->pvms->regions;
return ENOMEM;
found:
- if (found_loc >= KERNEL_EXEC || found_loc < USER_START) {
+ if (found_loc >= KERNEL_EXEC || found_loc < USR_EXEC) {
return ENOMEM;
}
}
if (!addr_ptr) {
- addr_ptr = UMMAP_START;
- } else if (addr_ptr < UMMAP_START || addr_ptr + length >= UMMAP_END) {
+ addr_ptr = USR_MMAP;
+ } else if (addr_ptr < USR_MMAP || addr_ptr + length >= USR_MMAP_END) {
if (!(options & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
errno = ENOMEM;
goto done;
void*
ioremap(ptr_t paddr, u32_t size)
{
- void* ptr = vmm_vmap(paddr, size, PG_PREM_RW | PG_DISABLE_CACHE);
+ void* ptr = vmap(paddr, size, PG_PREM_RW | PG_DISABLE_CACHE, 0);
if (ptr) {
pmm_mark_chunk_occupied(KERNEL_PID,
#include <lunaix/mm/pmm.h>
+#include <lunaix/mm/valloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
static ptr_t start = VMAP;
void*
-vmm_vmap(ptr_t paddr, size_t size, pt_attr attr)
+vmap(ptr_t paddr, size_t size, pt_attr attr, int flags)
{
// next fit
assert_msg((paddr & 0xfff) == 0, "vmap: bad alignment");
size_t examed_size = 0, wrapped = 0;
x86_page_table* pd = (x86_page_table*)L1_BASE_VADDR;
- while (!wrapped || current_addr >= start) {
+ while (!wrapped || current_addr < start) {
size_t l1inx = L1_INDEX(current_addr);
if (!(pd->entry[l1inx])) {
// empty 4mb region
- examed_size += MEM_4MB;
- current_addr = (current_addr & 0xffc00000) + MEM_4MB;
+ examed_size += MEM_4M;
+ current_addr = (current_addr & 0xffc00000) + MEM_4M;
} else {
x86_page_table* ptd = (x86_page_table*)(L2_VADDR(l1inx));
size_t i = L2_INDEX(current_addr), j = 0;
done:
ptr_t alloc_begin = current_addr - examed_size;
+ start = alloc_begin + size;
+
+ if ((flags & VMAP_NOMAP)) {
+ for (size_t i = 0; i < size; i += PG_SIZE) {
+ vmm_set_mapping(VMS_SELF, alloc_begin + i, -1, 0, 0);
+ }
+
+ return (void*)alloc_begin;
+ }
+
for (size_t i = 0; i < size; i += PG_SIZE) {
vmm_set_mapping(VMS_SELF, alloc_begin + i, paddr + i, attr, 0);
pmm_ref_page(KERNEL_PID, paddr + i);
}
- start = alloc_begin + size;
return (void*)alloc_begin;
+}
+
+/*
+    A vmap area is a kernel memory region representing a contiguous range
+    of virtual addresses, so that all memory allocation/deallocation can be
+    concentrated into a single big chunk. This helps mitigate external
+    fragmentation in the VMAP address domain, which matters most when the
+    allocation granule is a single page.
+
+    XXX (vmap_area)
+    A potential performance improvement for pcache? (needs more analysis!)
+    -> In exchange for a fixed-size buffer pool. (is it worth it?)
+*/
+
+/**
+ * @brief Create a vmap area: reserve a contiguous, initially unmapped
+ *        window in the kernel VMAP address space.
+ *
+ * @param size total size of the area, in bytes (page granule)
+ * @param attr default page attribute recorded for later mappings
+ * @return the new vmap_area descriptor, or NULL on failure
+ */
+struct vmap_area*
+vmap_varea(size_t size, pt_attr attr)
+{
+    // Reserve the range without backing it: clear PG_PRESENT explicitly.
+    // The original `attr ^ PG_PRESENT` toggles the bit, so it would *set*
+    // present whenever the caller passed an attr without it.
+    ptr_t start = (ptr_t)vmap(0, size, attr & ~PG_PRESENT, VMAP_NOMAP);
+
+    if (!start) {
+        return NULL;
+    }
+
+    struct vmap_area* varea = valloc(sizeof(struct vmap_area));
+    if (!varea) {
+        // out of kernel heap; the reserved range is leaked here, same as
+        // any other vmap failure path (no vmap release primitive yet).
+        return NULL;
+    }
+
+    *varea =
+        (struct vmap_area){ .start = start, .size = size, .area_attr = attr };
+
+    return varea;
+}
+
+/**
+ * @brief Map a single physical page into the first free slot of a vmap
+ *        area.
+ *
+ * @param area the vmap area to map into
+ * @param paddr physical address of the page (page aligned)
+ * @param attr page attribute for this mapping
+ * @return the virtual address chosen for the mapping, or 0 if the area
+ *         has no free slot left
+ */
+ptr_t
+vmap_area_page(struct vmap_area* area, ptr_t paddr, pt_attr attr)
+{
+    ptr_t current = area->start;
+    ptr_t bound = current + area->size;
+
+    while (current < bound) {
+        // Index the mounted page table as an entry array. OR-ing the raw
+        // L2 index into the table base (as the original did) does not
+        // scale by sizeof(x86_pte_t) and dereferences the wrong word.
+        x86_page_table* ptd = (x86_page_table*)L2_VADDR(L1_INDEX(current));
+        x86_pte_t* pte = &ptd->entry[L2_INDEX(current)];
+
+        if (PG_IS_PRESENT(*pte)) {
+            current += PG_SIZE;
+            continue;
+        }
+
+        *pte = NEW_L2_ENTRY(attr | PG_PRESENT, paddr);
+        cpu_flush_page(current);
+        return current;
+    }
+
+    // Exhausted: report failure explicitly. The original fell through and
+    // returned `bound` (one past the end of the area), which a caller
+    // could not distinguish from a successful mapping.
+    return 0;
+}
+
+/**
+ * @brief Remove a page mapping previously established inside a vmap area.
+ *
+ * @param area the vmap area owning the mapping
+ * @param vaddr virtual address of the mapped page to remove
+ * @return the physical address that was mapped at vaddr, or 0 if vaddr
+ *         lies outside the area
+ */
+ptr_t
+vmap_area_rmpage(struct vmap_area* area, ptr_t vaddr)
+{
+    ptr_t start = area->start;
+    ptr_t bound = start + area->size;
+
+    // `bound` is one past the end of the area, hence `>=`.
+    if (vaddr < start || vaddr >= bound) {
+        return 0;
+    }
+
+    // Locate the PTE of *vaddr*. The original looked it up (and flushed)
+    // using `current` == area->start, so it always tore down the first
+    // page of the area regardless of which page was requested. Also index
+    // the table as an entry array instead of OR-ing the raw index.
+    x86_page_table* ptd = (x86_page_table*)L2_VADDR(L1_INDEX(vaddr));
+    x86_pte_t* pte = &ptd->entry[L2_INDEX(vaddr)];
+    ptr_t pa = PG_ENTRY_ADDR(*pte);
+
+    *pte = NEW_L2_ENTRY(0, -1);
+    cpu_flush_page(vaddr);
+
+    return pa;
+}
\ No newline at end of file
-#include <sys/cpu.h>
#include <klibc/string.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
#include <lunaix/syslog.h>
+#include <sys/cpu.h>
LOG_MODULE("VMM")
// See if attr make sense
assert(attr <= 128);
- if (!l1pt->entry[l1_inx]) {
+ x86_pte_t* l1pte = &l1pt->entry[l1_inx];
+ if (!*l1pte) {
x86_page_table* new_l1pt_pa =
(x86_page_table*)pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
}
// This must be writable
- l1pt->entry[l1_inx] =
- NEW_L1_ENTRY(attr | PG_WRITE | PG_PRESENT, new_l1pt_pa);
+ *l1pte = NEW_L1_ENTRY(attr | PG_WRITE | PG_PRESENT, new_l1pt_pa);
// make sure our new l2 table is visible to CPU
cpu_flush_page((ptr_t)l2pt);
memset((void*)l2pt, 0, PG_SIZE);
} else {
+ if ((attr & PG_ALLOW_USER) && !(*l1pte & PG_ALLOW_USER)) {
+ *l1pte |= PG_ALLOW_USER;
+ }
+
x86_pte_t pte = l2pt->entry[l2_inx];
if (pte && (options & VMAP_IGNORE)) {
return 1;
#include <lunaix/clock.h>
-#include <lunaix/common.h>
#include <lunaix/input.h>
#include <lunaix/isrm.h>
#include <lunaix/peripheral/ps2kbd.h>
#include <lunaix/syslog.h>
#include <lunaix/timer.h>
-#include <sys/cpu.h>
#include <hal/intc.h>
+#include <sys/cpu.h>
#include <klibc/string.h>
#include <lunaix/block.h>
#include <lunaix/boot_generic.h>
-#include <lunaix/common.h>
#include <lunaix/exec.h>
#include <lunaix/foptions.h>
#include <lunaix/fs.h>
#include <klibc/string.h>
#include <lunaix/clock.h>
-#include <lunaix/common.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
struct mm_region* mapped;
struct mmap_param param = { .vms_mnt = VMS_MOUNT_1,
.pvms = &pcb->mm,
- .mlen = USTACK_SIZE,
+ .mlen = USR_STACK_SIZE,
.proct = PROT_READ | PROT_WRITE,
.flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
.type = REGION_TYPE_STACK };
int status = 0;
- if ((status = mem_map(NULL, &mapped, USTACK_END, NULL, ¶m))) {
+ if ((status = mem_map(NULL, &mapped, USR_STACK, NULL, ¶m))) {
kprint_panic("fail to alloc user stack: %d", status);
}
vmm_mount_pd(VMS_MOUNT_1, pt_copy); // 将新进程的页表挂载到挂载点#2
// copy the kernel stack
- for (size_t i = KSTACK_START >> 12; i <= KSTACK_TOP >> 12; i++) {
+ for (size_t i = KERNEL_STACK >> 12; i <= KERNEL_STACK_END >> 12; i++) {
volatile x86_pte_t* ppte = &PTE_MOUNTED(VMS_MOUNT_1, i);
/*
#include <lunaix/syscall.h>
#include <lunaix/syslog.h>
-LOG_MODULE("SIG")
-
#include <klibc/string.h>
+#include <sys/mm/mempart.h>
+
+LOG_MODULE("SIG")
+
extern struct scheduler sched_ctx; /* kernel/sched.c */
#define UNMASKABLE (sigset(SIGKILL) | sigset(SIGTERM))
#define TERMSIG (sigset(SIGSEGV) | sigset(SIGINT) | UNMASKABLE)
#define CORE (sigset(SIGSEGV))
+#define within_kstack(addr) \
+ (KERNEL_STACK <= (addr) && (addr) <= KERNEL_STACK_END)
static inline void
signal_terminate(int errcode)
ptr_t ustack = __current->ustack_top;
- if ((int)(ustack - USTACK_END) < (int)sizeof(struct proc_sig)) {
+ if ((int)(ustack - USR_STACK) < (int)sizeof(struct proc_sig)) {
// 用户栈没有空间存放信号上下文
return 0;
}
#include <klibc/string.h>
-#include <lunaix/common.h>
#include <lunaix/spike.h>
#include <lunaix/tty/console.h>
#include <lunaix/tty/tty.h>
ENTRY(start_)
-/*
- FUTURE: Use disk reader
- A bit of messy here.
- We will pull our higher half kernel out of this shit
- and load it separately once we have our disk reader.
-*/
-
SECTIONS {
. = 0x100000;
}
__kexec_boot_end = ALIGN(4K);
- /* Relocation of our higher half kernel */
+ /* ---- boot end ---- */
+
+ /* ---- kernel start ---- */
+
. += 0xC0000000;
/* 好了,我们的内核…… */
/* ---- */
+ /* align to 8 bytes, so it can cover both 32 and 64 bits address line*/
+ . = ALIGN(8);
+
PROVIDE(__lga_pci_dev_drivers_start = .);
KEEP(*(.lga.pci_dev_drivers));
PROVIDE(__lga_pci_dev_drivers_end = .);
+
+ /* ---- */
+
+ . = ALIGN(8);
+
+ PROVIDE(__lga_platdev_db_start = .);
+
+ KEEP(*(.lga.platdev_db));
+
+ PROVIDE(__lga_platdev_db_end = .);
}
.bss BLOCK(4K) : AT ( ADDR(.bss) - 0xC0000000 ) {
if "width" in record:
self.__width = DataObject.create("width", record["width"])
-    def __process(self, start_addr, idx, regions, size_lookahead = False):
+    # Resolve the "start" and "size" of regions[idx] (recursing into later
+    # regions as needed) and return the first address past regions[idx].
+    # A missing "start" is taken from the running cursor, or back-computed
+    # from the next region's start when this region has a known "size"; a
+    # missing "size" is inferred from the next region's (resolved) start.
+    # NOTE(review): `regions[idx + 1]` assumes an under-specified region is
+    # never last in the list -- otherwise this raises IndexError rather than
+    # the intended "Unbounded region definition" error. TODO confirm the
+    # template always ends with a fully-specified region.
+    def __process(self, start_addr, idx, regions):
        if idx >= len(regions):
            raise Exception("Unbounded region definition")
        e = regions[idx]
+        # Infer this region's base address before any alignment is applied.
+        if "start" not in e:
+            ne = regions[idx + 1]
+            if "start" not in ne or "size" not in e:
+                e["start"] = start_addr
+            else:
+                # Next region is anchored and our size is known: resolve the
+                # tail first, then place this region flush against it.
+                self.__process(start_addr + e["size"], idx + 1, regions)
+                e["start"] = ne['start'] - e["size"]
+
+        # Align the resolved base up to the requested power-of-two block.
        if "block" in e:
            b = e["block"] - 1
-            start_addr = (start_addr + b) & ~b
+            e["start"] = (e["start"] + b) & ~b
-        if "start" not in e:
-            e["start"] = start_addr
-        elif e["start"] < start_addr:
+        if e["start"] < start_addr:
            raise Exception(f"starting addr {hex(e['start'])} overrlapping with {hex(start_addr)}")
-        else:
-            start_addr = e["start"]
-        if "size" not in e:
-            if size_lookahead:
-                raise Exception("could not infer size from unbounded region")
-            tmp_addr = self.__process(start_addr, idx + 1, regions, size_lookahead=True)
-            e["size"] = tmp_addr - start_addr
+        start_addr = e["start"]
-        if not size_lookahead:
-            start_addr += e["size"]
+        # Infer the size by resolving the next region's start, then spanning
+        # the gap up to it (the "infinite horizon" look-ahead).
+        if "size" not in e:
+            self.__process(start_addr, idx + 1, regions)
+            ne = regions[idx + 1]
+            e["size"] = ne['start'] - start_addr
-        return start_addr
+        return start_addr + e["size"]
def expand(self, param={}):
super().expand(param)
"size": "1@1M",
"stk_align": 16
},
+ {
+ "name": "usr_exec",
+ "start": "4@1M",
+ "size": "512@1M"
+ },
+ {
+ "name": "usr_mmap"
+ },
+ {
+ "name": "usr_stack",
+ "size": "1@4M",
+ "stk_align": 16
+ },
{
"name": "kernel_exec",
"start": "3@1G",