#include <arch/x86/boot/multiboot.h>
#include <arch/x86/idt.h>
-#include <lunaix/mm/page.h>
#include <lunaix/common.h>
+#include <lunaix/mm/page.h>
-#define PT_ADDR(ptd, pt_index) ((ptd_t*)ptd + (pt_index + 1) * 1024)
-#define SET_PDE(ptd, pde_index, pde) *((ptd_t*)ptd + pde_index) = pde;
-#define SET_PTE(ptd, pt_index, pte_index, pte) *(PT_ADDR(ptd, pt_index) + pte_index) = pte;
-#define sym_val(sym) (uintptr_t)(&sym)
+#define PT_ADDR(ptd, pt_index) ((ptd_t*)ptd + (pt_index + 1) * 1024)
+#define SET_PDE(ptd, pde_index, pde) *((ptd_t*)ptd + pde_index) = pde;
+#define SET_PTE(ptd, pt_index, pte_index, pte) \
+ *(PT_ADDR(ptd, pt_index) + pte_index) = pte;
+#define sym_val(sym) (uintptr_t)(&sym)
-#define KERNEL_PAGE_COUNT ((sym_val(__kernel_end) - sym_val(__kernel_start) + 0x1000 - 1) >> 12);
-#define HHK_PAGE_COUNT ((sym_val(__init_hhk_end) - 0x100000 + 0x1000 - 1) >> 12)
+#define KERNEL_PAGE_COUNT \
+    ((sym_val(__kernel_end) - sym_val(__kernel_start) + 0x1000 - 1) >> 12)
+#define HHK_PAGE_COUNT ((sym_val(__init_hhk_end) - 0x100000 + 0x1000 - 1) >> 12)
// use table #1
-#define PG_TABLE_IDENTITY 0
+#define PG_TABLE_IDENTITY 0
// use table #2-8
// hence the max size of the kernel is 28MiB (7 page tables)
-#define PG_TABLE_KERNEL 1
+#define PG_TABLE_KERNEL 1
// use table #9
-#define PG_TABLE_STACK 8
+#define PG_TABLE_STACK 8
// Provided by linker (see linker.ld)
extern uint8_t __kernel_start;
extern uint8_t __init_hhk_end;
extern uint8_t _k_stack;
-void
-_init_page(ptd_t* ptd) {
+void
+_init_page(ptd_t* ptd)
+{
SET_PDE(ptd, 0, NEW_L1_ENTRY(PG_PREM_RW, ptd + PG_MAX_ENTRIES))
-
- // 对低1MiB空间进行对等映射(Identity mapping),也包括了我们的VGA,方便内核操作。
- for (uint32_t i = 0; i < 256; i++)
- {
- SET_PTE(ptd, PG_TABLE_IDENTITY, i, NEW_L2_ENTRY(PG_PREM_RW, (i << PG_SIZE_BITS)))
+
+ // 对低1MiB空间进行对等映射(Identity
+ // mapping),也包括了我们的VGA,方便内核操作。
+ for (uint32_t i = 0; i < 256; i++) {
+ SET_PTE(ptd,
+ PG_TABLE_IDENTITY,
+ i,
+ NEW_L2_ENTRY(PG_PREM_RW, (i << PG_SIZE_BITS)))
}
- // 对等映射我们的hhk_init,这样一来,当分页与地址转换开启后,我们依然能够照常执行最终的 jmp 指令来跳转至
+ // 对等映射我们的hhk_init,这样一来,当分页与地址转换开启后,我们依然能够照常执行最终的
+ // jmp 指令来跳转至
// 内核的入口点
- for (uint32_t i = 0; i < HHK_PAGE_COUNT; i++)
- {
- SET_PTE(ptd, PG_TABLE_IDENTITY, 256 + i, NEW_L2_ENTRY(PG_PREM_RW, 0x100000 + (i << PG_SIZE_BITS)))
+ for (uint32_t i = 0; i < HHK_PAGE_COUNT; i++) {
+ SET_PTE(ptd,
+ PG_TABLE_IDENTITY,
+ 256 + i,
+ NEW_L2_ENTRY(PG_PREM_RW, 0x100000 + (i << PG_SIZE_BITS)))
}
-
+
// --- 将内核重映射至高半区 ---
-
- // 这里是一些计算,主要是计算应当映射进的 页目录 与 页表 的条目索引(Entry Index)
+
+ // 这里是一些计算,主要是计算应当映射进的 页目录 与 页表 的条目索引(Entry
+ // Index)
uint32_t kernel_pde_index = L1_INDEX(sym_val(__kernel_start));
uint32_t kernel_pte_index = L2_INDEX(sym_val(__kernel_start));
uint32_t kernel_pg_counts = KERNEL_PAGE_COUNT;
-
+
// 将内核所需要的页表注册进页目录
// 当然,就现在而言,我们的内核只占用不到50个页(每个页表包含1024个页)
    // 这里分配了7个页表(28MiB),未雨绸缪。
- for (uint32_t i = 0; i < PG_TABLE_STACK - PG_TABLE_KERNEL; i++)
- {
- SET_PDE(
- ptd,
- kernel_pde_index + i,
- NEW_L1_ENTRY(PG_PREM_RW, PT_ADDR(ptd, PG_TABLE_KERNEL + i))
- )
+ for (uint32_t i = 0; i < PG_TABLE_STACK - PG_TABLE_KERNEL; i++) {
+ SET_PDE(ptd,
+ kernel_pde_index + i,
+ NEW_L1_ENTRY(PG_PREM_URW, PT_ADDR(ptd, PG_TABLE_KERNEL + i)))
}
-
+
// 首先,检查内核的大小是否可以fit进我们这几个表(12MiB)
- if (kernel_pg_counts > (PG_TABLE_STACK - PG_TABLE_KERNEL) * PG_MAX_ENTRIES) {
+ if (kernel_pg_counts >
+ (PG_TABLE_STACK - PG_TABLE_KERNEL) * PG_MAX_ENTRIES) {
// ERROR: require more pages
// here should do something else other than head into blocking
- asm ("ud2");
+ asm("ud2");
}
-
+
// 计算内核.text段的物理地址
uintptr_t kernel_pm = V2P(&__kernel_start);
-
+
// 重映射内核至高半区地址(>=0xC0000000)
- for (uint32_t i = 0; i < kernel_pg_counts; i++)
- {
- SET_PTE(
- ptd,
- PG_TABLE_KERNEL,
- kernel_pte_index + i,
- NEW_L2_ENTRY(PG_PREM_RW, kernel_pm + (i << PG_SIZE_BITS))
- )
+ for (uint32_t i = 0; i < kernel_pg_counts; i++) {
+ // FIXME: 只是用作用户模式(R3)测试!
+ // 在实际中,内核代码除了极少部分需要暴露给R3(如从信号返回),其余的应为R0。
+ SET_PTE(ptd,
+ PG_TABLE_KERNEL,
+ kernel_pte_index + i,
+ NEW_L2_ENTRY(PG_PREM_URW, kernel_pm + (i << PG_SIZE_BITS)))
}
// 最后一个entry用于循环映射
- SET_PDE(
- ptd,
- PG_MAX_ENTRIES - 1,
- NEW_L1_ENTRY(T_SELF_REF_PERM, ptd)
- );
+ SET_PDE(ptd, PG_MAX_ENTRIES - 1, NEW_L1_ENTRY(T_SELF_REF_PERM, ptd));
}
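
The final SET_PDE above installs the recursive (self-referencing) page-directory entry that L1_BASE_VADDR, L2_BASE_VADDR and L2_VADDR() in lunaix/mm/page.h rely on. A minimal sketch of the address arithmetic this enables, using only constants from this diff (pde_of/pte_of are illustrative helpers, not functions in the tree):

#include <lunaix/mm/page.h>

/* PDE #1023 points back at the page directory itself, so the MMU can be
 * used to reach any paging structure through a fixed virtual window. */
static inline x86_pte_t*
pde_of(uintptr_t va)
{
    /* dir -> dir -> dir: 0xFFFFF000 acts as an array of PDEs */
    return &((x86_page_table*)L1_BASE_VADDR)->entry[L1_INDEX(va)];
}

static inline x86_pte_t*
pte_of(uintptr_t va)
{
    /* dir -> dir -> table: 0xFFC00000 | (pd_index << 12) reaches the PTEs */
    return &((x86_page_table*)L2_VADDR(L1_INDEX(va)))->entry[L2_INDEX(va)];
}
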
-uint32_t __save_subset(uint8_t* destination, uint8_t* base, unsigned int size) {
+uint32_t
+__save_subset(uint8_t* destination, uint8_t* base, unsigned int size)
+{
unsigned int i = 0;
- for (; i < size; i++)
- {
+ for (; i < size; i++) {
*(destination + i) = *(base + i);
}
return i;
}
-void
-_save_multiboot_info(multiboot_info_t* info, uint8_t* destination) {
+void
+_save_multiboot_info(multiboot_info_t* info, uint8_t* destination)
+{
uint32_t current = 0;
- uint8_t* info_b = (uint8_t*) info;
- for (; current < sizeof(multiboot_info_t); current++)
- {
+ uint8_t* info_b = (uint8_t*)info;
+ for (; current < sizeof(multiboot_info_t); current++) {
*(destination + current) = *(info_b + current);
}
- ((multiboot_info_t*) destination)->mmap_addr = (uintptr_t)destination + current;
- current += __save_subset(destination + current, (uint8_t*)info->mmap_addr, info->mmap_length);
+ ((multiboot_info_t*)destination)->mmap_addr =
+ (uintptr_t)destination + current;
+ current += __save_subset(
+ destination + current, (uint8_t*)info->mmap_addr, info->mmap_length);
if (present(info->flags, MULTIBOOT_INFO_DRIVE_INFO)) {
- ((multiboot_info_t*) destination)->drives_addr = (uintptr_t)destination + current;
- current += __save_subset(destination + current, (uint8_t*)info->drives_addr, info->drives_length);
+ ((multiboot_info_t*)destination)->drives_addr =
+ (uintptr_t)destination + current;
+ current += __save_subset(destination + current,
+ (uint8_t*)info->drives_addr,
+ info->drives_length);
}
}
-void
-_hhk_init(ptd_t* ptd, uint32_t kpg_size) {
+void
+_hhk_init(ptd_t* ptd, uint32_t kpg_size)
+{
// 初始化 kpg 全为0
// P.s. 真没想到GRUB会在这里留下一堆垃圾! 老子的页表全乱套了!
- uint8_t* kpg = (uint8_t*) ptd;
- for (uint32_t i = 0; i < kpg_size; i++)
- {
+ uint8_t* kpg = (uint8_t*)ptd;
+ for (uint32_t i = 0; i < kpg_size; i++) {
*(kpg + i) = 0;
}
-
+
_init_page(ptd);
}
\ No newline at end of file
+++ /dev/null
-# bx_enh_dbg_ini
-SeeReg[0] = TRUE
-SeeReg[1] = TRUE
-SeeReg[2] = TRUE
-SeeReg[3] = TRUE
-SeeReg[4] = FALSE
-SeeReg[5] = FALSE
-SeeReg[6] = FALSE
-SeeReg[7] = FALSE
-SingleCPU = FALSE
-ShowIOWindows = TRUE
-ShowButtons = TRUE
-SeeRegColors = TRUE
-ignoreNxtT = TRUE
-ignSSDisasm = TRUE
-UprCase = 1
-DumpInAsciiMode = 3
-isLittleEndian = TRUE
-DefaultAsmLines = 512
-DumpWSIndex = 2
-DockOrder = 0x123
-ListWidthPix[0] = 257
-ListWidthPix[1] = 318
-ListWidthPix[2] = 367
-MainWindow = 0, 0, 743, 500
-FontName = Normal
// Hardware enable the APIC
// By setting bit 11 of IA32_APIC_BASE register
- // Note: After this point, you can't disable then re-enable it until a reset (i.e., reboot)
- asm volatile (
- "movl %0, %%ecx\n"
- "rdmsr\n"
- "orl %1, %%eax\n"
- "wrmsr\n"
- ::"i"(IA32_MSR_APIC_BASE), "i"(IA32_APIC_ENABLE)
- : "eax", "ecx", "edx"
- );
+ // Note: After this point, you can't disable then re-enable it until a reset
+ // (i.e., reboot)
+ asm volatile("movl %0, %%ecx\n"
+ "rdmsr\n"
+ "orl %1, %%eax\n"
+ "wrmsr\n" ::"i"(IA32_MSR_APIC_BASE),
+ "i"(IA32_APIC_ENABLE)
+ : "eax", "ecx", "edx");
// Print the basic information of our current local APIC
uint32_t apic_id = apic_read_reg(APIC_IDR) >> 24;
uint32_t apic_ver = apic_read_reg(APIC_VER);
kprintf(KINFO "ID: %x, Version: %x, Max LVT: %u\n",
- apic_id,
- apic_ver & 0xff,
- (apic_ver >> 16) & 0xff);
+ apic_id,
+ apic_ver & 0xff,
+ (apic_ver >> 16) & 0xff);
// initialize the local vector table (LVT)
apic_setup_lvts();
// initialize priority registers
-
- // set the task priority to the lowest possible, so all external interrupts are acceptable
- // Note, the lowest possible priority class is 2, not 0, 1, as they are reserved for
- // internal interrupts (vector 0-31, and each p-class resposible for 16 vectors).
- // See Intel Manual Vol. 3A, 10-29
+
+ // set the task priority to the lowest possible, so all external interrupts
+ // are acceptable
+    // Note, the lowest possible priority class is 2, not 0 or 1, as those are
+    // reserved for internal interrupts (vectors 0-31; each priority class is
+    // responsible for 16 vectors). See Intel Manual Vol. 3A, 10-29
apic_write_reg(APIC_TPR, APIC_PRIORITY(2, 0));
// enable APIC
uint32_t spiv = apic_read_reg(APIC_SPIVR);
// install our handler for spurious interrupt.
- spiv = (spiv & ~0xff) | APIC_SPIV_APIC_ENABLE | APIC_SPIV_IV;
+ spiv = (spiv & ~0xff) | APIC_SPIV_APIC_ENABLE | APIC_SPIV_IV;
apic_write_reg(APIC_SPIVR, spiv);
}
-#define LVT_ENTRY_LINT0(vector) (LVT_DELIVERY_FIXED | vector)
+#define LVT_ENTRY_LINT0(vector) (LVT_DELIVERY_FIXED | vector)
// Pin LINT#1 is configured for relaying NMI, but we masked it here as I think
// it is too early for that
// LINT#1 *must* be edge triggered (Intel Manual Vol. 3, 10-14)
-#define LVT_ENTRY_LINT1 (LVT_DELIVERY_NMI | LVT_MASKED | LVT_TRIGGER_EDGE)
-#define LVT_ENTRY_ERROR(vector) (LVT_DELIVERY_FIXED | vector)
+#define LVT_ENTRY_LINT1 (LVT_DELIVERY_NMI | LVT_MASKED | LVT_TRIGGER_EDGE)
+#define LVT_ENTRY_ERROR(vector) (LVT_DELIVERY_FIXED | vector)
void
apic_setup_lvts()
#include <arch/x86/interrupts.h>
-#include <hal/ioapic.h>
#include <hal/acpi/acpi.h>
+#include <hal/ioapic.h>
+#include <lunaix/common.h>
-
-#define IOAPIC_REG_SEL *((volatile uint32_t*)(IOAPIC_BASE_VADDR + IOAPIC_IOREGSEL))
-#define IOAPIC_REG_WIN *((volatile uint32_t*)(IOAPIC_BASE_VADDR + IOAPIC_IOWIN))
+#define IOAPIC_REG_SEL *((volatile uint32_t*)(MMIO_IOAPIC + IOAPIC_IOREGSEL))
+#define IOAPIC_REG_WIN *((volatile uint32_t*)(MMIO_IOAPIC + IOAPIC_IOWIN))
uint8_t
ioapic_get_irq(acpi_context* acpi_ctx, uint8_t old_irq);
void
-ioapic_init() {
+ioapic_init()
+{
// Remapping the IRQs
-
+
acpi_context* acpi_ctx = acpi_get_context();
// Remap the IRQ 8 (rtc timer's vector) to RTC_TIMER_IV in ioapic
    // (Remark: IRQ 8 is pin INTIN8)
// See IBM PC/AT Technical Reference 1-10 for old RTC IRQ
- // See Intel's Multiprocessor Specification for IRQ - IOAPIC INTIN mapping config.
-
+ // See Intel's Multiprocessor Specification for IRQ - IOAPIC INTIN
+ // mapping config.
+
// The ioapic_get_irq is to make sure we capture those overriden IRQs
// grab ourselves these irq numbers
uint8_t irq_rtc = ioapic_get_irq(acpi_ctx, PC_AT_IRQ_RTC);
- // PC_AT_IRQ_RTC -> RTC_TIMER_IV, fixed, edge trigged, polarity=high, physical, APIC ID 0
+    // PC_AT_IRQ_RTC -> RTC_TIMER_IV, fixed, edge triggered, polarity=high,
+ // physical, APIC ID 0
ioapic_redirect(irq_rtc, RTC_TIMER_IV, 0, IOAPIC_DELMOD_FIXED);
}
uint8_t
-ioapic_get_irq(acpi_context* acpi_ctx, uint8_t old_irq) {
+ioapic_get_irq(acpi_context* acpi_ctx, uint8_t old_irq)
+{
if (old_irq >= 24) {
return old_irq;
}
}
void
-ioapic_write(uint8_t sel, uint32_t val) {
+ioapic_write(uint8_t sel, uint32_t val)
+{
IOAPIC_REG_SEL = sel;
IOAPIC_REG_WIN = val;
}
uint32_t
-ioapic_read(uint8_t sel) {
+ioapic_read(uint8_t sel)
+{
IOAPIC_REG_SEL = sel;
return IOAPIC_REG_WIN;
}
void
-ioapic_redirect(uint8_t irq, uint8_t vector, uint8_t dest, uint32_t flags) {
+ioapic_redirect(uint8_t irq, uint8_t vector, uint8_t dest, uint32_t flags)
+{
uint8_t reg_sel = IOAPIC_IOREDTBL_BASE + irq * 2;
// Write low 32 bits
#ifndef __LUNAIX_APIC_H
#define __LUNAIX_APIC_H
+#include <lunaix/common.h>
#include <stdint.h>
-#define APIC_BASE_VADDR 0x1000
#define __APIC_BASE_PADDR 0xFEE00000
#define IA32_MSR_APIC_BASE 0x1B
-#define IA32_APIC_ENABLE 0x800
+#define IA32_APIC_ENABLE 0x800
/*
- * Common APIC memory-mapped registers
+ * Common APIC memory-mapped registers
* Ref: Intel Manual, Vol. 3A, Table 10-1
*/
-#define APIC_IDR 0x20 // ID Reg
-#define APIC_VER 0x30 // Version Reg
-#define APIC_TPR 0x80 // Task Priority
-#define APIC_APR 0x90 // Arbitration Priority
-#define APIC_PPR 0xA0 // Processor Priority
-#define APIC_EOI 0xB0 // End-Of-Interrupt
-#define APIC_RRD 0xC0 // Remote Read
-#define APIC_LDR 0xD0 // Local Destination Reg
-#define APIC_DFR 0xE0 // Destination Format Reg
-#define APIC_SPIVR 0xF0 // Spurious Interrupt Vector Reg
-#define APIC_ISR_BASE 0x100 // Base address for In-Service-Interrupt bitmap register (256bits)
-#define APIC_TMR_BASE 0x180 // Base address for Trigger-Mode bitmap register (256bits)
-#define APIC_IRR_BASE 0x200 // Base address for Interrupt-Request bitmap register (256bits)
-#define APIC_ESR 0x280 // Error Status Reg
-#define APIC_ICR_BASE 0x300 // Interrupt Command
-#define APIC_LVT_LINT0 0x350
-#define APIC_LVT_LINT1 0x360
-#define APIC_LVT_ERROR 0x370
+#define APIC_IDR 0x20 // ID Reg
+#define APIC_VER 0x30 // Version Reg
+#define APIC_TPR 0x80 // Task Priority
+#define APIC_APR 0x90 // Arbitration Priority
+#define APIC_PPR 0xA0 // Processor Priority
+#define APIC_EOI 0xB0 // End-Of-Interrupt
+#define APIC_RRD 0xC0 // Remote Read
+#define APIC_LDR 0xD0 // Local Destination Reg
+#define APIC_DFR 0xE0 // Destination Format Reg
+#define APIC_SPIVR 0xF0 // Spurious Interrupt Vector Reg
+#define APIC_ISR_BASE \
+ 0x100 // Base address for In-Service-Interrupt bitmap register (256bits)
+#define APIC_TMR_BASE \
+ 0x180 // Base address for Trigger-Mode bitmap register (256bits)
+#define APIC_IRR_BASE \
+ 0x200 // Base address for Interrupt-Request bitmap register (256bits)
+#define APIC_ESR 0x280 // Error Status Reg
+#define APIC_ICR_BASE 0x300 // Interrupt Command
+#define APIC_LVT_LINT0 0x350
+#define APIC_LVT_LINT1 0x360
+#define APIC_LVT_ERROR 0x370
// APIC Timer specific
-#define APIC_TIMER_LVT 0x320
-#define APIC_TIMER_ICR 0x380 // Initial Count
-#define APIC_TIMER_CCR 0x390 // Current Count
-#define APIC_TIMER_DCR 0x3E0 // Divide Configuration
+#define APIC_TIMER_LVT 0x320
+#define APIC_TIMER_ICR 0x380 // Initial Count
+#define APIC_TIMER_CCR 0x390 // Current Count
+#define APIC_TIMER_DCR 0x3E0 // Divide Configuration
-#define APIC_SPIV_FOCUS_DISABLE 0x200
-#define APIC_SPIV_APIC_ENABLE 0x100
-#define APIC_SPIV_EOI_BROADCAST 0x1000
+#define APIC_SPIV_FOCUS_DISABLE 0x200
+#define APIC_SPIV_APIC_ENABLE 0x100
+#define APIC_SPIV_EOI_BROADCAST 0x1000
-#define LVT_DELIVERY_FIXED 0
-#define LVT_DELIVERY_NMI (0x4 << 8)
-#define LVT_TRIGGER_EDGE (0 << 15)
-#define LVT_TRIGGER_LEVEL (1 << 15)
-#define LVT_MASKED (1 << 16)
-#define LVT_TIMER_ONESHOT (0 << 17)
-#define LVT_TIMER_PERIODIC (1 << 17)
+#define LVT_DELIVERY_FIXED 0
+#define LVT_DELIVERY_NMI (0x4 << 8)
+#define LVT_TRIGGER_EDGE (0 << 15)
+#define LVT_TRIGGER_LEVEL (1 << 15)
+#define LVT_MASKED (1 << 16)
+#define LVT_TIMER_ONESHOT (0 << 17)
+#define LVT_TIMER_PERIODIC (1 << 17)
// Dividers for timer. See Intel Manual Vol3A. 10-17 (pp. 3207), Figure 10-10
-#define APIC_TIMER_DIV1 0b1011
-#define APIC_TIMER_DIV2 0b0000
-#define APIC_TIMER_DIV4 0b0001
-#define APIC_TIMER_DIV8 0b0010
-#define APIC_TIMER_DIV16 0b0011
-#define APIC_TIMER_DIV32 0b1000
-#define APIC_TIMER_DIV64 0b1001
-#define APIC_TIMER_DIV128 0b1010
+#define APIC_TIMER_DIV1 0b1011
+#define APIC_TIMER_DIV2 0b0000
+#define APIC_TIMER_DIV4 0b0001
+#define APIC_TIMER_DIV8 0b0010
+#define APIC_TIMER_DIV16 0b0011
+#define APIC_TIMER_DIV32 0b1000
+#define APIC_TIMER_DIV64 0b1001
+#define APIC_TIMER_DIV128 0b1010
-#define APIC_PRIORITY(cls, subcls) (((cls) << 4) | (subcls))
+#define APIC_PRIORITY(cls, subcls) (((cls) << 4) | (subcls))
-#define apic_read_reg(reg) (*(uint32_t*)(APIC_BASE_VADDR + (reg)))
-#define apic_write_reg(reg, val) (*(uint32_t*)(APIC_BASE_VADDR + (reg)) = (val))
+#define apic_read_reg(reg) (*(uint32_t*)(MMIO_APIC + (reg)))
+#define apic_write_reg(reg, val) (*(uint32_t*)(MMIO_APIC + (reg)) = (val))
void
apic_init();
/**
* @brief Tell the APIC that the handler for current interrupt is finished.
* This will issue a write action to EOI register.
- *
+ *
*/
inline static void
-apic_done_servicing() {
+apic_done_servicing()
+{
apic_write_reg(APIC_EOI, 0);
}
#include <stdint.h>
+#define SEL_RPL(selector) ((selector)&0x3)
+
typedef unsigned int reg32;
typedef unsigned short reg16;
#ifndef __LUNAIX_CONSTANTS_H
#define __LUNAIX_CONSTANTS_H
-#define KSTACK_SIZE (64 << 10)
-#define KSTACK_START ((0xFFBFFFFFU - KSTACK_SIZE) + 1)
-#define KSTACK_TOP 0xffbffff0
-#define HIGHER_HLF_BASE 0xC0000000
-#define MEM_1MB 0x100000
-
-#define VGA_BUFFER_VADDR 0xB0000000
-#define VGA_BUFFER_PADDR 0xB8000
-#define VGA_BUFFER_SIZE 4096
-
-#define KCODE_SEG 0x08
-#define KDATA_SEG 0x10
-#define UCODE_SEG 0x1B
-#define UDATA_SEG 0x23
-#define TSS_SEG 0x28
-
-#define USER_START 0x400000
-#define USTACK_SIZE 0x100000
-#define USTACK_TOP 0x9fffffff
-#define USTACK_END (USTACK_TOP - USTACK_SIZE + 1)
-#define UMMAP_AREA 0x4D000000
-
-#define SYS_TIMER_FREQUENCY_HZ 2048
+#define KSTACK_SIZE (64 << 10)
+#define KSTACK_START ((0x3FFFFFU - KSTACK_SIZE) + 1)
+#define KSTACK_TOP 0x3FFFF0U
+
+#define KERNEL_MM_BASE 0xC0000000
+#define MEM_1MB 0x100000
+#define MEM_4MB 0x400000
+
+#define KCODE_MAX_SIZE MEM_4MB
+#define KHEAP_START (KERNEL_MM_BASE + KCODE_MAX_SIZE)
+#define KHEAP_SIZE_MB 256
+
+#define PROC_TABLE_SIZE_MB 4
+#define PROC_START (KHEAP_START + (KHEAP_SIZE_MB * MEM_1MB))
+
+#define VGA_BUFFER_VADDR (PROC_START + (PROC_TABLE_SIZE_MB * MEM_1MB))
+#define VGA_BUFFER_PADDR 0xB8000
+#define VGA_BUFFER_SIZE 4096
+
+#define MMIO_BASE (VGA_BUFFER_VADDR + MEM_4MB)
+#define MMIO_APIC (MMIO_BASE)
+#define MMIO_IOAPIC (MMIO_BASE + 4096)
+
+#define KCODE_SEG 0x08
+#define KDATA_SEG 0x10
+#define UCODE_SEG 0x1B
+#define UDATA_SEG 0x23
+#define TSS_SEG 0x28
+
+#define USER_START 0x400000
+#define USTACK_SIZE 0x100000
+#define USTACK_TOP 0x9ffffff0
+#define USTACK_END (0x9fffffff - USTACK_SIZE + 1)
+#define UMMAP_AREA 0x4D000000
+
+#define SYS_TIMER_FREQUENCY_HZ 2048
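
For quick reference, the concrete addresses these macros work out to (plain arithmetic from the definitions above, assuming nothing beyond them):

/*
 * KSTACK_START     = (0x3FFFFF - 64KiB) + 1   = 0x003F0000
 * KHEAP_START      = 0xC0000000 + 4MiB        = 0xC0400000
 * PROC_START       = KHEAP_START + 256MiB     = 0xD0400000
 * VGA_BUFFER_VADDR = PROC_START + 4MiB        = 0xD0800000
 * MMIO_BASE        = VGA_BUFFER_VADDR + 4MiB  = 0xD0C00000  (= MMIO_APIC)
 * MMIO_IOAPIC      = MMIO_BASE + 4096         = 0xD0C01000
 */
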
#ifndef __ASM__
#include <stddef.h>
* @member: the name of the member within the struct.
*
*/
-#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
+#define container_of(ptr, type, member) \
+ ({ \
+ const typeof(((type*)0)->member)* __mptr = (ptr); \
+ (type*)((char*)__mptr - offsetof(type, member)); \
+ })
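
A minimal usage sketch of container_of with a hypothetical struct (demo_node is not part of the tree); it recovers the enclosing object from a pointer to an embedded member such as a llist_header hook:

#include <lunaix/ds/llist.h>

struct demo_node
{
    int value;
    struct llist_header link; /* embedded list hook */
};

static inline struct demo_node*
node_of(struct llist_header* hook)
{
    /* subtracts offsetof(struct demo_node, link) from the hook pointer */
    return container_of(hook, struct demo_node, link);
}
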
#endif
#endif /* __LUNAIX_CONSTANTS_H */
__LXSYSCALL1(unsigned int, sleep, unsigned int, seconds)
+__LXSYSCALL(int, pause)
+
#endif /* __LUNAIX_UNISTD_H */
#define __LUNAIX_DMM_H
// Dynamic Memory (i.e., heap) Manager
-#include <stddef.h>
#include <lunaix/mm/mm.h>
#include <lunaix/process.h>
+#include <stddef.h>
#define M_ALLOCATED 0x1
#define M_PREV_FREE 0x2
#define HEAP_INIT_SIZE 4096
-
int
dmm_init(heap_context_t* heap);
int
-lxbrk(heap_context_t* heap, void* addr);
+lxbrk(heap_context_t* heap, void* addr, int user);
void*
-lxsbrk(heap_context_t* heap, size_t size);
+lxsbrk(heap_context_t* heap, size_t size, int user);
void*
lx_malloc_internal(heap_context_t* heap, size_t size);
#include <lunaix/ds/llist.h>
#include <lunaix/ds/mutex.h>
-typedef struct
+typedef struct
{
void* start;
void* brk;
/**
* @brief 私有区域,该区域中的页无法进行任何形式的共享。
- *
+ *
*/
-#define REGION_PRIVATE 0x0
+#define REGION_PRIVATE 0x0
/**
- * @brief 读共享区域,该区域中的页可以被两个进程之间读共享,但任何写操作须应用Copy-On-Write
- *
+ * @brief
+ * 读共享区域,该区域中的页可以被两个进程之间读共享,但任何写操作须应用Copy-On-Write
+ *
*/
-#define REGION_RSHARED 0x1
+#define REGION_RSHARED 0x1
/**
- * @brief 写共享区域,该区域中的页可以被两个进程之间读共享,任何的写操作无需执行Copy-On-Write
- *
+ * @brief
+ * 写共享区域,该区域中的页可以被两个进程之间读共享,任何的写操作无需执行Copy-On-Write
+ *
*/
-#define REGION_WSHARED 0x2
+#define REGION_WSHARED 0x2
-#define REGION_PERM_MASK 0x1c
+#define REGION_PERM_MASK 0x1c
+#define REGION_MODE_MASK 0x3
-#define REGION_READ (1 << 2)
-#define REGION_WRITE (1 << 3)
-#define REGION_EXEC (1 << 4)
-#define REGION_RW REGION_READ | REGION_WRITE
+#define REGION_READ (1 << 2)
+#define REGION_WRITE (1 << 3)
+#define REGION_EXEC (1 << 4)
+#define REGION_RW REGION_READ | REGION_WRITE
+
+#define REGION_TYPE_CODE (1 << 16)
+#define REGION_TYPE_GENERAL (2 << 16)
+#define REGION_TYPE_HEAP (3 << 16)
+#define REGION_TYPE_STACK (4 << 16)
struct mm_region
{
#ifndef __LUNAIX_PAGE_H
#define __LUNAIX_PAGE_H
-#include <stdint.h>
#include <lunaix/common.h>
+#include <stdint.h>
-#define PG_SIZE_BITS 12
-#define PG_SIZE (1 << PG_SIZE_BITS)
-#define PG_INDEX_BITS 10
+#define PG_SIZE_BITS 12
+#define PG_SIZE (1 << PG_SIZE_BITS)
+#define PG_INDEX_BITS 10
-#define PG_MAX_ENTRIES 1024U
-#define PG_LAST_TABLE PG_MAX_ENTRIES - 1
-#define PG_FIRST_TABLE 0
+#define PG_MAX_ENTRIES 1024U
+#define PG_LAST_TABLE PG_MAX_ENTRIES - 1
+#define PG_FIRST_TABLE 0
-#define PTE_NULL 0
+#define PTE_NULL 0
-#define P2V(paddr) ((uintptr_t)(paddr) + HIGHER_HLF_BASE)
-#define V2P(vaddr) ((uintptr_t)(vaddr) - HIGHER_HLF_BASE)
+#define P2V(paddr) ((uintptr_t)(paddr) + KERNEL_MM_BASE)
+#define V2P(vaddr) ((uintptr_t)(vaddr)-KERNEL_MM_BASE)
-#define PG_ALIGN(addr) ((uintptr_t)(addr) & 0xFFFFF000UL)
+#define PG_ALIGN(addr) ((uintptr_t)(addr)&0xFFFFF000UL)
-#define L1_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr) & 0xFFC00000UL) >> 22)
-#define L2_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr) & 0x003FF000UL) >> 12)
-#define PG_OFFSET(vaddr) (uint32_t)((uintptr_t)(vaddr) & 0x00000FFFUL)
+#define L1_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr)&0xFFC00000UL) >> 22)
+#define L2_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr)&0x003FF000UL) >> 12)
+#define PG_OFFSET(vaddr) (uint32_t)((uintptr_t)(vaddr)&0x00000FFFUL)
-#define GET_PT_ADDR(pde) PG_ALIGN(pde)
-#define GET_PG_ADDR(pte) PG_ALIGN(pte)
+#define GET_PT_ADDR(pde) PG_ALIGN(pde)
+#define GET_PG_ADDR(pte) PG_ALIGN(pte)
-#define PG_DIRTY(pte) ((pte & (1 << 6)) >> 6)
-#define PG_ACCESSED(pte) ((pte & (1 << 5)) >> 5)
+#define PG_DIRTY(pte) ((pte & (1 << 6)) >> 6)
+#define PG_ACCESSED(pte) ((pte & (1 << 5)) >> 5)
-#define IS_CACHED(entry) ((entry & 0x1))
+#define IS_CACHED(entry) ((entry & 0x1))
-#define PG_PRESENT (0x1)
-#define PG_WRITE (0x1 << 1)
-#define PG_ALLOW_USER (0x1 << 2)
-#define PG_WRITE_THROUGH (1 << 3)
-#define PG_DISABLE_CACHE (1 << 4)
-#define PG_PDE_4MB (1 << 7)
+#define PG_PRESENT (0x1)
+#define PG_WRITE (0x1 << 1)
+#define PG_ALLOW_USER (0x1 << 2)
+#define PG_WRITE_THROUGH (1 << 3)
+#define PG_DISABLE_CACHE (1 << 4)
+#define PG_PDE_4MB (1 << 7)
-#define NEW_L1_ENTRY(flags, pt_addr) (PG_ALIGN(pt_addr) | (((flags) | PG_WRITE_THROUGH) & 0xfff))
-#define NEW_L2_ENTRY(flags, pg_addr) (PG_ALIGN(pg_addr) | ((flags) & 0xfff))
+#define NEW_L1_ENTRY(flags, pt_addr) \
+ (PG_ALIGN(pt_addr) | (((flags) | PG_WRITE_THROUGH) & 0xfff))
+#define NEW_L2_ENTRY(flags, pg_addr) (PG_ALIGN(pg_addr) | ((flags)&0xfff))
-#define V_ADDR(pd, pt, offset) ((pd) << 22 | (pt) << 12 | (offset))
-#define P_ADDR(ppn, offset) ((ppn << 12) | (offset))
+#define V_ADDR(pd, pt, offset) ((pd) << 22 | (pt) << 12 | (offset))
+#define P_ADDR(ppn, offset) ((ppn << 12) | (offset))
-#define PG_ENTRY_FLAGS(entry) ((entry) & 0xFFFU)
-#define PG_ENTRY_ADDR(entry) ((entry) & ~0xFFFU)
+#define PG_ENTRY_FLAGS(entry) ((entry)&0xFFFU)
+#define PG_ENTRY_ADDR(entry) ((entry) & ~0xFFFU)
-#define HAS_FLAGS(entry, flags) ((PG_ENTRY_FLAGS(entry) & (flags)) == flags)
-#define CONTAINS_FLAGS(entry, flags) (PG_ENTRY_FLAGS(entry) & (flags))
+#define HAS_FLAGS(entry, flags) ((PG_ENTRY_FLAGS(entry) & (flags)) == flags)
+#define CONTAINS_FLAGS(entry, flags) (PG_ENTRY_FLAGS(entry) & (flags))
-#define PG_PREM_R PG_PRESENT
-#define PG_PREM_RW PG_PRESENT | PG_WRITE
-#define PG_PREM_UR PG_PRESENT | PG_ALLOW_USER
-#define PG_PREM_URW PG_PRESENT | PG_WRITE | PG_ALLOW_USER
+#define PG_PREM_R PG_PRESENT
+#define PG_PREM_RW PG_PRESENT | PG_WRITE
+#define PG_PREM_UR PG_PRESENT | PG_ALLOW_USER
+#define PG_PREM_URW PG_PRESENT | PG_WRITE | PG_ALLOW_USER
// 用于对PD进行循环映射,因为我们可能需要对PD进行频繁操作,我们在这里禁用TLB缓存
-#define T_SELF_REF_PERM PG_PREM_RW | PG_DISABLE_CACHE | PG_WRITE_THROUGH
-
+#define T_SELF_REF_PERM PG_PREM_RW | PG_DISABLE_CACHE | PG_WRITE_THROUGH
// 页目录的虚拟基地址,可以用来访问到各个PDE
-#define L1_BASE_VADDR 0xFFFFF000U
+#define L1_BASE_VADDR 0xFFFFF000U
// 页表的虚拟基地址,可以用来访问到各个PTE
-#define L2_BASE_VADDR 0xFFC00000U
+#define L2_BASE_VADDR 0xFFC00000U
// 用来获取特定的页表的虚拟地址
-#define L2_VADDR(pd_offset) (L2_BASE_VADDR | (pd_offset << 12))
+#define L2_VADDR(pd_offset) (L2_BASE_VADDR | (pd_offset << 12))
typedef unsigned long ptd_t;
typedef unsigned long pt_t;
/**
* @brief 虚拟映射属性
- *
+ *
*/
-typedef struct {
+typedef struct
+{
+ // 虚拟页地址
+ uintptr_t va;
// 物理页码(如果不存在映射,则为0)
uint32_t pn;
// 物理页地址(如果不存在映射,则为0)
// 映射的flags
uint16_t flags;
// PTE地址
- x86_pte_t *pte;
+ x86_pte_t* pte;
} v_mapping;
typedef struct
x86_pte_t entry[PG_MAX_ENTRIES];
} __attribute__((packed)) x86_page_table;
-
extern void __pg_mount_point;
/* 四个页挂载点,两个页目录挂载点: 用于临时创建&编辑页表 */
-
-#define PD_MOUNT_1 0xAFC00000
-#define PD_MOUNT_2 0xAF800000
-#define PG_MOUNT_BASE 0xAF7FF000
-#define PG_MOUNT_1 (PG_MOUNT_BASE)
-#define PG_MOUNT_2 (PG_MOUNT_BASE - 0x1000)
-#define PG_MOUNT_3 (PG_MOUNT_BASE - 0x2000)
-#define PG_MOUNT_4 (PG_MOUNT_BASE - 0x3000)
-#define PD_REFERENCED L2_BASE_VADDR
-
-#define CURPROC_PTE(vpn) (&((x86_page_table*)(PD_MOUNT_1 | (((vpn) & 0xffc00) << 2)))->entry[(vpn) & 0x3ff])
-#define PTE_MOUNTED(mnt, vpn) (((x86_page_table*)((mnt) | (((vpn) & 0xffc00) << 2)))->entry[(vpn) & 0x3ff])
+#define PG_MOUNT_RANGE(l1_index) (701 <= (l1_index) && (l1_index) <= 703)
+#define PD_MOUNT_1 (MMIO_BASE + MEM_4MB)
+#define PG_MOUNT_BASE (PD_MOUNT_1 + MEM_4MB)
+#define PG_MOUNT_1 (PG_MOUNT_BASE)
+#define PG_MOUNT_2 (PG_MOUNT_BASE + 0x1000)
+#define PG_MOUNT_3 (PG_MOUNT_BASE + 0x2000)
+#define PG_MOUNT_4 (PG_MOUNT_BASE + 0x3000)
+
+#define PD_REFERENCED L2_BASE_VADDR
+
+#define CURPROC_PTE(vpn) \
+ (&((x86_page_table*)(PD_MOUNT_1 | (((vpn)&0xffc00) << 2))) \
+ ->entry[(vpn)&0x3ff])
+#define PTE_MOUNTED(mnt, vpn) \
+ (((x86_page_table*)((mnt) | (((vpn)&0xffc00) << 2)))->entry[(vpn)&0x3ff])
#endif /* __LUNAIX_PAGE_H */
#define __LUNAIX_REGION_H
#include <lunaix/mm/mm.h>
-#include <lunaix/process.h>
-void region_add(struct proc_info* proc, unsigned long start, unsigned long end, unsigned int attr);
+void
+region_add(struct mm_region* regions,
+ unsigned long start,
+ unsigned long end,
+ unsigned int attr);
-void region_release_all(struct proc_info* proc);
+void
+region_release_all(struct mm_region* regions);
-struct mm_region* region_get(struct proc_info* proc, unsigned long vaddr);
+struct mm_region*
+region_get(struct mm_region* regions, unsigned long vaddr);
+void
+region_copy(struct mm_region* src, struct mm_region* dest);
#endif /* __LUNAIX_REGION_H */
#include <lunaix/process.h>
#include <stddef.h>
#include <stdint.h>
-#include <lunaix/mm/pmm.h>
// Virtual memory manager
+#define VMAP_NULL 0
/**
- * @brief 初始化虚拟内存管理器
+ * @brief 映射模式:忽略已存在映射
*
*/
-void
-vmm_init();
-
+#define VMAP_IGNORE 1
/**
- * @brief 创建一个页目录
+ * @brief 映射模式:不作实际映射。该功能用于预留出特定的地址空间
*
- * @return ptd_entry* 页目录的物理地址,随时可以加载进CR3
*/
-x86_page_table*
-vmm_init_pd();
+#define VMAP_NOMAP 2
/**
- * @brief 尝试建立一个映射关系。映射指定的物理页地址至虚拟页地址,如果指定的虚拟页地址已被占用
- * 则尝试寻找新的可用地址(该地址总是大于指定的地址)。
+ * @brief 初始化虚拟内存管理器
*
- * @param vpn 虚拟页地址
- * @param pa 物理页地址
- * @param dattr PDE 的属性
- * @param tattr PTE 的属性
- * @return 虚拟页地址,如不成功,则为 NULL
*/
-void*
-vmm_map_page(pid_t pid, void* va, void* pa, pt_attr tattr);
+void
+vmm_init();
/**
- * @brief 建立一个映射关系,映射指定的物理页地址至虚拟页地址。如果指定的虚拟页地址已被占用,
- * 则覆盖。
+ * @brief 创建一个页目录
*
- * @param va 虚拟页地址
- * @param pa 物理页地址
- * @param dattr PDE 的属性
- * @param tattr PTE 的属性
- * @return 虚拟页地址
+ * @return ptd_entry* 页目录的物理地址,随时可以加载进CR3
*/
-void*
-vmm_fmap_page(pid_t pid, void* va, void* pa, pt_attr tattr);
+x86_page_table*
+vmm_init_pd();
/**
- * @brief 尝试为一个虚拟页地址创建一个可用的物理页映射
+ * @brief 在指定地址空间中,添加一个映射
*
- * @param va 虚拟页地址
- * @return 虚拟页地址,如不成功,则为 NULL
- */
-void*
-vmm_alloc_page(pid_t pid, void* va, void** pa, pt_attr tattr, pp_attr_t pattr);
-
-
-/**
- * @brief 尝试分配多个连续的虚拟页
- *
- * @param va 起始虚拟地址
- * @param sz 大小(必须为4K对齐)
- * @param tattr 属性
- * @return int 是否成功
- */
-int
-vmm_alloc_pages(pid_t pid, void* va, size_t sz, pt_attr tattr, pp_attr_t pattr);
-
-/**
- * @brief 设置一个映射,如果映射已存在,则忽略。
- *
- * @param va
- * @param pa
- * @param attr
+ * @param mnt 地址空间挂载点
+ * @param va 虚拟地址
+ * @param pa 物理地址
+ * @param attr 映射属性
+ * @return int
*/
int
-vmm_set_mapping(pid_t pid, void* va, void* pa, pt_attr attr);
-
-/**
- * @brief 删除并释放一个映射
- *
- * @param vpn
- */
-void
-vmm_unmap_page(pid_t pid, void* va);
+vmm_set_mapping(uintptr_t mnt,
+ uintptr_t va,
+ uintptr_t pa,
+ pt_attr attr,
+ int options);
/**
* @brief 删除一个映射
*
- * @param vpn
+ * @param mnt
+ * @param va
+ * @return uintptr_t
*/
-void
-vmm_unset_mapping(void* va);
-
-/**
- * @brief 将虚拟地址翻译为其对应的物理映射
- *
- * @param va 虚拟地址
- * @return void* 物理地址,如映射不存在,则为NULL
- */
-void*
-vmm_v2p(void* va);
+uintptr_t
+vmm_del_mapping(uintptr_t mnt, uintptr_t va);
/**
* @brief 查找一个映射
* @param va 虚拟地址
* @return v_mapping 映射相关属性
*/
-v_mapping
-vmm_lookup(void* va);
+int
+vmm_lookup(uintptr_t va, v_mapping* mapping);
/**
* @brief (COW) 为虚拟页创建副本。
- *
+ *
* @return void* 包含虚拟页副本的物理页地址。
- *
+ *
*/
-void* vmm_dup_page(pid_t pid, void* pa);
+void*
+vmm_dup_page(pid_t pid, void* pa);
+
+void*
+vmm_dup_vmspace(pid_t pid);
/**
* @brief 挂载另一个虚拟地址空间至当前虚拟地址空间
- *
+ *
* @param pde 页目录的物理地址
- * @return void*
+ * @return void*
*/
void*
vmm_mount_pd(uintptr_t mnt, void* pde);
/**
* @brief 卸载已挂载的虚拟地址空间
- *
+ *
*/
void*
vmm_unmount_pd(uintptr_t mnt);
__LXSYSCALL(void, yield);
__LXSYSCALL1(pid_t, wait, int*, status);
+
__LXSYSCALL3(pid_t, waitpid, pid_t, pid, int*, status, int, options);
#endif /* __LUNAIX_SYS_H */
#include <arch/x86/interrupts.h>
#include <lunaix/clock.h>
#include <lunaix/mm/mm.h>
+#include <lunaix/signal.h>
#include <lunaix/timer.h>
#include <lunaix/types.h>
#include <stdint.h>
#define PROC_TERMMASK 0x6
+#define PROC_FINPAUSE 1
+
struct proc_mm
{
heap_context_t u_heap;
- struct mm_region* regions;
+ struct mm_region regions;
};
+struct proc_sig
+{
+ void* signal_handler;
+ int sig_num;
+ isr_param prev_context;
+};
+
+#define PROC_SIG_SIZE sizeof(struct proc_sig) // size=84
+
struct proc_info
{
+ /*
+ Any change to *critical section*, including layout, size
+ must be reflected in kernel/asm/x86/interrupt.S to avoid
+ disaster!
+ */
+
+ /* ---- critical section start ---- */
+
pid_t pid;
struct proc_info* parent;
- isr_param intr_ctx;
+ isr_param intr_ctx; // size=76
+ uintptr_t ustack_top;
+ void* page_table;
+
+ /* ---- critical section end ---- */
+
struct llist_header siblings;
struct llist_header children;
struct llist_header grp_member;
struct proc_mm mm;
- void* page_table;
time_t created;
uint8_t state;
int32_t exit_code;
int32_t k_status;
+ sigset_t sig_pending;
+ sigset_t sig_mask;
+ int flags;
+ void* sig_handler[_SIG_NUM];
pid_t pgid;
struct lx_timer* timer;
};
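
The hard-coded offsets in kernel/asm/x86/interrupt.S (84(%eax) for ustack_top, 88(%eax) and 88(%ecx) for page_table) follow from the critical-section layout above: pid (4) + parent (4) + intr_ctx (76) = 84. A sketch of compile-time guards one could add, assuming a C11 toolchain, the i386 ABI with 4-byte pid_t and pointers, and that struct proc_info lives in lunaix/process.h:

#include <lunaix/process.h>
#include <stddef.h>

_Static_assert(offsetof(struct proc_info, ustack_top) == 84,
               "interrupt.S stores the user esp at __current + 84");
_Static_assert(offsetof(struct proc_info, page_table) == 88,
               "interrupt.S and switch_to load the page table from +88");
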
extern volatile struct proc_info* __current;
-pid_t
-alloc_pid();
+/**
+ * @brief 分配并初始化一个进程控制块
+ *
+ * @return struct proc_info*
+ */
+struct proc_info*
+alloc_process();
+/**
+ * @brief 初始化进程用户空间
+ *
+ * @param pcb
+ */
void
-init_proc(struct proc_info* pcb);
+init_proc_user_space(struct proc_info* pcb);
/**
* @brief 向系统发布一个进程,使其可以被调度。
* @param process
*/
void
-push_process(struct proc_info* process);
+commit_process(struct proc_info* process);
pid_t
destroy_process(pid_t pid);
--- /dev/null
+#ifndef __LUNAIX_SIGNAL_H
+#define __LUNAIX_SIGNAL_H
+
+#include <lunaix/syscall.h>
+
+#define _SIG_NUM 8
+
+#define _SIG_PENDING(bitmap, sig) ((bitmap) & (1 << (sig)))
+
+#define _SIGSEGV 0
+#define _SIGALRM 1
+#define _SIGCHLD 2
+#define _SIGCLD _SIGCHLD
+#define _SIGINT 3
+#define _SIGKILL 4
+#define _SIGSTOP 5
+#define _SIGCONT 6
+
+#define __SIGNAL(num) (1 << (num))
+#define __SET_SIGNAL(bitmap, num) (bitmap = bitmap | __SIGNAL(num))
+
+#define _SIGNAL_UNMASKABLE (__SIGNAL(_SIGKILL) | __SIGNAL(_SIGSTOP))
+
+#define _SIG_BLOCK 1
+#define _SIG_UNBLOCK 2
+#define _SIG_SETMASK 3
+
+typedef unsigned int sigset_t;
+typedef void (*sighandler_t)(int);
+
+__LXSYSCALL2(int, signal, int, signum, sighandler_t, handler);
+
+#endif /* __LUNAIX_SIGNAL_H */
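
A short sketch of how the helpers above compose; the deliverable-set expression is my illustration of the intended bitmap semantics, not code lifted from kernel/signal.c:

#include <lunaix/signal.h>

/* e.g. on child exit: __SET_SIGNAL(parent->sig_pending, _SIGCHLD); */

static int
next_deliverable(sigset_t pending, sigset_t mask)
{
    /* masked signals are held back, but _SIGKILL/_SIGSTOP cannot be masked */
    sigset_t deliverable = pending & (~mask | _SIGNAL_UNMASKABLE);

    for (int sig = 0; sig < _SIG_NUM; sig++) {
        if (_SIG_PENDING(deliverable, sig)) {
            return sig;
        }
    }
    return -1;
}
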
// Some helper functions. As helpful as Spike the Dragon! :)
// 除法向上取整
-#define CEIL(v, k) (((v) + (1 << (k)) - 1) >> (k))
+#define CEIL(v, k) (((v) + (1 << (k)) - 1) >> (k))
// 除法向下取整
-#define FLOOR(v, k) ((v) >> (k))
+#define FLOOR(v, k) ((v) >> (k))
// 获取v最近的最大k倍数
-#define ROUNDUP(v, k) (((v) + (k) - 1) & ~((k) - 1))
+#define ROUNDUP(v, k) (((v) + (k)-1) & ~((k)-1))
// 获取v最近的最小k倍数
-#define ROUNDDOWN(v, k) ((v) & ~((k) - 1))
+#define ROUNDDOWN(v, k) ((v) & ~((k)-1))
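
A few worked evaluations (note that k is a power-of-two *shift* for CEIL/FLOOR but a power-of-two *value* for ROUNDUP/ROUNDDOWN):

/*
 * CEIL(4097, 12)        == 2     -- 4097 bytes span two 4KiB pages
 * FLOOR(4097, 12)       == 1
 * ROUNDUP(4097, 4096)   == 8192
 * ROUNDDOWN(4097, 4096) == 4096
 */
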
-inline static void spin() {
- while(1);
+#define __USER__ __attribute__((section(".usrtext")))
+
+inline static void
+spin()
+{
+ while (1)
+ ;
}
#ifdef __LUNAIXOS_DEBUG__
-#define assert(cond) \
- if (!(cond)) { \
- __assert_fail(#cond, __FILE__, __LINE__); \
+#define assert(cond) \
+ if (!(cond)) { \
+ __assert_fail(#cond, __FILE__, __LINE__); \
}
-#define assert_msg(cond, msg) \
- if (!(cond)) { \
- __assert_fail(msg, __FILE__, __LINE__); \
+#define assert_msg(cond, msg) \
+ if (!(cond)) { \
+ __assert_fail(msg, __FILE__, __LINE__); \
}
-void __assert_fail(const char* expr, const char* file, unsigned int line) __attribute__((noinline, noreturn));
+void
+__assert_fail(const char* expr, const char* file, unsigned int line)
+ __attribute__((noinline, noreturn));
#else
-#define assert(cond) (void)(cond); //assert nothing
-#define assert_msg(cond, msg) (void)(cond); //assert nothing
+#define assert(cond) (void)(cond); // assert nothing
+#define assert_msg(cond, msg) (void)(cond); // assert nothing
#endif
-void panick(const char* msg);
-
-
-#define wait_until(cond) while(!(cond));
-#define loop_until(cond) while(!(cond));
+void
+panick(const char* msg);
+#define wait_until(cond) \
+ while (!(cond)) \
+ ;
+#define loop_until(cond) \
+ while (!(cond)) \
+ ;
#endif /* __LUNAIX_SPIKE_H */
#define LXSEGFAULT -(5)
#define LXINVL -(6)
+#define EINTR -(7)
+
#endif /* __LUNAIX_CODE_H */
#define __SYSCALL__exit 8
#define __SYSCALL_wait 9
#define __SYSCALL_waitpid 10
+#define __SYSCALL_sigreturn 11
+#define __SYSCALL_sigprocmask 12
+#define __SYSCALL_signal 13
+#define __SYSCALL_pause 14
#define __SYSCALL_MAX 0x100
#define __PARAM_MAP5(t1, p1, ...) t1 p1, __PARAM_MAP4(__VA_ARGS__)
#define __PARAM_MAP6(t1, p1, ...) t1 p1, __PARAM_MAP5(__VA_ARGS__)
-#define ___DOINT33(callcode, rettype) \
- int v; \
- asm volatile("int %1\n" : "=a"(v) : "i"(LUNAIX_SYS_CALL), "a"(callcode)); \
- return (rettype)v;
-
#define __DEFINE_LXSYSCALL(rettype, name) asmlinkage rettype __lxsys_##name()
#define __DEFINE_LXSYSCALL1(rettype, name, t1, p1) \
asmlinkage rettype __lxsys_##name(__PARAM_MAP3(t1, p1, t2, p2, t3, p3))
#define __DEFINE_LXSYSCALL4(rettype, name, t1, p1, t2, p2, t3, p3, t4, p4) \
- asmlinkage rettype __lxsys_##nam( \
+ asmlinkage rettype __lxsys_##name( \
__PARAM_MAP4(t1, p1, t2, p2, t3, p3, t4, p4))
+#define __SYSCALL_INTERRUPTIBLE(code) \
+ asm("sti"); \
+ { code }; \
+ asm("cli");
+
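
A hypothetical sketch of how a blocking call such as pause() might use __SYSCALL_INTERRUPTIBLE: the body spins with interrupts enabled until signal delivery sets PROC_FINPAUSE (the flag and EINTR come from elsewhere in this diff; the actual implementation in the tree may differ):

__DEFINE_LXSYSCALL(int, pause)
{
    __current->flags &= ~PROC_FINPAUSE;

    /* interrupts stay enabled while waiting, so the scheduler and signal
     * delivery can run; the flag is set once a handled signal arrives */
    __SYSCALL_INTERRUPTIBLE({
        while (!(__current->flags & PROC_FINPAUSE))
            ;
    })

    return EINTR;
}
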
+#define ___DOINT33(callcode, rettype) \
+ int v; \
+ asm volatile("int %1\n" : "=a"(v) : "i"(LUNAIX_SYS_CALL), "a"(callcode)); \
+ return (rettype)v;
+
#define __LXSYSCALL(rettype, name) \
static rettype name() \
{ \
_set_idt_intr_entry(FAULT_DIVISION_ERROR, 0x08, _asm_isr0, 0);
_set_idt_intr_entry(FAULT_GENERAL_PROTECTION, 0x08, _asm_isr13, 0);
_set_idt_intr_entry(FAULT_PAGE_FAULT, 0x08, _asm_isr14, 0);
+ _set_idt_intr_entry(FAULT_STACK_SEG_FAULT, 0x08, _asm_isr12, 0);
_set_idt_intr_entry(APIC_ERROR_IV, 0x08, _asm_isr250, 0);
_set_idt_intr_entry(APIC_LINT0_IV, 0x08, _asm_isr251, 0);
// system defined interrupts
_set_idt_intr_entry(LUNAIX_SYS_PANIC, 0x08, _asm_isr32, 0);
- // syscall is a trap gate (recall: trap does NOT clear IF flag upon
- // interruption)
- // // XXX: this should be fine, as our design of context switch support
- // interruptible syscall We make this a non-trap entry, and enable interrupt
+ // We make this a non-trap entry, and enable interrupt
// only when needed!
- // FIXME: This may cause nasty concurrency bug! We should 'lockify' our
- // code!
_set_idt_intr_entry(LUNAIX_SYS_CALL, 0x08, _asm_isr33, 3);
}
\ No newline at end of file
#define __ASM__
#include <arch/x86/interrupts.h>
#include <lunaix/common.h>
+#include <lunaix/syscall.h>
#define __ASM_INTR_DIAGNOSIS
.macro isr_template vector, no_error_code=1
.skip 16
#endif
+.section .bss
+ .align 16
+ lo_tmp_stack:
+ .skip 128
+ tmp_stack:
+
.section .text
isr_template FAULT_DIVISION_ERROR
isr_template FAULT_GENERAL_PROTECTION, no_error_code=0
isr_template FAULT_PAGE_FAULT, no_error_code=0
+ isr_template FAULT_STACK_SEG_FAULT, no_error_code=0
isr_template LUNAIX_SYS_PANIC
isr_template LUNAIX_SYS_CALL
interrupt_wrapper:
/*
- Stack layout
+ Stack layout (layout of struct isr_param)
msa: [ss]
[esp]
- eflags
+ eflags > offset = 48 + 16 = 64
cs
eip
err_code
las: Least Significant Address
msa: Most Significant Address
*/
+ cld
pushl %esp
subl $16, %esp
movw %ax, %ds
movw %ax, %es
+ # 保存用户栈顶指针。这是因为我们允许系统调用内进行上下文切换,而这样一来,我们就失去了用户栈的信息,
+ # 这样一来,就无法设置信号上下文。这主要是为了实现了pause()而做的准备
+ movl (__current), %eax
+ movl 68(%esp), %ebx # 取出esp
+ movl %ebx, 84(%eax) # 存入__current->ustack_top
+
1:
movl %esp, %eax
andl $0xfffffff0, %esp
addl $8, %esp
-#ifdef __ASM_INTR_DIAGNOSIS
pushl %eax
+#ifdef __ASM_INTR_DIAGNOSIS
movl 4(%esp), %eax
movl %eax, debug_resv
- popl %eax
#endif
+ # 处理TSS.ESP的一些边界条件。如果是正常iret(即从内核模式*优雅地*退出)
+ # 那么TSS.ESP0应该为iret进行弹栈后,%esp的值。
+ # 所以这里的边界条件是:如返回用户模式,iret会额外弹出8个字节(ss,esp)
+ movl 8(%esp), %eax
+ andl $3, %eax
+ setnz %al
+ shll $3, %eax
+ addl $16, %eax
+ addl %esp, %eax
+ movl %eax, (_tss + 4)
+ popl %eax
iret
+
+ .global switch_to
+ switch_to:
+ # 约定
+    # arg1: 目标进程PCB地址 (next)
+
+ popl %ecx # next
+ movl __current, %eax
+ movl 88(%eax), %ebx # __current->pagetable
+ movl 88(%ecx), %eax # next->pagetable
+
+    cmpl %ebx, %eax # if (next->pagetable != __current->pagetable) {
+ jz 1f
+ movl %eax, %cr3 # cpu_lcr3(next->pagetable)
+ # }
+ 1:
+ movl %ecx, __current # __current = next
+
+ # 我们已经处在了新的地址空间,为了避免影响其先前的栈布局
+ # 需要使用一个临时的栈空间
+ movl $tmp_stack, %esp
+ call signal_dispatch # kernel/signal.c
+
+ test %eax, %eax # do we have signal to handle?
+ jz 1f
+ jmp handle_signal
+ 1:
+ leal 8(%ecx), %eax
+ jmp soft_iret
+
+ .global handle_signal
+ handle_signal:
+ # 注意1:任何对proc_sig的布局改动,都须及时的保证这里的一致性!
+ # 注意2:handle_signal在调用之前,须确保proc_sig已经写入用户栈!
+ leal 8(%eax), %ebx # arg1 in %eax: addr of proc_sig structure in user stack
+
+ pushl $UDATA_SEG # proc_sig->prev_context.ss
+ pushl %eax # esp
+ pushl 64(%ebx) # proc_sig->prev_context.eflags
+ pushl $UCODE_SEG # cs
+ pushl $sig_wrapper # eip for sig wrapper
+
+ movw $UDATA_SEG, %cx # switch data seg to user mode
+ movw %cx, %es
+ movw %cx, %ds
+ movw %cx, %fs
+ movw %cx, %gs
+
+ iret
+
+.section .usrtext
+ sig_wrapper: # in user mode
+ movl %esp, %eax
+ and $0xfffffff0, %esp
+ subl $8, %esp
+ pushl %eax # Addr to proc_sig structure
+ pushl 4(%eax) # proc_sig->sig_num ---- 16 bytes aligned
+
+ call (%eax) # invoke signal handler
+
+ # invoke the sigreturn syscall to exit the signal wrapper
+ movl $__SYSCALL_sigreturn, %eax
+ movl 4(%esp), %ebx
+ int $LUNAIX_SYS_CALL
+
+ ud2 # never reach!
\ No newline at end of file
#include <arch/x86/interrupts.h>
+#include <arch/x86/tss.h>
#include <hal/apic.h>
#include <hal/cpu.h>
#include <lunaix/mm/page.h>
{
__current->intr_ctx = *param;
-#ifdef USE_KERNEL_PT
- cpu_lcr3(__kernel_ptd);
-
- vmm_mount_pd(PD_MOUNT_1, __current->page_table);
-#endif
-
isr_param* lparam = &__current->intr_ctx;
if (lparam->vector <= 255) {
apic_done_servicing();
}
-#ifdef USE_KERNEL_PT
- cpu_lcr3(__current->page_table);
-#endif
return;
}
\ No newline at end of file
intr_subscribe(FAULT_DIVISION_ERROR, intr_routine_divide_zero);
intr_subscribe(FAULT_GENERAL_PROTECTION, intr_routine_general_protection);
intr_subscribe(FAULT_PAGE_FAULT, intr_routine_page_fault);
+ intr_subscribe(FAULT_STACK_SEG_FAULT, intr_routine_page_fault);
intr_subscribe(LUNAIX_SYS_PANIC, intr_routine_sys_panic);
intr_subscribe(APIC_SPIV_IV, intr_routine_apic_spi);
intr_subscribe(APIC_ERROR_IV, intr_routine_apic_error);
- intr_set_fallback_handler(intr_set_fallback_handler);
+ intr_set_fallback_handler(intr_routine_fallback);
}
\ No newline at end of file
va_end(args);
}
+#define COW_MASK (REGION_RSHARED | REGION_READ | REGION_WRITE)
+
extern void
__print_panic_msg(const char* msg, const isr_param* param);
goto segv_term;
}
- struct mm_region* hit_region = region_get(__current, ptr);
+ v_mapping mapping;
+ if (!vmm_lookup(ptr, &mapping)) {
+ goto segv_term;
+ }
+
+ if (!SEL_RPL(param->cs)) {
+ // 如果是内核页错误……
+ if (do_kernel(&mapping)) {
+ return;
+ }
+ goto segv_term;
+ }
+
+ struct mm_region* hit_region = region_get(&__current->mm.regions, ptr);
if (!hit_region) {
// Into the void...
goto segv_term;
}
- x86_pte_t* pte = PTE_MOUNTED(PD_REFERENCED, ptr >> 12);
- if (*pte & PG_PRESENT) {
- if ((hit_region->attr & REGION_PERM_MASK) ==
- (REGION_RSHARED | REGION_READ)) {
+ x86_pte_t* pte = &PTE_MOUNTED(PD_REFERENCED, ptr >> 12);
+ if ((*pte & PG_PRESENT)) {
+ if ((hit_region->attr & COW_MASK) == COW_MASK) {
// normal page fault, do COW
cpu_invplg(pte);
uintptr_t pa =
(uintptr_t)vmm_dup_page(__current->pid, PG_ENTRY_ADDR(*pte));
pmm_free_page(__current->pid, *pte & ~0xFFF);
*pte = (*pte & 0xFFF) | pa | PG_WRITE;
- return;
+ goto resolved;
}
// impossible cases or accessing privileged page
goto segv_term;
// Invalid location
goto segv_term;
}
+
uintptr_t loc = *pte & ~0xfff;
- // a writable page, not present, pte attr is not null
- // and no indication of cached page -> a new page need to be alloc
+
+ // a writable page, not present, not cached, pte attr is not null
+ // -> a new page need to be alloc
if ((hit_region->attr & REGION_WRITE) && (*pte & 0xfff) && !loc) {
cpu_invplg(pte);
uintptr_t pa = pmm_alloc_page(__current->pid, 0);
*pte = *pte | pa | PG_PRESENT;
- return;
+ goto resolved;
}
+
// page not present, bring it from disk or somewhere else
__print_panic_msg("WIP page fault route", param);
while (1)
param->eip);
terminate_proc(LXSEGFAULT);
// should not reach
+ while (1)
+ ;
+
+resolved:
+ cpu_invplg(ptr);
+ return;
+}
+
+int
+do_kernel(v_mapping* mapping)
+{
+ uintptr_t addr = mapping->va;
+ if (addr >= KHEAP_START && addr < PROC_START) {
+ // This is kernel heap page
+ uintptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
+ *mapping->pte = (*mapping->pte & 0xfff) | pa | PG_PRESENT;
+ cpu_invplg(mapping->pte);
+ cpu_invplg(addr);
+ goto done;
+ }
+
+ return 0;
+done:
+ return 1;
}
\ No newline at end of file
.long __lxsys_exit
.long __lxsys_wait
.long __lxsys_waitpid /* 10 */
+ .long __lxsys_sigreturn
+ .long __lxsys_sigprocmask
+ .long __lxsys_signal
+ .long __lxsys_pause
2:
.rept __SYSCALL_MAX - (2b - 1b)/4
.long 0
popl %ebp
- ret
-
-
+ ret
\ No newline at end of file
#include <arch/x86/tss.h>
#include <lunaix/common.h>
-struct x86_tss _tss = {
- .link = 0,
- .esp0 = KSTACK_START,
- .ss0 = KDATA_SEG
-};
+struct x86_tss _tss = { .link = 0, .esp0 = KSTACK_TOP, .ss0 = KDATA_SEG };
-void tss_update_esp(uint32_t esp0) {
+void
+tss_update_esp(uint32_t esp0)
+{
_tss.esp0 = esp0;
}
\ No newline at end of file
setup_memory((multiboot_memory_map_t*)_k_init_mb_info->mmap_addr, map_size);
- // 为内核创建一个专属栈空间。
- for (size_t i = 0; i < (KSTACK_SIZE >> PG_SIZE_BITS); i++) {
- vmm_alloc_page(KERNEL_PID,
- (void*)(KSTACK_START + (i << PG_SIZE_BITS)),
- NULL,
- PG_PREM_RW,
- 0);
- }
kprintf(KINFO "[MM] Allocated %d pages for stack start at %p\n",
KSTACK_SIZE >> PG_SIZE_BITS,
KSTACK_START);
void
spawn_proc0()
{
- struct proc_info proc0;
+ struct proc_info* proc0 = alloc_process();
/**
* @brief
* 目前的解决方案是2
*/
- init_proc(&proc0);
- proc0.intr_ctx = (isr_param){ .registers = { .ds = KDATA_SEG,
- .es = KDATA_SEG,
- .fs = KDATA_SEG,
- .gs = KDATA_SEG },
- .cs = KCODE_SEG,
- .eip = (void*)__proc0,
- .ss = KDATA_SEG,
- .eflags = cpu_reflags() };
+ proc0->intr_ctx = (isr_param){ .registers = { .ds = KDATA_SEG,
+ .es = KDATA_SEG,
+ .fs = KDATA_SEG,
+ .gs = KDATA_SEG },
+ .cs = KCODE_SEG,
+ .eip = (void*)__proc0,
+ .ss = KDATA_SEG,
+ .eflags = cpu_reflags() };
// 方案1:必须在读取eflags之后禁用。否则当进程被调度时,中断依然是关闭的!
// cpu_disable_interrupt();
- setup_proc_mem(&proc0, PD_REFERENCED);
-
- // Ok... 首先fork进我们的零号进程,而后由那里,我们fork进init进程。
- /*
- 这里是一些栈的设置,因为我们将切换到一个新的地址空间里,并且使用一个全新的栈。
- 让iret满意!
- */
- asm volatile("movl %%cr3, %%eax\n"
- "movl %%esp, %%ebx\n"
- "movl %1, %%cr3\n"
- "movl %2, %%esp\n"
+ /* Ok... 首先fork进我们的零号进程,而后由那里,我们fork进init进程。 */
+
+ // 把当前虚拟地址空间(内核)复制一份。
+ proc0->page_table = vmm_dup_vmspace(proc0->pid);
+
+ // 直接切换到新的拷贝,进行配置。
+ cpu_lcr3(proc0->page_table);
+
+ // 为内核创建一个专属栈空间。
+ for (size_t i = 0; i < (KSTACK_SIZE >> PG_SIZE_BITS); i++) {
+ uintptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
+ vmm_set_mapping(PD_REFERENCED,
+ KSTACK_START + (i << PG_SIZE_BITS),
+ pa,
+ PG_PREM_RW,
+ VMAP_NULL);
+ }
+
+ // 手动设置进程上下文:用于第一次调度
+ asm volatile("movl %%esp, %%ebx\n"
+ "movl %1, %%esp\n"
"pushf\n"
+ "pushl %2\n"
"pushl %3\n"
- "pushl %4\n"
"pushl $0\n"
"pushl $0\n"
"movl %%esp, %0\n"
- "movl %%eax, %%cr3\n"
"movl %%ebx, %%esp\n"
- : "=m"(proc0.intr_ctx.registers.esp)
- : "r"(proc0.page_table),
- "i"(KSTACK_TOP),
- "i"(KCODE_SEG),
- "r"(proc0.intr_ctx.eip)
- : "%eax", "%ebx", "memory");
+ : "=m"(proc0->intr_ctx.registers.esp)
+ : "i"(KSTACK_TOP), "i"(KCODE_SEG), "r"(proc0->intr_ctx.eip)
+ : "%ebx", "memory");
// 向调度器注册进程。
- push_process(&proc0);
+ commit_process(proc0);
- // 由于时钟中断未就绪,我们需要手动通知调度器进行第一次调度。这里也会同时隐式地恢复我们的eflags.IF位
- schedule();
+ // 由于时钟中断与APIC未就绪,我们需要手动进行第一次调度。这里也会同时隐式地恢复我们的eflags.IF位
+ proc0->state = PROC_RUNNING;
+ asm volatile("pushl %0\n"
+ "jmp switch_to\n" ::"r"(proc0));
/* Should not return */
assert_msg(0, "Unexpected Return");
}
+extern void __usrtext_start;
+extern void __usrtext_end;
+
// 按照 Memory map 标识可用的物理页
void
setup_memory(multiboot_memory_map_t* map, size_t map_size)
// 重映射VGA文本缓冲区(以后会变成显存,i.e., framebuffer)
for (size_t i = 0; i < vga_buf_pgs; i++) {
- vmm_map_page(KERNEL_PID,
- (void*)(VGA_BUFFER_VADDR + (i << PG_SIZE_BITS)),
- (void*)(VGA_BUFFER_PADDR + (i << PG_SIZE_BITS)),
- PG_PREM_URW);
+ vmm_set_mapping(PD_REFERENCED,
+ VGA_BUFFER_VADDR + (i << PG_SIZE_BITS),
+ VGA_BUFFER_PADDR + (i << PG_SIZE_BITS),
+ PG_PREM_URW,
+ VMAP_NULL);
+ }
+
+ assert_msg(!((uintptr_t)&__usrtext_start & 0xfff) &&
+ !((uintptr_t)&__usrtext_end & 0xfff),
+ "Bad usrtext alignment");
+
+    for (uintptr_t i = (uintptr_t)&__usrtext_start;
+         i < (uintptr_t)&__usrtext_end;
+         i += PG_SIZE) {
+ vmm_set_mapping(PD_REFERENCED, i, V2P(i), PG_PREM_UR, VMAP_NULL);
}
// 更新VGA缓冲区位置至虚拟地址
#include <lunaix/mm/kalloc.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/proc.h>
+#include <lunaix/signal.h>
#include <lunaix/spike.h>
#include <lunaix/syslog.h>
#include <lunaix/timer.h>
LOG_MODULE("INIT")
// #define FORK_BOMB_DEMO
-// #define WAIT_DEMO
+#define WAIT_DEMO
+#define IN_USER_MODE
-void
+void __USER__
+sigchild_handler(int signum)
+{
+ kprintf(KINFO "SIGCHLD received\n");
+}
+
+void __USER__
_lxinit_main()
{
#ifdef FORK_BOMB_DEMO
}
#endif
+ signal(_SIGCHLD, sigchild_handler);
+
int status;
#ifdef WAIT_DEMO
// 测试wait
kprintf("I am child, I am about to terminated\n");
_exit(1);
}
+ pause();
pid_t child = wait(&status);
kprintf("I am parent, my child (%d) terminated normally with code: %d.\n",
child,
pid_t p = 0;
if (!fork()) {
- kprintf("Test no hang!");
- sleep(12);
+ kprintf("Test no hang!\n");
+ sleep(6);
_exit(0);
}
waitpid(-1, &status, WNOHANG);
- // 这里是就是LunaixOS的第一个进程了!
- for (size_t i = 0; i < 5; i++) {
+ for (size_t i = 0; i < 10; i++) {
pid_t pid = 0;
if (!(pid = fork())) {
sleep(i);
struct kdb_keyinfo_pkt keyevent;
while (1) {
if (!kbd_recv_key(&keyevent)) {
- // yield();
+ yield();
continue;
}
if ((keyevent.state & KBD_KEY_FPRESSED) &&
(keyevent.keycode & 0xff00) <= KEYPAD) {
tty_put_char((char)(keyevent.keycode & 0x00ff));
- tty_sync_cursor();
+            // FIXME: I/O to the VGA port is privileged and causes #GP in user mode
+ // tty_sync_cursor();
}
}
-
spin();
}
\ No newline at end of file
#include <lunaix/mm/vmm.h>
-void* vmm_dup_page(pid_t pid, void* pa) {
+void*
+vmm_dup_page(pid_t pid, void* pa)
+{
void* new_ppg = pmm_alloc_page(pid, 0);
- vmm_fmap_page(pid, PG_MOUNT_3, new_ppg, PG_PREM_RW);
- vmm_fmap_page(pid, PG_MOUNT_4, pa, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_3, new_ppg, PG_PREM_RW, VMAP_NULL);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_4, pa, PG_PREM_RW, VMAP_NULL);
- asm volatile (
- "movl %1, %%edi\n"
- "movl %2, %%esi\n"
- "rep movsl\n"
- :: "c"(1024), "r"(PG_MOUNT_3), "r"(PG_MOUNT_4)
- : "memory", "%edi", "%esi");
+ asm volatile("movl %1, %%edi\n"
+ "movl %2, %%esi\n"
+ "rep movsl\n" ::"c"(1024),
+ "r"(PG_MOUNT_3),
+ "r"(PG_MOUNT_4)
+ : "memory", "%edi", "%esi");
- vmm_unset_mapping(PG_MOUNT_3);
- vmm_unset_mapping(PG_MOUNT_4);
+ vmm_del_mapping(PD_REFERENCED, PG_MOUNT_3);
+ vmm_del_mapping(PD_REFERENCED, PG_MOUNT_4);
return new_ppg;
}
\ No newline at end of file
* @file dmm.c
* @author Lunaixsky
 * @brief Dynamic memory manager for the heap. This design does not incorporate any
- * specific implementation of malloc family. The main purpose of this routines is to
- * provide handy method to initialize & grow the heap as needed by upstream implementation.
- *
- * This is designed to be portable, so it can serve as syscalls to malloc/free in the c std lib.
- *
+ * specific implementation of the malloc family. The main purpose of these
+ * routines is to provide handy methods to initialize & grow the heap as needed
+ * by the upstream implementation.
+ *
+ * This is designed to be portable, so it can serve as the syscall layer behind
+ * malloc/free in the C standard library.
+ *
* @version 0.2
* @date 2022-03-3
*
*/
#include <lunaix/mm/dmm.h>
-#include <lunaix/mm/vmm.h>
#include <lunaix/mm/page.h>
+#include <lunaix/mm/vmm.h>
#include <lunaix/status.h>
#include <lunaix/spike.h>
#include <lunaix/syscall.h>
-
-__DEFINE_LXSYSCALL1(int, sbrk, size_t, size) {
+__DEFINE_LXSYSCALL1(int, sbrk, size_t, size)
+{
heap_context_t* uheap = &__current->mm.u_heap;
mutex_lock(&uheap->lock);
- void* r = lxsbrk(uheap, size);
+ void* r = lxsbrk(uheap, size, PG_ALLOW_USER);
mutex_unlock(&uheap->lock);
return r;
}
-__DEFINE_LXSYSCALL1(void*, brk, void*, addr) {
+__DEFINE_LXSYSCALL1(void*, brk, void*, addr)
+{
heap_context_t* uheap = &__current->mm.u_heap;
mutex_lock(&uheap->lock);
- int r = lxbrk(uheap, addr);
+ int r = lxbrk(uheap, addr, PG_ALLOW_USER);
mutex_unlock(&uheap->lock);
return r;
}
heap->brk = heap->start;
mutex_init(&heap->lock);
- return vmm_alloc_page(__current->pid, heap->brk, NULL, PG_PREM_RW, 0) != NULL;
+ int perm = PG_ALLOW_USER;
+ if (heap->brk >= KHEAP_START) {
+ perm = 0;
+ }
+
+ return vmm_set_mapping(
+ PD_REFERENCED, heap->brk, 0, PG_WRITE | perm, VMAP_NULL) != NULL;
}
int
-lxbrk(heap_context_t* heap, void* addr)
+lxbrk(heap_context_t* heap, void* addr, int user)
{
- return -(lxsbrk(heap, addr - heap->brk) == (void*)-1);
+ return -(lxsbrk(heap, addr - heap->brk, user) == (void*)-1);
}
void*
-lxsbrk(heap_context_t* heap, size_t size)
+lxsbrk(heap_context_t* heap, size_t size, int user)
{
if (size == 0) {
return heap->brk;
uintptr_t diff = PG_ALIGN(next) - PG_ALIGN(current_brk);
if (diff) {
- // if next do require new pages to be allocated
- if (!vmm_alloc_pages(__current->pid, (void*)(PG_ALIGN(current_brk) + PG_SIZE),
- diff,
- PG_PREM_RW, 0)) {
- __current->k_status = LXHEAPFULL;
- return NULL;
+ // if next do require new pages to be mapped
+ for (size_t i = 0; i < diff; i += PG_SIZE) {
+ vmm_set_mapping(PD_REFERENCED,
+ PG_ALIGN(current_brk) + PG_SIZE + i,
+ 0,
+ PG_WRITE | user,
+ VMAP_NULL);
}
}
* @file kalloc.c
* @author Lunaixsky
* @brief Implicit free list implementation of malloc family, for kernel use.
- *
+ *
 * This version of the code is, however, the simplest and also insecure and
 * thread-unsafe; it exists just to demonstrate how malloc/free works behind
 * the curtain.
* @version 0.1
* @date 2022-03-05
- *
+ *
* @copyright Copyright (c) 2022
- *
+ *
*/
-#include <lunaix/mm/kalloc.h>
#include <lunaix/mm/dmm.h>
+#include <lunaix/mm/kalloc.h>
+#include <lunaix/mm/vmm.h>
#include <lunaix/common.h>
#include <lunaix/spike.h>
/*
At the beginning, we allocate an empty page and put our initial marker
-
+
| 4/1 | 0/1 |
^ ^ brk
start
- Then, expand the heap further, with HEAP_INIT_SIZE (evaluated to 4096, i.e., 1 pg size)
- This will allocate as much pages and override old epilogue marker with a free region hdr
- and put new epilogue marker. These are handled by lx_grow_heap which is internally used
- by alloc to expand the heap at many moment when needed.
-
+    Then, expand the heap further with HEAP_INIT_SIZE (evaluated to 4096, i.e.,
+    1 page). This will allocate as many pages as needed, override the old
+    epilogue marker with a free region header, and put down a new epilogue
+    marker. These steps are handled by lx_grow_heap, which is used internally
+    by alloc to expand the heap whenever needed.
+
| 4/1 | 4096/0 | ....... | 4096/0 | 0/1 |
^ ^ brk_old ^
start brk
    Note: the brk always points to the beginning of the epilogue.
*/
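
The size/flag cells in the diagram above are boundary tags: the low bits of each header word hold M_ALLOCATED / M_PREV_FREE (see lunaix/mm/dmm.h earlier in this diff) and the upper bits hold the chunk size. A small packing sketch, assuming 4-byte words and 4-byte alignment (helper names are illustrative, not from kalloc.c):

#include <lunaix/mm/dmm.h>

typedef unsigned int hdr_t;

static inline hdr_t
pack_hdr(unsigned int size, int allocated)
{
    /* "4096/0" in the diagram: pack_hdr(4096, 0); "4/1": pack_hdr(4, 1) */
    return (size & ~0x3) | (allocated ? M_ALLOCATED : 0);
}

static inline unsigned int
chunk_size(hdr_t hdr)
{
    return hdr & ~0x3; /* strip M_ALLOCATED | M_PREV_FREE */
}
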
+// FIXME: This should be per-process rather than global!
static heap_context_t kheap;
int
-kalloc_init() {
- kheap.start = &__kernel_heap_start;
+kalloc_init()
+{
+ kheap.start = KHEAP_START;
kheap.brk = NULL;
- kheap.max_addr = (void*)KSTACK_START;
+ kheap.max_addr =
+ (void*)PROC_START; // 在新的布局中,堆结束的地方即为进程表开始的地方
+
+ for (size_t i = 0; i < KHEAP_SIZE_MB >> 2; i++) {
+ vmm_set_mapping(PD_REFERENCED,
+ (uintptr_t)kheap.start + (i << 22),
+ 0,
+ PG_PREM_RW,
+ VMAP_NOMAP);
+ }
if (!dmm_init(&kheap)) {
return 0;
}
void*
-lxmalloc(size_t size) {
+lxmalloc(size_t size)
+{
mutex_lock(&kheap.lock);
void* r = lx_malloc_internal(&kheap, size);
mutex_unlock(&kheap.lock);
}
void*
-lxcalloc(size_t n, size_t elem) {
+lxcalloc(size_t n, size_t elem)
+{
size_t pd = n * elem;
// overflow detection
}
void
-lxfree(void* ptr) {
+lxfree(void* ptr)
+{
if (!ptr) {
return;
}
// make sure the ptr we are 'bout to free makes sense
// the size trick is stolen from glibc's malloc/malloc.c:4437 ;P
-
+
assert_msg(((uintptr_t)ptr < (uintptr_t)(-sz)) && !((uintptr_t)ptr & 0x3),
"free(): invalid pointer");
-
- assert_msg(sz > WSIZE,
- "free(): invalid size");
+
+ assert_msg(sz > WSIZE, "free(): invalid size");
SW(chunk_ptr, hdr & ~M_ALLOCATED);
SW(FPTR(chunk_ptr, sz), hdr & ~M_ALLOCATED);
SW(next_hdr, LW(next_hdr) | M_PREV_FREE);
-
+
coalesce(chunk_ptr);
mutex_unlock(&kheap.lock);
}
-
void*
lx_malloc_internal(heap_context_t* heap, size_t size)
{
|
v
-
+
| xxxx | |
*/
coalesce(n_hdrptr);
return chunk_ptr;
}
-
void*
lx_grow_heap(heap_context_t* heap, size_t sz)
{
void* start;
// The "+ WSIZE" capture the overhead for epilogue marker
- if (!(start = lxsbrk(heap, sz + WSIZE))) {
+ if (!(start = lxsbrk(heap, sz + WSIZE, 0))) {
return NULL;
}
sz = ROUNDUP(sz, BOUNDARY);
-#include <lunaix/mm/region.h>
#include <lunaix/mm/kalloc.h>
-#include <lunaix/process.h>
+#include <lunaix/mm/region.h>
-void region_add(struct proc_info* proc,unsigned long start, unsigned long end, unsigned int attr) {
+void
+region_add(struct mm_region* regions,
+ unsigned long start,
+ unsigned long end,
+ unsigned int attr)
+{
struct mm_region* region = lxmalloc(sizeof(struct mm_region));
- *region = (struct mm_region) {
- .attr = attr,
- .end = end,
- .start = start
- };
+ *region = (struct mm_region){ .attr = attr, .end = end, .start = start };
- if (!proc->mm.regions) {
- llist_init_head(®ion->head);
- proc->mm.regions = region;
- }
- else {
- llist_append(&proc->mm.regions->head, ®ion->head);
- }
+ llist_append(®ions->head, ®ion->head);
}
-void region_release_all(struct proc_info* proc) {
- struct mm_region* head = proc->mm.regions;
+void
+region_release_all(struct mm_region* regions)
+{
struct mm_region *pos, *n;
- llist_for_each(pos, n, &head->head, head) {
+ llist_for_each(pos, n, ®ions->head, head)
+ {
lxfree(pos);
}
+}
+
+void
+region_copy(struct mm_region* src, struct mm_region* dest)
+{
+ if (!src) {
+ return;
+ }
- proc->mm.regions = NULL;
+ struct mm_region *pos, *n;
+
+ llist_for_each(pos, n, &src->head, head)
+ {
+ region_add(dest, pos->start, pos->end, pos->attr);
+ }
}
-struct mm_region* region_get(struct proc_info* proc, unsigned long vaddr) {
- struct mm_region* head = proc->mm.regions;
-
- if (!head) {
+struct mm_region*
+region_get(struct mm_region* regions, unsigned long vaddr)
+{
+ if (!regions) {
return NULL;
}
struct mm_region *pos, *n;
- llist_for_each(pos, n, &head->head, head) {
- if (vaddr >= pos->start && vaddr < pos->end) {
+ llist_for_each(pos, n, ®ions->head, head)
+ {
+ if (pos->start <= vaddr && vaddr < pos->end) {
return pos;
}
}
#include <hal/cpu.h>
#include <klibc/string.h>
+#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
-#include <stdbool.h>
-
void
vmm_init()
{
x86_page_table*
vmm_init_pd()
{
- x86_page_table* dir = (x86_page_table*)pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
+ x86_page_table* dir =
+ (x86_page_table*)pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
for (size_t i = 0; i < PG_MAX_ENTRIES; i++) {
dir->entry[i] = PTE_NULL;
}
}
int
-__vmm_map_internal(pid_t pid,
- uint32_t l1_inx,
- uint32_t l2_inx,
- uintptr_t pa,
- pt_attr attr,
- int forced)
+vmm_set_mapping(uintptr_t mnt,
+ uintptr_t va,
+ uintptr_t pa,
+ pt_attr attr,
+ int options)
{
- x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
- x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_inx);
+ assert((uintptr_t)va % PG_SIZE == 0);
+
+ uintptr_t l1_inx = L1_INDEX(va);
+ uintptr_t l2_inx = L2_INDEX(va);
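+ // The mounted directory's last entry points back at the directory itself, so through
+ // mount point `mnt` the directory is visible at (mnt | 1023 << 12) and the page table
+ // for L1 index i at (mnt | i << 12).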
+ x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
+ x86_page_table* l2pt = (x86_page_table*)(mnt | (l1_inx << 12));
// See if attr makes sense
assert(attr <= 128);
if (!l1pt->entry[l1_inx]) {
- x86_page_table* new_l1pt_pa = pmm_alloc_page(pid, PP_FGPERSIST);
+ x86_page_table* new_l1pt_pa = pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
// Out of physical memory!
if (!new_l1pt_pa) {
}
// This must be writable
- l1pt->entry[l1_inx] = NEW_L1_ENTRY(attr | PG_WRITE, new_l1pt_pa);
- memset((void*)L2_VADDR(l1_inx), 0, PG_SIZE);
+ l1pt->entry[l1_inx] =
+ NEW_L1_ENTRY(attr | PG_WRITE | PG_PRESENT, new_l1pt_pa);
+
+ memset((void*)l2pt, 0, PG_SIZE);
+ } else {
+ x86_pte_t pte = l2pt->entry[l2_inx];
+ if (pte && (options & VMAP_IGNORE)) {
+ return 1;
+ }
}
- x86_pte_t l2pte = l2pt->entry[l2_inx];
- if (l2pte) {
- if (!forced) {
- return 0;
- }
+ if (mnt == PD_REFERENCED) {
+ cpu_invplg(va);
}
- if ((HAS_FLAGS(attr, PG_PRESENT))) {
- // add one on reference count, regardless of existence.
- pmm_ref_page(pid, pa);
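+ // VMAP_NOMAP: only make sure the backing page table exists; leave the PTE untouched.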
+ if ((options & VMAP_NOMAP)) {
+ return 1;
}
l2pt->entry[l2_inx] = NEW_L2_ENTRY(attr, pa);
-
return 1;
}
-void*
-vmm_map_page(pid_t pid, void* va, void* pa, pt_attr tattr)
-{
- // 显然,对空指针进行映射没有意义。
- if (!pa || !va) {
- return NULL;
- }
-
- assert(((uintptr_t)va & 0xFFFU) == 0) assert(((uintptr_t)pa & 0xFFFU) == 0);
-
- uint32_t l1_index = L1_INDEX(va);
- uint32_t l2_index = L2_INDEX(va);
- x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
-
- // 在页表与页目录中找到一个可用的空位进行映射(位于va或其附近)
- x86_pte_t l1pte = l1pt->entry[l1_index];
- x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_index);
- while (l1pte && l1_index < PG_MAX_ENTRIES) {
- if (l2_index == PG_MAX_ENTRIES) {
- l1_index++;
- l2_index = 0;
- l1pte = l1pt->entry[l1_index];
- l2pt = (x86_page_table*)L2_VADDR(l1_index);
- }
- // 页表有空位,只需要开辟一个新的 PTE (Level 2)
- if (__vmm_map_internal(pid, l1_index, l2_index, pa, tattr, false)) {
- return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
- }
- l2_index++;
- }
-
- // 页目录与所有页表已满!
- if (l1_index > PG_MAX_ENTRIES) {
- return NULL;
- }
-
- if (!__vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, tattr, false)) {
- return NULL;
- }
-
- return (void*)V_ADDR(l1_index, l2_index, PG_OFFSET(va));
-}
-
-void*
-vmm_fmap_page(pid_t pid, void* va, void* pa, pt_attr tattr)
-{
- if (!pa || !va) {
- return NULL;
- }
-
- assert(((uintptr_t)va & 0xFFFU) == 0) assert(((uintptr_t)pa & 0xFFFU) == 0);
-
- uint32_t l1_index = L1_INDEX(va);
- uint32_t l2_index = L2_INDEX(va);
-
- if (!__vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, tattr, true)) {
- return NULL;
- }
-
- cpu_invplg(va);
-
- return va;
-}
-
-void*
-vmm_alloc_page(pid_t pid, void* vpn, void** pa, pt_attr tattr, pp_attr_t pattr)
-{
- void* pp = pmm_alloc_page(pid, pattr);
- void* result = vmm_map_page(pid, vpn, pp, tattr);
- if (!result) {
- pmm_free_page(pp, pid);
- }
- pa ? (*pa = pp) : 0;
- return result;
-}
-
-int
-vmm_alloc_pages(pid_t pid, void* va, size_t sz, pt_attr tattr, pp_attr_t pattr)
+uintptr_t
+vmm_del_mapping(uintptr_t mnt, uintptr_t va)
{
- assert((uintptr_t)va % PG_SIZE == 0) assert(sz % PG_SIZE == 0);
-
- void* va_ = va;
- for (size_t i = 0; i < (sz >> PG_SIZE_BITS); i++, va_ += PG_SIZE) {
- void* pp = pmm_alloc_page(pid, pattr);
- uint32_t l1_index = L1_INDEX(va_);
- uint32_t l2_index = L2_INDEX(va_);
- if (!pp || !__vmm_map_internal(
- pid,
- l1_index, l2_index, (uintptr_t)pp, tattr, false)) {
- // if one failed, release previous allocated pages.
- va_ = va;
- for (size_t j = 0; j < i; j++, va_ += PG_SIZE) {
- vmm_unmap_page(pid, va_);
- }
-
- return false;
- }
- }
-
- return true;
-}
-
-int
-vmm_set_mapping(pid_t pid, void* va, void* pa, pt_attr attr) {
- assert(((uintptr_t)va & 0xFFFU) == 0);
-
- uint32_t l1_index = L1_INDEX(va);
- uint32_t l2_index = L2_INDEX(va);
-
- // prevent map of recursive mapping region
- if (l1_index == 1023) {
- return 0;
- }
-
- __vmm_map_internal(pid, l1_index, l2_index, (uintptr_t)pa, attr, false);
- return 1;
-}
-
-void
-__vmm_unmap_internal(pid_t pid, void* va, int free_ppage) {
assert(((uintptr_t)va & 0xFFFU) == 0);
uint32_t l1_index = L1_INDEX(va);
uint32_t l2_index = L2_INDEX(va);
// prevent unmap of recursive mapping region
if (l1_index == 1023) {
- return;
+ return 0;
}
- x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
+ x86_page_table* l1pt = (x86_page_table*)(mnt | (1023 << 12));
x86_pte_t l1pte = l1pt->entry[l1_index];
if (l1pte) {
- x86_page_table* l2pt = (x86_page_table*)L2_VADDR(l1_index);
+ x86_page_table* l2pt = (x86_page_table*)(mnt | (l1_index << 12));
x86_pte_t l2pte = l2pt->entry[l2_index];
- if (IS_CACHED(l2pte) && free_ppage) {
- pmm_free_page(pid, (void*)l2pte);
- }
+
cpu_invplg(va);
l2pt->entry[l2_index] = PTE_NULL;
+
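+ // Hand back the physical address that was mapped here; the caller may use it to release the frame.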
+ return PG_ENTRY_ADDR(l2pte);
}
-}
-void
-vmm_unset_mapping(void* va) {
- __vmm_unmap_internal(0, va, false);
+ return 0;
}
-void
-vmm_unmap_page(pid_t pid, void* va)
-{
- __vmm_unmap_internal(pid, va, true);
-}
-
-v_mapping
-vmm_lookup(void* va)
+int
+vmm_lookup(uintptr_t va, v_mapping* mapping)
{
- assert(((uintptr_t)va & 0xFFFU) == 0);
-
uint32_t l1_index = L1_INDEX(va);
uint32_t l2_index = L2_INDEX(va);
x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
x86_pte_t l1pte = l1pt->entry[l1_index];
- v_mapping mapping = { .flags = 0, .pa = 0, .pn = 0 };
if (l1pte) {
x86_pte_t* l2pte =
&((x86_page_table*)L2_VADDR(l1_index))->entry[l2_index];
if (*l2pte) {
- mapping.flags = PG_ENTRY_FLAGS(*l2pte);
- mapping.pa = PG_ENTRY_ADDR(*l2pte);
- mapping.pn = mapping.pa >> PG_SIZE_BITS;
- mapping.pte = l2pte;
+ mapping->flags = PG_ENTRY_FLAGS(*l2pte);
+ mapping->pa = PG_ENTRY_ADDR(*l2pte);
+ mapping->pn = mapping->pa >> PG_SIZE_BITS;
+ mapping->pte = l2pte;
+ mapping->va = va;
+ return 1;
}
}
-
- return mapping;
+ return 0;
}
void*
-vmm_v2p(void* va)
+vmm_mount_pd(uintptr_t mnt, void* pde)
{
- return (void*)vmm_lookup(va).pa;
-}
-
-void*
-vmm_mount_pd(uintptr_t mnt, void* pde) {
x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
l1pt->entry[(mnt >> 22)] = NEW_L1_ENTRY(T_SELF_REF_PERM, pde);
cpu_invplg(mnt);
}
void*
-vmm_unmount_pd(uintptr_t mnt) {
+vmm_unmount_pd(uintptr_t mnt)
+{
x86_page_table* l1pt = (x86_page_table*)L1_BASE_VADDR;
l1pt->entry[(mnt >> 22)] = 0;
cpu_invplg(mnt);
#include <arch/x86/boot/multiboot.h>
#include <lunaix/common.h>
#include <lunaix/lunistd.h>
+#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/peripheral/ps2kbd.h>
#include <lunaix/proc.h>
void
lock_reserved_memory();
+void
+unlock_reserved_memory();
+
+void
+__do_reserved_memory(int unlock);
+
+void __USER__
+__proc0_usr()
+{
+ if (!fork()) {
+ asm("jmp _lxinit_main");
+ }
+
+ while (1) {
+ yield();
+ }
+}
+
/**
* @brief Process #0 of LunaixOS. This process is always runnable.
*
__proc0()
{
init_platform();
- if (!fork()) {
- asm("jmp _lxinit_main");
- }
- while (1) {
- yield();
- }
+ init_proc_user_space(__current);
+
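+ // Fabricate a far-return frame: SS, ESP, CS, EIP pushed in that order. Because this
+ // is a return to a less privileged level, retf pops EIP, CS, ESP and SS, dropping us
+ // into __proc0_usr in ring 3 on the user stack.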
+ asm volatile("movw %0, %%ax\n"
+ "movw %%ax, %%es\n"
+ "movw %%ax, %%ds\n"
+ "movw %%ax, %%fs\n"
+ "movw %%ax, %%gs\n"
+ "pushl %0\n"
+ "pushl %1\n"
+ "pushl %2\n"
+ "pushl %3\n"
+ "retf" ::"i"(UDATA_SEG),
+ "i"(USTACK_TOP & ~0xf),
+ "i"(UCODE_SEG),
+ "r"(__proc0_usr)
+ : "eax", "memory");
}
extern uint8_t __kernel_start; /* link/linker.ld */
{
assert_msg(kalloc_init(), "Fail to initialize heap");
- size_t hhk_init_pg_count = ((uintptr_t)(&__init_hhk_end)) >> PG_SIZE_BITS;
- kprintf(KINFO "[MM] Releaseing %d pages from 0x0.\n", hhk_init_pg_count);
-
// Fuck it, I will no longer bother this little 1MiB
// I just release 4 pages for my APIC & IOAPIC remappings
- for (size_t i = 0; i < 3; i++) {
- vmm_unmap_page(KERNEL_PID, (void*)(i << PG_SIZE_BITS));
- }
+ // for (size_t i = 0; i < 3; i++) {
+ // vmm_del_mapping(PD_REFERENCED, (void*)(i << PG_SIZE_BITS));
+ // }
// Lock all reserved pages (memory-mapped I/O, ACPI tables, etc.) and identity-map (1:1) them
lock_reserved_memory();
KERNEL_PID, FLOOR(__APIC_BASE_PADDR, PG_SIZE_BITS), 0);
pmm_mark_page_occupied(KERNEL_PID, FLOOR(ioapic_addr, PG_SIZE_BITS), 0);
- vmm_set_mapping(KERNEL_PID, APIC_BASE_VADDR, __APIC_BASE_PADDR, PG_PREM_RW);
- vmm_set_mapping(KERNEL_PID, IOAPIC_BASE_VADDR, ioapic_addr, PG_PREM_RW);
+ vmm_set_mapping(
+ PD_REFERENCED, MMIO_APIC, __APIC_BASE_PADDR, PG_PREM_RW, VMAP_NULL);
+ vmm_set_mapping(
+ PD_REFERENCED, MMIO_IOAPIC, ioapic_addr, PG_PREM_RW, VMAP_NULL);
apic_init();
ioapic_init();
syscall_install();
- for (size_t i = 256; i < hhk_init_pg_count; i++) {
- vmm_unmap_page(KERNEL_PID, (void*)(i << PG_SIZE_BITS));
+ unlock_reserved_memory();
+
+ for (size_t i = 0; i < (uintptr_t)(&__init_hhk_end); i += PG_SIZE) {
+ vmm_del_mapping(PD_REFERENCED, (void*)i);
+ pmm_free_page(KERNEL_PID, (void*)i);
}
}
void
lock_reserved_memory()
+{
+ __do_reserved_memory(0);
+}
+
+void
+unlock_reserved_memory()
+{
+ __do_reserved_memory(1);
+}
+
+void
+__do_reserved_memory(int unlock)
{
multiboot_memory_map_t* mmaps = _k_init_mb_info->mmap_addr;
size_t map_size =
_k_init_mb_info->mmap_length / sizeof(multiboot_memory_map_t);
+ // v_mapping mapping;
for (unsigned int i = 0; i < map_size; i++) {
multiboot_memory_map_t mmap = mmaps[i];
- if (mmap.type == MULTIBOOT_MEMORY_AVAILABLE) {
+ uint8_t* pa = PG_ALIGN(mmap.addr_low);
+ if (mmap.type == MULTIBOOT_MEMORY_AVAILABLE || pa <= MEM_4MB) {
+ // Don't fuck up our kernel code or any free area!
continue;
}
- uint8_t* pa = PG_ALIGN(mmap.addr_low);
size_t pg_num = CEIL(mmap.len_low, PG_SIZE_BITS);
- for (size_t j = 0; j < pg_num; j++) {
- vmm_set_mapping(KERNEL_PID,
- (pa + (j << PG_SIZE_BITS)),
- (pa + (j << PG_SIZE_BITS)),
- PG_PREM_R);
+ size_t j = 0;
+ if (!unlock) {
+ for (; j < pg_num; j++) {
+ uintptr_t _pa = pa + (j << PG_SIZE_BITS);
+ if (_pa >= KERNEL_MM_BASE) {
+ // Don't fuck up our kernel space!
+ break;
+ }
+ vmm_set_mapping(PD_REFERENCED, _pa, _pa, PG_PREM_R, VMAP_NULL);
+ }
+ // Save the progress for later unmapping.
+ mmaps[i].len_low = j * PG_SIZE;
+ } else {
+ for (; j < pg_num; j++) {
+ uintptr_t _pa = pa + (j << PG_SIZE_BITS);
+ vmm_del_mapping(PD_REFERENCED, _pa);
+ }
}
}
}
\ No newline at end of file
#include <klibc/string.h>
#include <lunaix/clock.h>
#include <lunaix/common.h>
+#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/process.h>
__dup_pagetable(pid_t pid, uintptr_t mount_point)
{
void* ptd_pp = pmm_alloc_page(pid, PP_FGPERSIST);
- x86_page_table* ptd = vmm_fmap_page(pid, PG_MOUNT_1, ptd_pp, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_1, ptd_pp, PG_PREM_RW, VMAP_NULL);
+
+ x86_page_table* ptd = PG_MOUNT_1;
x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
+ size_t kspace_l1inx = L1_INDEX(KERNEL_MM_BASE);
+
for (size_t i = 0; i < PG_MAX_ENTRIES - 1; i++) {
+
x86_pte_t ptde = pptd->entry[i];
- if (!ptde || !(ptde & PG_PRESENT)) {
+ // Empty or not-present L1 entries are copied over verbatim.
+ // The kernel address space is shared directly.
+ if (!ptde || i >= kspace_l1inx || !(ptde & PG_PRESENT)) {
ptd->entry[i] = ptde;
continue;
}
- x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
+ // Copy the L2 page table
void* pt_pp = pmm_alloc_page(pid, PP_FGPERSIST);
- x86_page_table* pt = vmm_fmap_page(pid, PG_MOUNT_2, pt_pp, PG_PREM_RW);
+ vmm_set_mapping(
+ PD_REFERENCED, PG_MOUNT_2, pt_pp, PG_PREM_RW, VMAP_NULL);
+
+ x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
+ x86_page_table* pt = PG_MOUNT_2;
for (size_t j = 0; j < PG_MAX_ENTRIES; j++) {
x86_pte_t pte = ppt->entry[j];
pt->entry[j] = pte;
}
- ptd->entry[i] = (uintptr_t)pt_pp | PG_PREM_RW;
+ ptd->entry[i] = (uintptr_t)pt_pp | PG_ENTRY_FLAGS(ptde);
}
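+ // Install the self-reference entry so that, once this directory is loaded into CR3,
+ // the recursive mapping region keeps working for the new process.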
ptd->entry[PG_MAX_ENTRIES - 1] = NEW_L1_ENTRY(T_SELF_REF_PERM, ptd_pp);
{
x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
- for (size_t i = 0; i < PG_MAX_ENTRIES - 1; i++) {
+ // only remove user address space
+ for (size_t i = 0; i < L1_INDEX(KERNEL_MM_BASE); i++) {
x86_pte_t ptde = pptd->entry[i];
if (!ptde || !(ptde & PG_PRESENT)) {
continue;
}
void*
-dup_pagetable(pid_t pid)
+vmm_dup_vmspace(pid_t pid)
{
return __dup_pagetable(pid, PD_REFERENCED);
}
pgid = pgid ? pgid : proc->pid;
- llist_delete(&proc->grp_member);
struct proc_info* gruppenfuhrer = get_process(pgid);
- if (!gruppenfuhrer) {
+ if (!gruppenfuhrer || proc->pgid == proc->pid) {
__current->k_status = LXINVL;
return -1;
}
+ llist_delete(&proc->grp_member);
llist_append(&gruppenfuhrer->grp_member, &proc->grp_member);
proc->pgid = pgid;
}
void
-init_proc(struct proc_info* pcb)
+init_proc_user_space(struct proc_info* pcb)
{
- memset(pcb, 0, sizeof(*pcb));
+ vmm_mount_pd(PD_MOUNT_1, pcb->page_table);
+
+ /*--- Allocate the user stack ---*/
+
+ // Register the user stack region
+ region_add(
+ &pcb->mm.regions, USTACK_END, USTACK_TOP, REGION_RW | REGION_RSHARED);
+
+ // Reserve the address range only; physical pages are allocated on demand by the page fault handler.
+ for (uintptr_t i = PG_ALIGN(USTACK_END); i < USTACK_TOP; i += PG_SIZE) {
+ vmm_set_mapping(PD_MOUNT_1, i, 0, PG_ALLOW_USER | PG_WRITE, VMAP_NULL);
+ }
+
+ // todo: other uspace initialization stuff
- pcb->pid = alloc_pid();
- pcb->created = clock_systime();
- pcb->state = PROC_CREATED;
- pcb->pgid = pcb->pid;
+ vmm_unmount_pd(PD_MOUNT_1);
+}
+
+void
+__mark_region(uintptr_t start_vpn, uintptr_t end_vpn, int attr)
+{
+ for (size_t i = start_vpn; i <= end_vpn; i++) {
+ x86_pte_t* curproc = &PTE_MOUNTED(PD_REFERENCED, i);
+ x86_pte_t* newproc = &PTE_MOUNTED(PD_MOUNT_1, i);
+ cpu_invplg(newproc);
+
+ if ((attr & REGION_MODE_MASK) == REGION_RSHARED) {
+ // For read-shared regions, mark both copies read-only so that any write triggers the COW policy.
+ cpu_invplg(curproc);
+ cpu_invplg(i << 12);
+ *curproc = *curproc & ~PG_WRITE;
+ *newproc = *newproc & ~PG_WRITE;
+ } else {
+ // For private pages, simply remove the page from the new process.
+ *newproc = 0;
+ }
+ }
}
pid_t
dup_proc()
{
- struct proc_info pcb;
- init_proc(&pcb);
- pcb.mm = __current->mm;
- pcb.intr_ctx = __current->intr_ctx;
- pcb.parent = __current;
-
-#ifdef USE_KERNEL_PG
- setup_proc_mem(&pcb, PD_MOUNT_1); //挂载点#1是当前进程的页表
-#else
- setup_proc_mem(&pcb, PD_REFERENCED);
-#endif
+ struct proc_info* pcb = alloc_process();
+ pcb->mm.u_heap = __current->mm.u_heap;
+ pcb->intr_ctx = __current->intr_ctx;
+ pcb->parent = __current;
+
+ region_copy(&__current->mm.regions, &pcb->mm.regions);
+
+ setup_proc_mem(pcb, PD_REFERENCED);
// Refine the page table according to each mm_region
- if (!__current->mm.regions) {
- goto not_copy;
- }
- llist_init_head(&pcb.mm.regions);
struct mm_region *pos, *n;
- llist_for_each(pos, n, &__current->mm.regions->head, head)
+ llist_for_each(pos, n, &pcb->mm.regions.head, head)
{
- region_add(&pcb, pos->start, pos->end, pos->attr);
-
// Write-shared regions need no further handling.
if ((pos->attr & REGION_WSHARED)) {
continue;
}
- uintptr_t start_vpn = PG_ALIGN(pos->start) >> 12;
- uintptr_t end_vpn = PG_ALIGN(pos->end) >> 12;
- for (size_t i = start_vpn; i < end_vpn; i++) {
- x86_pte_t* curproc = &PTE_MOUNTED(PD_MOUNT_1, i);
- x86_pte_t* newproc = &PTE_MOUNTED(PD_MOUNT_2, i);
- cpu_invplg(newproc);
-
- if (pos->attr == REGION_RSHARED) {
- // 如果读共享,则将两者的都标注为只读,那么任何写入都将会应用COW策略。
- cpu_invplg(curproc);
- *curproc = *curproc & ~PG_WRITE;
- *newproc = *newproc & ~PG_WRITE;
- } else {
- // 如果是私有页,则将该页从新进程中移除。
- *newproc = 0;
- }
- }
+ uintptr_t start_vpn = pos->start >> 12;
+ uintptr_t end_vpn = pos->end >> 12;
+ __mark_region(start_vpn, end_vpn, pos->attr);
}
-not_copy:
- vmm_unmount_pd(PD_MOUNT_2);
+ vmm_unmount_pd(PD_MOUNT_1);
// Just like fork, this returns twice.
- pcb.intr_ctx.registers.eax = 0;
+ pcb->intr_ctx.registers.eax = 0;
- push_process(&pcb);
+ commit_process(pcb);
- return pcb.pid;
+ return pcb->pid;
}
extern void __kernel_end;
pid_t pid = proc->pid;
void* pt_copy = __dup_pagetable(pid, usedMnt);
- vmm_mount_pd(PD_MOUNT_2, pt_copy); // mount the new process's page table at mount point #2
+ vmm_mount_pd(PD_MOUNT_1, pt_copy); // mount the new process's page table at mount point #1
// copy the kernel stack
for (size_t i = KSTACK_START >> 12; i <= KSTACK_TOP >> 12; i++) {
- volatile x86_pte_t* ppte = &PTE_MOUNTED(PD_MOUNT_2, i);
+ volatile x86_pte_t* ppte = &PTE_MOUNTED(PD_MOUNT_1, i);
/*
This is a fucking nightmare, the TLB caching keep the rewrite to PTE
x86_pte_t p = *ppte;
void* ppa = vmm_dup_page(pid, PG_ENTRY_ADDR(p));
+ pmm_free_page(pid, PG_ENTRY_ADDR(p));
*ppte = (p & 0xfff) | (uintptr_t)ppa;
}
// We do not need regions for the kernel itself: kernel code and data are reachable only through
// system calls, so any illegal access lands eip outside every region and raises a segmentation fault.
- // 定义用户栈区域,但是不分配实际的物理页。我们会在Page fault
- // handler里面实现动态分配物理页的逻辑。(虚拟内存的好处!)
- // FIXME: 这里应该放到spawn_proc里面。
- // region_add(proc, USTACK_END, USTACK_SIZE, REGION_PRIVATE | REGION_RW);
-
// The remaining regions cannot be known yet, since that requires information about the user program; we handle them later.
-
proc->page_table = pt_copy;
}
\ No newline at end of file
#include <arch/x86/interrupts.h>
#include <arch/x86/tss.h>
+
#include <hal/apic.h>
#include <hal/cpu.h>
+
#include <lunaix/mm/kalloc.h>
+#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/process.h>
#include <lunaix/sched.h>
-
+#include <lunaix/signal.h>
#include <lunaix/spike.h>
#include <lunaix/status.h>
#include <lunaix/syscall.h>
struct proc_info dummy;
-extern void __proc_table;
-
struct scheduler sched_ctx;
LOG_MODULE("SCHED")
sched_init()
{
size_t pg_size = ROUNDUP(sizeof(struct proc_info) * MAX_PROCESS, 0x1000);
- assert_msg(vmm_alloc_pages(
- KERNEL_PID, &__proc_table, pg_size, PG_PREM_RW, PP_FGPERSIST),
- "Fail to allocate proc table");
- sched_ctx = (struct scheduler){ ._procs = (struct proc_info*)&__proc_table,
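+ // Back the process table with freshly allocated physical pages mapped at PROC_START.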
+ for (size_t i = 0; i <= pg_size; i += 4096) {
+ uintptr_t pa = pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
+ vmm_set_mapping(
+ PD_REFERENCED, PROC_START + i, pa, PG_PREM_RW, VMAP_NULL);
+ }
+
+ sched_ctx = (struct scheduler){ ._procs = (struct proc_info*)PROC_START,
.ptable_len = 0,
.procs_index = 0 };
}
}
proc->state = PROC_RUNNING;
- // FIXME: 这里还是得再考虑一下。
- // tss_update_esp(__current->intr_ctx.esp);
-
- if (__current->page_table != proc->page_table) {
- __current = proc;
- cpu_lcr3(__current->page_table);
- // from now on, the we are in the kstack of another process
- } else {
- __current = proc;
- }
+ /*
+ Set tss.esp0 to the esp value saved before the previous schedule.
+ When a signal is handled, the saved context is not restored; it stays on the user stack and
+ we jump straight into the user-space sig_wrapper to run the handler. Only when the
+ user-defined handler returns does the sigreturn system call restore the context (that is,
+ perform another schedule).
+ Since no address-space switch happens in between, the second entry reuses the same kernel
+ stack, whereas tss.esp0 used to always point at its very top. That could overwrite earlier
+ context (for example with nested signal handlers).
+ */
+ tss_update_esp(proc->intr_ctx.registers.esp);
apic_done_servicing();
- asm volatile("movl %0, %%eax\n"
- "jmp soft_iret\n" ::"r"(&__current->intr_ctx)
- : "eax", "memory");
+ asm volatile("pushl %0\n"
+ "jmp switch_to\n" ::"r"(proc)); // kernel/asm/x86/interrupt.S
}
void
return destroy_process(proc->pid);
}
-pid_t
-alloc_pid()
+struct proc_info*
+alloc_process()
{
pid_t i = 0;
for (;
if (i == MAX_PROCESS) {
panick("Panic in Ponyville shimmer!");
}
- return i;
-}
-
-void
-push_process(struct proc_info* process)
-{
- int index = process->pid;
- if (index < 0 || index > sched_ctx.ptable_len) {
- __current->k_status = LXINVLDPID;
- return;
- }
- if (index == sched_ctx.ptable_len) {
+ if (i == sched_ctx.ptable_len) {
sched_ctx.ptable_len++;
}
- sched_ctx._procs[index] = *process;
+ struct proc_info* proc = &sched_ctx._procs[i];
+ memset(proc, 0, sizeof(*proc));
- process = &sched_ctx._procs[index];
+ proc->state = PROC_CREATED;
+ proc->pid = i;
+ proc->created = clock_systime();
+ proc->pgid = proc->pid;
- // make sure the reference is relative to process table
- llist_init_head(&process->children);
- llist_init_head(&process->grp_member);
+ llist_init_head(&proc->mm.regions);
+ llist_init_head(&proc->children);
+ llist_init_head(&proc->grp_member);
+
+ return proc;
+}
+
+void
+commit_process(struct proc_info* process)
+{
+ assert(process == &sched_ctx._procs[process->pid]);
+
+ if (process->state != PROC_CREATED) {
+ __current->k_status = LXINVL;
+ return;
+ }
// every process is the child of first process (pid=1)
if (process->parent) {
proc->state = PROC_DESTROY;
llist_delete(&proc->siblings);
- if (proc->mm.regions) {
- struct mm_region *pos, *n;
- llist_for_each(pos, n, &proc->mm.regions->head, head)
- {
- lxfree(pos);
- }
+ struct mm_region *pos, *n;
+ llist_for_each(pos, n, &proc->mm.regions.head, head)
+ {
+ lxfree(pos);
}
- vmm_mount_pd(PD_MOUNT_2, proc->page_table);
+ vmm_mount_pd(PD_MOUNT_1, proc->page_table);
- __del_pagetable(pid, PD_MOUNT_2);
+ __del_pagetable(pid, PD_MOUNT_1);
- vmm_unmount_pd(PD_MOUNT_2);
+ vmm_unmount_pd(PD_MOUNT_1);
return pid;
}
__current->state = PROC_TERMNAT;
__current->exit_code = exit_code;
+ __SET_SIGNAL(__current->parent->sig_pending, _SIGCHLD);
+
schedule();
}
--- /dev/null
+#include <lunaix/process.h>
+#include <lunaix/sched.h>
+#include <lunaix/signal.h>
+#include <lunaix/status.h>
+#include <lunaix/syscall.h>
+
+extern struct scheduler sched_ctx; /* kernel/sched.c */
+
+void* default_handlers[_SIG_NUM] = {
+ // TODO: add default handlers
+};
+
+// Referenced in kernel/asm/x86/interrupt.S
+void*
+signal_dispatch()
+{
+ // if (!(SEL_RPL(__current->intr_ctx.cs))) {
+ // // Skip signal handling when scheduling within the same privilege level
+ // return 0;
+ // }
+
+ if (!__current->sig_pending) {
+ // No pending signals
+ return 0;
+ }
+
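+ // Pick the highest-numbered signal that is pending and not masked:
+ // 31 - clz() yields the index of the most significant set bit.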
+ int sig_selected =
+ 31 - __builtin_clz(__current->sig_pending & ~__current->sig_mask);
+
+ __current->sig_pending = __current->sig_pending & ~__SIGNAL(sig_selected);
+
+ if (!__current->sig_handler[sig_selected] &&
+ !default_handlers[sig_selected]) {
+ // Ignore the signal if no handler is available at all
+ return 0;
+ }
+
+ uintptr_t ustack = __current->ustack_top & ~0xf;
+
+ if ((int)(ustack - USTACK_END) < (int)sizeof(struct proc_sig)) {
+ // No room left on the user stack for the signal context
+ return 0;
+ }
+
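+ // Carve a proc_sig frame out of the top of the user stack and save the interrupted context into it.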
+ struct proc_sig* sig_ctx =
+ (struct proc_sig*)(ustack - sizeof(struct proc_sig));
+
+ sig_ctx->prev_context = __current->intr_ctx;
+ sig_ctx->sig_num = sig_selected;
+ sig_ctx->signal_handler = __current->sig_handler[sig_selected];
+
+ if (!sig_ctx->signal_handler) {
+ // Fall back to the system default handler when no user-defined handler is installed.
+ sig_ctx->signal_handler = default_handlers[sig_selected];
+ }
+
+ __current->sig_mask |= __SIGNAL(sig_selected);
+
+ return sig_ctx;
+}
+
+__DEFINE_LXSYSCALL1(int, sigreturn, struct proc_sig, *sig_ctx)
+{
+ __current->intr_ctx = sig_ctx->prev_context;
+ __current->sig_mask &= ~__SIGNAL(sig_ctx->sig_num);
+ __current->flags &= ~PROC_FINPAUSE;
+ schedule();
+}
+
+__DEFINE_LXSYSCALL3(int,
+ sigprocmask,
+ int,
+ how,
+ const sigset_t,
+ *set,
+ sigset_t,
+ *oldset)
+{
+ *oldset = __current->sig_mask;
+ if (how == _SIG_BLOCK) {
+ __current->sig_mask |= *set;
+ } else if (how == _SIG_UNBLOCK) {
+ __current->sig_mask &= ~(*set);
+ } else if (how == _SIG_SETMASK) {
+ __current->sig_mask = *set;
+ } else {
+ return 0;
+ }
+ __current->sig_mask &= ~_SIGNAL_UNMASKABLE;
+ return 1;
+}
+
+__DEFINE_LXSYSCALL2(int, signal, int, signum, sighandler_t, handler)
+{
+ if (signum < 0 || signum >= _SIG_NUM) {
+ return -1;
+ }
+
+ if (((1 << signum) & _SIGNAL_UNMASKABLE)) {
+ return -1;
+ }
+
+ __current->sig_handler[signum] = (void*)handler;
+
+ return 0;
+}
+
+__DEFINE_LXSYSCALL(int, pause)
+{
+ __current->flags |= PROC_FINPAUSE;
+
+ __SYSCALL_INTERRUPTIBLE({
+ while ((__current->flags & PROC_FINPAUSE)) {
+ sched_yield();
+ }
+ })
+ __current->k_status = EINTR;
+ return -1;
+}
\ No newline at end of file
-#include <lunaix/spike.h>
#include <arch/x86/interrupts.h>
#include <klibc/stdio.h>
+#include <lunaix/spike.h>
static char buffer[1024];
-void __assert_fail(const char* expr, const char* file, unsigned int line) {
+void
+__assert_fail(const char* expr, const char* file, unsigned int line)
+{
sprintf(buffer, "%s (%s:%u)", expr, file, line);
// Here we load the buffer's address into %edi ("D" constraint)
// This is a convention we made that the LUNAIX_SYS_PANIC syscall will
- // print the panic message passed via %edi. (see kernel/asm/x86/interrupts.c)
- asm(
- "int %0"
- ::"i"(LUNAIX_SYS_PANIC), "D"(buffer)
- );
+ // print the panic message passed via %edi. (see
+ // kernel/asm/x86/interrupts.c)
+ asm("int %0" ::"i"(LUNAIX_SYS_PANIC), "D"(buffer));
- spin(); // never reach
+ spin(); // never reach
}
-void panick(const char* msg) {
- asm(
- "int %0"
- ::"i"(LUNAIX_SYS_PANIC), "D"(msg)
- );
+void
+panick(const char* msg)
+{
+ asm("int %0" ::"i"(LUNAIX_SYS_PANIC), "D"(msg));
spin();
-}
\ No newline at end of file
+}
-#include <lunaix/syscall.h>
#include <arch/x86/interrupts.h>
#include <lunaix/process.h>
#include <lunaix/sched.h>
+#include <lunaix/syscall.h>
#include <lunaix/syslog.h>
LOG_MODULE("SYSCALL")
-extern void syscall_hndlr(isr_param* param);
+extern void
+syscall_hndlr(isr_param* param);
-void syscall_install() {
+void
+syscall_install()
+{
intr_subscribe(LUNAIX_SYS_CALL, syscall_hndlr);
}
\ No newline at end of file
+#include <hal/io.h>
#include <klibc/string.h>
-#include <lunaix/tty/tty.h>
#include <lunaix/common.h>
+#include <lunaix/tty/tty.h>
#include <stdint.h>
-#include <hal/io.h>
#define TTY_WIDTH 80
#define TTY_HEIGHT 25
static uint32_t tty_x = 0;
static uint32_t tty_y = 0;
-void tty_init(void* vga_buf) {
+void
+tty_init(void* vga_buf)
+{
tty_vga_buffer = (vga_attribute*)vga_buf;
tty_clear();
io_outb(0x3D4, 0x0A);
- io_outb(0x3D5, (io_inb(0x3D5) & 0xC0) | 13);
-
- io_outb(0x3D4, 0x0B);
- io_outb(0x3D5, (io_inb(0x3D5) & 0xE0) | 15);
+ io_outb(0x3D5, (io_inb(0x3D5) & 0xC0) | 13);
+
+ io_outb(0x3D4, 0x0B);
+ io_outb(0x3D5, (io_inb(0x3D5) & 0xE0) | 15);
}
-void tty_set_buffer(void* vga_buf) {
+void
+tty_set_buffer(void* vga_buf)
+{
tty_vga_buffer = (vga_attribute*)vga_buf;
}
break;
case '\x08':
tty_x = tty_x ? tty_x - 1 : 0;
- *(tty_vga_buffer + tty_x + tty_y * TTY_WIDTH) = (tty_theme_color | 0x20);
+ *(tty_vga_buffer + tty_x + tty_y * TTY_WIDTH) =
+ (tty_theme_color | 0x20);
break;
default:
- *(tty_vga_buffer + tty_x + tty_y * TTY_WIDTH) = (tty_theme_color | chr);
+ *(tty_vga_buffer + tty_x + tty_y * TTY_WIDTH) =
+ (tty_theme_color | chr);
tty_x++;
break;
}
}
}
-void tty_sync_cursor() {
+void
+tty_sync_cursor()
+{
tty_set_cursor(tty_x, tty_y);
}
-
-void tty_set_cursor(uint8_t x, uint8_t y) {
+void
+tty_set_cursor(uint8_t x, uint8_t y)
+{
if (x >= TTY_WIDTH || y >= TTY_HEIGHT) {
x = y = 0;
}
tty_put_char(*str);
str++;
}
- tty_sync_cursor();
+ // FIXME: This does not work in user mode.
+ // Work around:
+ // 1. (Easy) Define an IO Permission bitmap in TSS
+ // 2. (More effort) Mount onto file system. (/dev/tty)
+ // tty_sync_cursor();
}
void
}
void
-tty_clear_line(unsigned int y) {
- for (size_t i = 0; i < TTY_WIDTH; i++)
- {
+tty_clear_line(unsigned int y)
+{
+ for (size_t i = 0; i < TTY_WIDTH; i++) {
*(tty_vga_buffer + i + y * TTY_WIDTH) = tty_theme_color;
}
}
void
-tty_set_cpos(unsigned int x, unsigned int y) {
+tty_set_cpos(unsigned int x, unsigned int y)
+{
tty_x = x % TTY_WIDTH;
tty_y = y % TTY_HEIGHT;
}
void
-tty_get_cpos(unsigned int* x, unsigned int* y) {
+tty_get_cpos(unsigned int* x, unsigned int* y)
+{
*x = tty_x;
*y = tty_y;
}
vga_attribute
-tty_get_theme() {
+tty_get_theme()
+{
return tty_theme_color;
}
\ No newline at end of file
-#include <stdint.h>
#include <klibc/string.h>
+#include <stdint.h>
void*
memcpy(void* dest, const void* src, size_t num)
{
- uint8_t* dest_ptr = (uint8_t*)dest;
- const uint8_t* src_ptr = (const uint8_t*)src;
- for (size_t i = 0; i < num; i++) {
- *(dest_ptr + i) = *(src_ptr + i);
- }
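+ // rep movsb copies ECX bytes from DS:ESI to ES:EDI (forward copy; assumes DF is clear).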
+ asm volatile("movl %1, %%edi\n"
+ "rep movsb\n" ::"S"(src),
+ "r"(dest),
+ "c"(num)
+ : "edi", "memory");
return dest;
}
void*
memset(void* ptr, int value, size_t num)
{
- uint8_t* c_ptr = (uint8_t*)ptr;
- for (size_t i = 0; i < num; i++) {
- *(c_ptr + i) = (uint8_t)value;
- }
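+ // rep stosb writes AL into ES:EDI, ECX times (assumes DF is clear).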
+ asm volatile("movl %1, %%edi\n"
+ "rep stosb\n" ::"c"(num),
+ "r"(ptr),
+ "a"(value)
+ : "edi", "memory");
return ptr;
}
build/obj/hal/*.o (.text)
}
+ __usrtext_start = ALIGN(4K);
+ .usrtext BLOCK(4K) : AT ( ADDR(.usrtext) - 0xC0000000 ) {
+ build/obj/kernel/*.o (.usrtext)
+ }
+ __usrtext_end = ALIGN(4K);
+
.bss BLOCK(4K) : AT ( ADDR(.bss) - 0xC0000000 ) {
build/obj/kernel/*.o (.bss)
build/obj/hal/*.o (.bss)
}
__kernel_end = ALIGN(4K);
- __proc_table = ALIGN(4K);
- . += 4M;
- __kernel_heap_start = ALIGN(4K); /* 内核结束的地方即堆开始的地方 */
}
\ No newline at end of file