-use("soc/gic")
-
-sources([
- "boot/init.c",
- "boot/kremap.c",
- "boot/start.S"
-])
+use("boot")
+use("exception")
+use("klibc")
-sources([
- "exception/entries.S",
- "exception/context.S",
- "exception/handler.c"
-])
+use("soc/gic")
sources([
"fault.c",
- "trace.c"
+ "trace.c",
+ "vmutils.c"
])
headers([
--- /dev/null
+sources([
+ "init.c",
+ "kremap.c",
+ "start.S"
+])
\ No newline at end of file
#include <lunaix/boot_generic.h>
#include <asm/aa64.h>
#include <asm/aa64_spsr.h>
+#include <hal/devtree.h>
#include "init.h"
set_sysreg(TCR_EL1, tcr);
}
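+/* number of boot_mmapent slots reserved per bootmem_alloc chunk */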
+#define MMAP_ENTS_CHUNK_SIZE 16
+
static inline void
extract_dtb_bootinfo(ptr_t dtb, struct boot_handoff* handoff)
{
+ struct fdt_blob fdt;
+ struct fdt_memscan mscan;
+ struct dt_memory_node mnode;
+
+    int mmap_len = 0, mmap_max_len = MMAP_ENTS_CHUNK_SIZE;
+ size_t pmem_size = 0;
+ struct boot_mmapent* mmap;
+
+ mmap = bootmem_alloc(sizeof(*mmap) * MMAP_ENTS_CHUNK_SIZE);
handoff->kexec.dtb_pa = dtb;
-    // TODO extract /memory, /reserved-memories from dtb
+
+ fdt_load(&fdt, dtb);
+ fdt_memscan_begin(&mscan, &fdt);
+
+ struct boot_mmapent* mmap_ent;
+
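+    // the outer scan visits each memory node in the FDT; the inner scan
+    // walks the address ranges recorded under that node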
+    while (fdt_memscan_nextnode(&mscan, &fdt))
+ {
+ while (fdt_memscan_nextrange(&mscan, &mnode))
+ {
+ mmap_ent = &mmap[mmap_len++];
+ *mmap_ent = (struct boot_mmapent) {
+ .size = mnode.size,
+ .start = mnode.base
+ };
+
+ if (mnode.type == FDT_MEM_FREE) {
+ mmap_ent->type = BOOT_MMAP_FREE;
+ pmem_size += mnode.size;
+ }
+ else {
+ mmap_ent->type = BOOT_MMAP_RSVD;
+ }
+
+        if (mmap_len < mmap_max_len) {
+ continue;
+ }
+
+ mmap_max_len += MMAP_ENTS_CHUNK_SIZE;
+
+        // bootmem_alloc is a bump allocator: a subsequent allocation
+        // simply extends the previous one contiguously.
+ bootmem_alloc(sizeof(*mmap) * MMAP_ENTS_CHUNK_SIZE);
+ }
+ }
+
+ handoff->mem.mmap = mmap;
+ handoff->mem.mmap_len = mmap_len;
+ handoff->mem.size = pmem_size;
}
static inline void
sysreg_flagging(ICC_SRE_EL1, ICC_SRE_SRE, 0);
}
-void
+void boot_text
aarch64_pre_el1_init()
{
setup_gic_sysreg();
}
-bool
+bool boot_text
aarch64_prepare_el1_transfer()
{
ptr_t spsr;
el = read_sysreg(CurrentEL) >> 2;
if (el == 1) {
+ // no transfer required
return false;
}
- spsr = SPSR_AllInt | SPSR_I | SPSR_F | SPSR_SP;
- spsr = BITS_SET(spsr, SPSR_EL, 1);
+ spsr = SPSR_EL1_preset;
+ spsr |= SPSR_AllInt | SPSR_I | SPSR_F | SPSR_A;
if (el == 2) {
set_sysreg(SPSR_EL2, spsr);
return true;
}
-struct boot_handoff*
+struct boot_handoff* boot_text
aarch64_init(ptr_t dtb)
{
setup_evbar();
static pte_t kpt[LEVEL_SIZE][32];
-ptr_t
+ptr_t boot_text
kremap()
{
struct pt_alloc alloc;
1:
mov x0, x19
adr x4, aarch64_init
- bl x4
+ blr x4
// x0: ptr to boot_handoff
adr x4, kernel_bootstrap
- bl x4
\ No newline at end of file
+ blr x4
\ No newline at end of file
--- /dev/null
+sources([
+ "entries.S",
+ "context.S",
+ "syscall_nr.S",
+ "handler.c",
+ "syscall.c",
+ "vmswitch.c"
+])
\ No newline at end of file
#define __ASM__
#include <asm/aa64_msrs.h>
+#include <asm/bits.h>
#include "hart_fields.inc"
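+// scratch stack for the context-switch path; the label sits at the top
+// of the reservation since stacks grow downwards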
+.section .data
+ .align 4
+ .skip 256
+ _aa64_csw_temp_stack:
+
.section .text
.globl _aa64_evec_prehandle
_aa64_switch_task:
// TODO
- b do_eret
\ No newline at end of file
+    // adr cannot target sp directly; stage through x0
+    adr x0, _aa64_csw_temp_stack
+    mov sp, x0
+
+ bl aa64_switch_ttbr
+
+ bl switch_signposting
+ cbnz x0, _aa64_handle_signal
+
+ b do_eret
+
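+    // divert the eret into the pending signal handler: x0 holds the
+    // signal descriptor on the user stack, whose sigact field gives the
+    // handler entry point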
+ _aa64_handle_signal:
+ mov x1, #SPSR_EL0_preset
+ msr SPSR_EL1, x1
+
+ ldr x1, [x0, #sigact]
+    msr ELR_EL1, x1
+
+ msr SP_EL0, x0
+
+ eret
\ No newline at end of file
#include <asm/hart.h>
#include <asm/aa64_exception.h>
+extern void
+handle_mm_abort(struct hart_state* state);
+
+extern void
+aa64_syscall(struct hart_state* hstate);
static inline void
update_thread_context(struct hart_state* state)
}
}
-extern void
-handle_mm_abort(struct hart_state* state);
-
-static void
+static inline void
handle_sync_exception(struct hart_state* hstate)
{
unsigned int ec;
handle_mm_abort(hstate);
break;
+ case EC_SVC:
+ aa64_syscall(hstate);
+ break;
+
default:
fail("unhandled exception (synced)");
break;
}
}
-static void
+static inline void
handle_async_exception(struct hart_state* hstate)
{
+ int err = 0;
+
+ err = gic_handle_irq(hstate);
+ if (!err) {
+ return;
+ }
+ // TODO do we have other cases of async exception?
}
struct hart_state*
hart_execp:
.struct hart_execp + execp_end
hart_end:
+
+ .struct 0
+sig_num:
+    .struct sig_num + 4
+sigact:
+ .struct sigact + 8
+sighand:
+ .struct sighand + 8
+saved_hstate:
+ .struct saved_hstate + 8
+sig_end:
\ No newline at end of file
--- /dev/null
+#include <lunaix/syscall.h>
+#include <lunaix/status.h>
+
+#include <asm/hart.h>
+#include "asm/aa64_exception.h"
+
+extern ptr_t syscall_table[__SYSCALL_MAX];
+
+void
+aa64_syscall(struct hart_state* hstate)
+{
+ unsigned int call_id;
+
+ call_id = esr_iss(hstate->execp.syndrome);
+ call_id = call_id & 0xffff;
+
+    // aa64_syscall returns void: report failures through x0, following
+    // the kernel's negative-errno convention (assumed)
+    if (call_id >= __SYSCALL_MAX) {
+        hstate->registers.x[0] = (reg_t)-EINVAL;
+        return;
+    }
+
+    if (!syscall_table[call_id]) {
+        hstate->registers.x[0] = (reg_t)-EINVAL;
+        return;
+    }
+
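+    // arguments travel in x0-x4 per the syscall convention; pin them to
+    // their registers so the indirect call sees them in place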
+ register reg_t param0 asm("x0") = hstate->registers.x[0];
+ register reg_t param1 asm("x1") = hstate->registers.x[1];
+ register reg_t param2 asm("x2") = hstate->registers.x[2];
+ register reg_t param3 asm("x3") = hstate->registers.x[3];
+ register reg_t param4 asm("x4") = hstate->registers.x[4];
+
+    asm volatile (
+        "blr %[call_fn]"
+        :
+        "+r"(param0)
+        :
+        [call_fn] "r"(syscall_table[call_id]),
+        "r"(param1),
+        "r"(param2),
+        "r"(param3),
+        "r"(param4)
+        :
+        // an indirect call clobbers the remaining caller-saved state
+        "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+        "x14", "x15", "x16", "x17", "x18", "x30", "memory", "cc"
+    );
+
+ hstate->registers.x[0] = param0;
+}
\ No newline at end of file
--- /dev/null
+
+.globl syscall_table
+
+#include <asm-generic/syscall_nr.inc>
\ No newline at end of file
--- /dev/null
+#include <lunaix/process.h>
+#include <asm/aa64_mmu.h>
+#include <asm/tlb.h>
+
+void
+aa64_switch_ttbr()
+{
+    ptr_t ttbr, vmroot;
+
+    // proc_vmroot() returns the root table address as a ptr_t
+    vmroot = proc_vmroot();
+
+ ttbr = read_sysreg(TTBR0_EL1);
+
+ /*
+ We don't differentiate ASID for now
+ and CnP=1
+ */
+
+    if (BITS_GET(ttbr, TTBR_BADDR) == vmroot) {
+ return;
+ }
+
+    ttbr = BITS_SET(ttbr, TTBR_BADDR, vmroot);
+
+ set_sysreg(TTBR0_EL1, ttbr);
+
+    /*
+        TODO: finer-grained control of flushing.
+        Unlike x86, the hardware will not flush the TLB upon switching
+        the translation base.
+
+        As the kernel address space is shared, flushing it should be
+        avoided; possible approaches:
+        1. enable the use of ASIDs and flush accordingly
+        2. enable the use of ASIDs and TTBR1 to house the kernel, using
+           TCR_EL1.A1 to switch between the ASIDs in TTBR0 and TTBR1.
+        3. range flushing (RVAAE1) on all memory regions used by
+           user space.
+    */
+ __tlb_flush_all();
+}
\ No newline at end of file
#include <lunaix/mm/fault.h>
#include <asm/aa64_exception.h>
+#include <asm/aa64_msrs.h>
#include <asm/hart.h>
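+// populate the generic fault context from FAR_EL1 (the faulting VA) and
+// the syndrome captured at exception entry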
+bool
+__arch_prepare_fault_context(struct fault_context* fault)
+{
+ struct hart_state* hs = fault->hstate;
+
+ ptr_t ptr = read_sysreg(FAR_EL1);
+ if (!ptr) {
+ return false;
+ }
+
+ fault->fault_ptep = mkptep_va(VMS_SELF, ptr);
+ fault->fault_data = esr_ec(hs->execp.syndrome);
+ fault->fault_instn = hart_pc(hs);
+ fault->fault_va = ptr;
+
+ return true;
+}
+
void
handle_mm_abort(struct hart_state* state)
{
- // TODO
+    struct fault_context fault = { .hstate = state };
+
+    if (!__arch_prepare_fault_context(&fault)) {
+        return;
+    }
+
+    if (!handle_page_fault(&fault)) {
+        fault_resolving_failed(&fault);
+    }
}
\ No newline at end of file
#define EC_BTI 0b001101
#define EC_EXEC_STATE 0b001110
+#define EC_SVC 0b010101
#define EC_SYS_INST 0b011000
#define EC_I_ABORT 0b100000
#define EC_I_ABORT_EL 0b100001
#include <lunaix/bits.h>
#include "aa64_msrs.h"
+#include "hart.h"
#define FRAME_SIZE 0x10000
#define GITS_CWRRD_OFF BITFIELD(19, 5)
+int
+gic_handle_irq(struct hart_state* hs);
+
#endif /* __LUNAIX_AA64_GIC_H */
#define __LUNAIX_AA64_MMU_H
#include "aa64_asm.h"
+#include <lunaix/bits.h>
#if defined(CONFIG_AA64_PAGE_GRAN_4K)
#define _MMU_TG 0b01
#define TCR_EPD0 (1UL << 7)
#define TCR_A1 (1UL << 22)
+#define TTBR_ASID BITFIELD(63, 48)
+#define TTBR_BADDR BITFIELD(47, 1)
+#define TTBR_CnP BITFLAG(0)
+
#endif /* __LUNAIX_AA64_MMU_H */
#define SP_EL0 __sr_encode(3, 0, 4, 1, 0)
#define SP_EL1 __sr_encode(3, 4, 4, 1, 0)
+#define FAR_EL1 __sr_encode(3, 0, 6, 0, 0)
+
#define ID_AA64PFR0_EL1 __sr_encode(3, 0, 0, 4, 0)
#ifndef __ASM__
#define SPSR_EL BITFIELD(3, 2)
#define SPSR_SP BITFLAG(0)
-#define SPSR_I BITFLAG(7)
#define SPSR_F BITFLAG(6)
#define SPSR_I BITFLAG(7)
+#define SPSR_A BITFLAG(8)
#define SPSR_AllInt BITFLAG(13)
+#define SPSR_PAN BITFLAG(22)
+#define SPSR_UAO BITFLAG(23)
+
+// EL0 must return as EL0t (M[3:0] = 0b0000); setting SPSR_SP is illegal
+#define SPSR_EL0_preset (BITS_AT(0, SPSR_EL))
+#define SPSR_EL1_preset (BITS_AT(1, SPSR_EL) | SPSR_SP | SPSR_UAO)
static inline bool
spsr_from_el0(reg_t spsr)
_x; \
})
+#define data_barrier asm volatile("dsb sy" ::: "memory")
+
#endif /* __LUNAIX_AA64_SYSINST_H */
#ifndef __ASM__
+#include <asm/aa64_msrs.h>
+#include <asm/aa64_spsr.h>
+
#define align_stack(ptr) ((ptr) & ~15)
-#define store_retval(retval) current_thread->hstate->registers.x[0] = (retval)
-#define store_retval_to(th, retval) (th)->hstate->registers.x[0] = (retval)
+#define store_retval(retval) \
+ current_thread->hstate->registers.x[0] = (retval)
+
+#define store_retval_to(th, retval) \
+ (th)->hstate->registers.x[0] = (retval)
static inline void must_inline noret
-switch_context() {
- // TODO
+switch_context()
+{
asm ("b _aa64_switch_task");
unreachable;
}
static inline void must_inline
j_usr(ptr_t sp, ptr_t pc)
{
- // TODO
+ set_sysreg(SPSR_EL1, SPSR_EL0_preset);
+ set_sysreg(SP_EL0, sp);
+    set_sysreg(ELR_EL1, pc);
+ asm ("eret");
+
+ unreachable;
}
#endif
#include <asm-generic/bits.h>
+#ifndef __ASM__
#undef _BITS_EXTRACT
#undef _BITS_INSERT
_r; \
})
-
+#endif
#endif /* __LUNAIX_ARCH_BITS_H */
} compact align(16);
static inline int
-hart_vector_stamp(struct hart_state* hstate) {
+hart_vector_stamp(struct hart_state* hstate)
+{
return BITS_GET(hstate->execp.syndrome, SYNDROME_ETYPE);
}
static inline unsigned int
-hart_ecause(struct hart_state* hstate) {
+hart_ecause(struct hart_state* hstate)
+{
return hstate->execp.syndrome;
}
static inline bool
kernel_context(struct hart_state* hstate)
{
- // TODO
- return false;
+ reg_t spsr;
+
+ spsr = hstate->execp.spsr;
+ return !spsr_from_el0(spsr);
}
static inline ptr_t
#include <lunaix/compiler.h>
#include "aa64_mmu.h"
+#include <lunaix/bits.h>
/* ******** Page Table Manipulation ******** */
// upper attributes
-#define _PTE_UXN (1UL << 54)
-#define _PTE_PXN (1UL << 53)
+#define _PTE_UXN BITFLAG(54)
+#define _PTE_PXN BITFLAG(53)
#define _PTE_XN (_PTE_UXN | _PTE_PXN)
-#define _PTE_Contig (1UL << 52)
-#define _PTE_DBM (1UL << 51)
+#define _PTE_Contig BITFLAG(52)
+#define _PTE_DBM BITFLAG(51)
#ifdef _MMU_USE_OA52
#if CONFIG_AA64_PAGE_GRAN_64K
// lower attributes
-#define _PTE_nG (1UL << 11)
-#define _PTE_AF (1UL << 10)
+#define _PTE_nG BITFLAG(11)
+#define _PTE_AF BITFLAG(10)
// AP bits: R_RNGJG
#define _PTE_AP(p, u) ((((p) & 1) << 1 | ((u) & 1)) << 6)
-#define _PTE_PRW _PTE_AP(0 , 0) // priv rw, unpriv none
-#define _PTE_PRWURW _PTE_AP(0 , 1) // priv rw, unpriv rw
-#define _PTE_U _PTE_AP(0 , 1) // generic unpriv flag
-#define _PTE_PRO _PTE_AP(1 , 0) // priv ro, unpriv none
-#define _PTE_PROURO _PTE_AP(1 , 1) // priv ro, unpriv ro
+
+// el1 rw, el0 deny all
+#define _PTE_PRW _PTE_AP(0 , 0)
+// el1 rw, el0 rw
+#define _PTE_PRWURW _PTE_AP(0 , 1)
+// el0 allow
+#define _PTE_U _PTE_AP(0 , 1)
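+// hardware DBM scheme: with _PTE_DBM set, AP[2]=1 marks a clean page;
+// hardware clears AP[2] on the first write, flagging the page dirty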
+#define _PTE_nDIRTY _PTE_AP(1 , 0)
+// el1 ro, el0 deny all
+#define _PTE_PRO (_PTE_AP(1 , 0) | _PTE_DBM)
+// el1 ro, el0 ro
+#define _PTE_PROURO (_PTE_AP(1 , 1) | _PTE_DBM)
#define _PTE_BLKDESC (0b01)
#define _PTE_TABDESC (0b11)
static inline pte_t
pte_mkclean(pte_t pte)
{
- return __mkpte_from(pte.val & ~_PTE_DBM);
+ return __mkpte_from(pte.val | _PTE_nDIRTY);
}
static inline bool
pte_dirty(pte_t pte)
{
- return !!(pte.val & _PTE_DBM);
+    return (pte.val & _PTE_DBM) && !(pte.val & _PTE_nDIRTY);
}
static inline void
__tlb_invalidate(ptr_t va)
{
sys_a1(tlbi_vaae1, pack_va(0, 0, va));
+ data_barrier;
}
/**
__tlb_flush_asid(unsigned int asid, ptr_t va)
{
sys_a1(tlbi_vae1, pack_va(asid, 0, va));
+ data_barrier;
}
/**
__tlb_flush_all()
{
sys_a0(tlbi_alle1);
+ data_barrier;
}
/**
__tlb_flush_asid_all(unsigned int asid)
{
sys_a1(tlbi_aside1, pack_va(asid, 0, 0));
+ data_barrier;
}
}
#else
sys_a1(tlbi_rvaae1, pack_rva(0, 0, addr, npages, 0));
+ data_barrier;
#endif
}
}
#else
sys_a1(tlbi_rvae1, pack_rva(asid, 0, addr, npages, 0));
+ data_barrier;
#endif
}
--- /dev/null
+sources([
+ "crc.c",
+ "string.c"
+])
\ No newline at end of file
--- /dev/null
+#include <lunaix/types.h>
+#include <klibc/crc.h>
+
+// CRC-32 (reflected, zlib-compatible): seed with ~0, invert the result.
+// The crc32b instruction requires the ARMv8 CRC extension.
+unsigned int
+crc32b(unsigned char* data, unsigned int size)
+{
+    register unsigned int ret asm("x0") = ~0u;
+
+    if (!size)
+        return 0;
+
+    asm volatile(
+        "1:                          \n"
+        "ldrb w2, [%[dest]], %[step] \n"
+        "crc32b %w0, %w0, w2         \n"
+        "sub %[l], %[l], %[step]     \n"
+        "cbnz %[l], 1b               \n"
+        :
+        "+r"(ret),
+        [dest] "+r"(data),
+        [l] "+r"(size)
+        :
+        [step] "I"(1)
+        : "x2"
+    );
+
+    return ~ret;
+}
--- /dev/null
+#include <klibc/string.h>
+#include <lunaix/compiler.h>
+
+void*
+memcpy(void* dest, const void* src, unsigned long num)
+{
+ if (unlikely(!num))
+ return dest;
+
+    // copy backwards from the last byte: decrement first so the offset
+    // stays within [0, num)
+    asm volatile(
+        "1:                       \n"
+        "sub %[l], %[l], %[step]  \n"
+        "ldrb w3, [%[src], %[l]]  \n"
+        "strb w3, [%[dest], %[l]] \n"
+        "cbnz %[l], 1b            \n"
+        :
+        [l] "+r"(num)
+        :
+        [step] "I"(1),
+        [src] "r"(src),
+        [dest] "r"(dest)
+        : "x3", "memory"
+    );
+
+ return dest;
+}
+
+void*
+memset(void* ptr, int value, unsigned long num)
+{
+    if (unlikely(!num))
+        return ptr;
+
+    asm volatile(
+        "1:                            \n"
+        "sub %[l], %[l], %[step]       \n"
+        "strb %w[val], [%[dest], %[l]] \n"
+        "cbnz %[l], 1b                 \n"
+        :
+        [l] "+r"(num)
+        :
+        [step] "I"(1),
+        [val] "r"(value),
+        [dest] "r"(ptr)
+        : "memory"
+    );
+
+ return ptr;
+}
+
+unsigned long
+strlen(const char* str)
+{
+    register unsigned long _c asm("x0") = 0;
+
+    // advance until the loaded byte is NUL; test the byte, not the index
+    asm volatile(
+        "1:                    \n"
+        "ldrb w1, [%[ptr], %0] \n"
+        "cbz w1, 2f            \n"
+        "add %0, %0, %[step]   \n"
+        "b 1b                  \n"
+        "2:                    \n"
+        :
+        "+r"(_c)
+        :
+        [step] "I"(1),
+        [ptr] "r"(str)
+        : "x1"
+    );
+
+ return _c;
+}
+
+unsigned long
+strnlen(const char* str, unsigned long max_len)
+{
+    register unsigned long _c asm("x0") = 0;
+
+    // stop at the NUL terminator or once max_len bytes are scanned
+    asm volatile(
+        "1:                    \n"
+        "cmp %0, %[len]        \n"
+        "b.hs 2f               \n"
+        "ldrb w1, [%[ptr], %0] \n"
+        "cbz w1, 2f            \n"
+        "add %0, %0, %[step]   \n"
+        "b 1b                  \n"
+        "2:                    \n"
+        :
+        "+r"(_c)
+        :
+        [step] "I"(1),
+        [ptr] "r"(str),
+        [len] "r"(max_len)
+        : "x1", "cc"
+    );
+
+ return _c;
+}
\ No newline at end of file
gic = valloc(sizeof(*gic));
domain = irq_create_domain(gicdev, &gic_domain_ops);
+ irq_set_default_domain(domain);
irq_set_domain_object(gic, domain);
gic->domain = domain;
return gic;
+}
+
+int
+gic_handle_irq(struct hart_state* hs)
+{
+ int err = 0;
+ struct gic* gic;
+ struct gic_pe* pe;
+ struct irq_domain* domain;
+ irq_t irq;
+
+ domain = irq_get_default_domain();
+ gic = irq_domain_obj(domain, struct gic);
+
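+    // ack the PE's pending interrupt, look up the irq object mapped to
+    // the active ID, serve it, then signal end-of-interrupt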
+ pe = gic->cores[current_cpu];
+ if (!pe->ops->ack_int(pe)) {
+ return ENOENT;
+ }
+
+ err = gic_get_interrupt(gic, pe->active_id, &pe->active_int);
+ if (err) {
+ return err;
+ }
+
+ irq = pe->active_int.irq;
+ assert(irq);
+
+ irq->serve(irq, hs);
+
+ pe->has_active_int = false;
+ return pe->ops->notify_eoi(pe);
}
\ No newline at end of file
if (type == 0b001) {
nr_ents = BITS_GET(gits->typer, GITS_TYPER_Devbits);
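+        // GITS_TYPER.Devbits encodes (supported DeviceID bits - 1)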
+ nr_ents = 1 << (nr_ents + 1);
}
else if (type) {
nr_ents = gic->nr_cpus;
val = dt_getprop(dtn, "redistributor-stride");
red_stride = val ? val->ref->u32_val : 0;
+    /*
+        We assume at most 16 cores in all scenarios; no doubt a bad
+        assumption, but the kernel is uniprocessor, so just show some
+        respect to the GIC.
+    */
struct gic_pe *pe, *pes[16];
for (int i = 0; i < nr_red_regions; i++)
{
--- /dev/null
+#include <lunaix/mm/pagetable.h>
+#include <lunaix/mm/page.h>
+#include <klibc/string.h>
+
+struct leaflet*
+dup_leaflet(struct leaflet* leaflet)
+{
+ ptr_t dest_va, src_va;
+ struct leaflet* new_leaflet;
+
+ new_leaflet = alloc_leaflet(leaflet_order(leaflet));
+
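+    // make both leaflets addressable: mount the source, vmap the copy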
+ src_va = leaflet_mount(leaflet);
+ dest_va = vmap(new_leaflet, KERNEL_DATA);
+
+    // copy the whole leaflet, not just its head page
+    memcpy((void*)dest_va, (void*)src_va,
+           PAGE_SIZE << leaflet_order(leaflet));
+
+ leaflet_unmount(leaflet);
+ vunmap(dest_va, new_leaflet);
+
+ return new_leaflet;
+}
+
+ptr_t
+pmm_arch_init_remap(struct pmem* memory, struct boot_handoff* bctx)
+{
+ unsigned long plist_len;
+
+ plist_len = leaf_count(bctx->mem.size) * sizeof(struct ppage);
+
+    for (int i = 0; i < bctx->mem.mmap_len; i++) {
+        // TODO remap each usable region into the ppage pool
+    }
+
+    // TODO return the remapped ppage array base once implemented
+    return 0;
+}
#ifndef __LUNAIX_BITS_H
#define __LUNAIX_BITS_H
-#include <lunaix/compiler.h>
#include <asm/bits.h>
+#define BITS_AT(val, bitfield) _BITS_STATIC(val, bitfield)
+
+#ifndef __ASM__
+#include <lunaix/compiler.h>
+
#define BITFIELD(h, l) (h), (l)
#define BIT(p) BITFIELD(p, p)
#define BITS_SET(to, bitfield, val) _BITS_INSERT(to, val, bitfield)
+#endif
+
#endif /* __LUNAIX_BITS_H */
void
thread_release_mem(struct thread* thread);
+ptr_t
+proc_vmroot();
+
/*
========= Signal =========
*/