config("arch"): {
"i386": "x86",
"x86_64": "x86",
- "aarch64": "arm",
+ "aarch64": "aarch64",
"rv64": "riscv"
}
})
\ No newline at end of file
include("x86/LConfig")
+include("aarch64/LConfig")
@Collection("Platform")
def architecture_support():
    """
    Config ISA support
    """
# type(["i386", "x86_64", "aarch64", "rv64"])
- type(["i386", "x86_64"])
+ type(["i386", "x86_64", "aarch64"])
default("x86_64")
env_val = env("ARCH")
--- /dev/null
+
+sources([
+ "boot/init.c",
+ "boot/kremap.c",
+ "boot/start.S"
+])
+
+sources([
+ "exception/entries.S",
+ "exception/context.S",
+ "exception/handler.c"
+])
+
+sources([
+ "soc/gic.c",
+])
+
+sources([
+ "fault.c",
+ "trace.c"
+])
+
+headers([
+ "includes"
+])
+
+compile_opts([
+ "-mlittle-endian",
+ "-mgeneral-regs-only",
+ "-mabi=lp64",
+ "-mno-omit-leaf-frame-pointer"
+])
\ No newline at end of file
--- /dev/null
+
+@Group()
+def aarch64():
+ add_to_collection(architecture_support)
+
+ @Collection("MMU")
+ def mmu_feature():
+
+ @Term("Page Granularity")
+ def aa64_page_gran():
+ type(["4K", "16K", "64K"])
+
+ default("4K")
+
+ @Term("OA Size")
+ def aa64_oa_size():
+ type([52, 48])
+
+ default(48)
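+
+    # these terms are consumed as CONFIG_AA64_PAGE_GRAN_* and
+    # CONFIG_AA64_OA_SIZE by asm/aa64_mmu.h and asm/pagetable.h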
+
+ return v(arch) in ["aarch64"]
\ No newline at end of file
--- /dev/null
+#include <lunaix/boot_generic.h>
+#include <asm/aa64.h>
+
+#include "init.h"
+
+static inline void
+setup_pstate()
+{
+    /*
+     SCTLR_EL1
+        EE=0, E0E=0     // all little endian
+        WXN=1           // write implies exec never
+        SA0=1, SA=1     // alignment check on SP
+        A=1             // alignment check on memref
+        NMI=1           // enable non-maskable interrupts (FEAT_NMI)
+        M=1             // enable mmu
+    */
+
+    unsigned long sctlr = 0;
+
+    sctlr |= SCTLR_NMI;
+    sctlr |= SCTLR_WXN | SCTLR_nAA;
+    sctlr |= SCTLR_SA | SCTLR_SA0;
+    sctlr |= SCTLR_A | SCTLR_M;
+
+    set_sysreg(SCTLR_EL1, sctlr);
+    set_sysreg(SPSel, 1);
+}
+
+extern void aa64_vbase();
+
+static inline void
+setup_evbar()
+{
+ set_sysreg(VBAR_EL1, __ptr(aa64_vbase));
+}
+
+static inline void
+setup_ttbr()
+{
+ /*
+
+    TCR_EL1
+       SH0=3           // Inner shareable
+       ORGN0=0         // Normal memory, Outer Non-cacheable.
+       IRGN0=1         // Normal memory, Inner Write-Back Read-Allocate Write-Allocate Cacheable.
+       A1=0            // TTBR0 defines the ASID
+       EPD1=0, EPD0=0
+       T1SZ=16, T0SZ=16    // 48-bit VA in both halves
+       TG1=4K, TG0=4K      // VA48, 256T, 4K granule
+       TBI1=0,
+       TBI0=0          // ignore top bits
+       AS=1            // 16-bit ASID
+       HA=1
+       HD=1            // hardware-managed dirty and access
+
+
+    For now TTBR0 and TTBR1 share a single root table; later we may
+    split them:
+       TTBR0: translation for user-land (lowmem)
+       TTBR1: translation for kernel-land (highmem)
+ */
+
+ unsigned long tcr = 0;
+ ptr_t ttb;
+
+    tcr |= TCR_T1SZ(16) | TCR_T0SZ(16);
+    tcr |= TCR_TG0(TCR_G4K) | TCR_TG1(TCR_TG1_G4K);
+    tcr |= TCR_AS | TCR_HA | TCR_HD;
+
+    ttb = kremap();
+
+    // both halves walk the same root table for now
+    set_sysreg(TTBR0_EL1, ttb);
+    set_sysreg(TTBR1_EL1, ttb);
+    set_sysreg(TCR_EL1, tcr);
+}
+
+static inline void
+extract_dtb_bootinfo(ptr_t dtb, struct boot_handoff* handoff)
+{
+ handoff->kexec.dtb_pa = dtb;
+
+ // TODO extract /memory, /reserved-memories from dtb
+}
+
+struct boot_handoff*
+aarch64_init(ptr_t dtb)
+{
+ setup_evbar();
+ setup_ttbr();
+ setup_pstate();
+
+ struct boot_handoff* handoff;
+
+ handoff = bootmem_alloc(sizeof(*handoff));
+
+ extract_dtb_bootinfo(dtb, handoff);
+
+ return handoff;
+}
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_AA64_INIT_H
+#define __LUNAIX_AA64_INIT_H
+
+#include <lunaix/types.h>
+#include <sys-generic/bootmem.h>
+#include <asm/boot_stage.h>
+
+ptr_t
+kremap();
+
+#endif /* __LUNAIX_AA64_INIT_H */
--- /dev/null
+#include <lunaix/sections.h>
+
+#include <asm/mm_defs.h>
+#include <asm-generic/init_pagetable.h>
+#include <asm/boot_stage.h>
+
+#include "init.h"
+
+// a small pool of page-table frames for the initial kernel map;
+// tables must be page-aligned
+static pte_t kpt[32][LEVEL_SIZE] align(PAGE_SIZE);
+
+ptr_t
+kremap()
+{
+ struct pt_alloc alloc;
+ struct ptw_state ptw;
+ pte_t pte;
+ unsigned long nr;
+
+ init_pt_alloc(&alloc, to_kphysical(&kpt), sizeof(kpt));
+ init_ptw_state(&ptw, &alloc, kpt_alloc_table(&alloc));
+
+ pte = mkpte(bootsec_start, KERNEL_DATA);
+ pte = pte_mkexec(pte);
+ nr = leaf_count(bootsec_end - bootsec_start);
+ kpt_set_ptes(&ptw, bootsec_start, pte, LFT_SIZE, nr);
+
+ kpt_mktable_at(&ptw, VMAP, L0T_SIZE);
+ kpt_mktable_at(&ptw, PMAP, L2T_SIZE);
+ kpt_mktable_at(&ptw, KMAP, LFT_SIZE);
+
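+    // relocate the kernel image mappings into the high half so the
+    // low (boot) aliases can be dropped once we leave the boot code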
+ kpt_migrate_highmem(&ptw);
+
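+    // install the recursive self-map: the root table maps itself at
+    // VMS_SELF so page tables stay addressable after the switch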
+ pte = mkpte(__ptr(ptw.root), KERNEL_PGTAB);
+ kpt_set_ptes(&ptw, VMS_SELF, pte, L0T_SIZE, 1);
+
+ return __ptr(ptw.root);
+}
\ No newline at end of file
--- /dev/null
+.section .boot.data
+ .align 4
+ stack_end:
+ .skip 512
+ stack_top:
+
+.section .boot.text
+ .global start_
+
+    /*
+     We follow the Linux arm64 boot protocol; on entry:
+        x0 = physical address of the DTB
+        x1 = x2 = x3 = 0 (reserved)
+    */
+ start_:
+ adr x4, stack_top
+ mov sp, x4
+ mov fp, xzr
+
+    adr x4, aarch64_init
+    blr x4
+
+    // x0: ptr to boot_handoff
+    adr x4, kernel_bootstrap
+    blr x4
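+
+    // kernel_bootstrap should not return; park the core if it does
+    1:
+    wfe
+    b 1b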
\ No newline at end of file
--- /dev/null
+#define __ASM__
+#include <asm/aa64_msrs.h>
+#include "hart_fields.inc"
+
+.section .text
+
+ .globl _aa64_evec_prehandle
+ .globl _aa64_switch_task
+
+ _aa64_evec_prehandle:
+        // reserve the rest of struct exec_param; the syndrome was
+        // already pushed by the vector stub
+        sub sp, sp, #8              // rsvd
+        sub sp, sp, #16             // {link, sp_el0}
+        sub sp, sp, #16             // {parent_state, spsr}
+
+        // save x0-x30; the lower-numbered register of each pair
+        // goes to the lower address, matching struct regcontext
+        stp fp, lr, [sp, #-16]!
+        stp x27, x28, [sp, #-16]!
+        stp x25, x26, [sp, #-16]!
+        stp x23, x24, [sp, #-16]!
+        stp x21, x22, [sp, #-16]!
+        stp x19, x20, [sp, #-16]!
+        stp x17, x18, [sp, #-16]!
+        stp x15, x16, [sp, #-16]!
+        stp x13, x14, [sp, #-16]!
+        stp x11, x12, [sp, #-16]!
+        stp x9, x10, [sp, #-16]!
+        stp x7, x8, [sp, #-16]!
+        stp x5, x6, [sp, #-16]!
+        stp x3, x4, [sp, #-16]!
+        stp x1, x2, [sp, #-16]!
+        stp xzr, x0, [sp, #-16]!    // {depth placeholder, x0}
+
+ add x1, sp, #hart_execp
+
+ mrs x0, SP_EL0
+ str x0, [x1, #execp_spel0_saved]
+
+    mrs x0, ELR_EL1
+ str x0, [x1, #execp_link]
+
+ mrs x0, SPSR_EL1
+ str x0, [x1, #execp_spsr]
+
+ mov x0, sp
+ bl handle_exception
+
+    do_eret:
+        mov sp, x0
+
+        add x1, x0, #hart_execp
+
+        ldr x0, [x1, #execp_spsr]
+        msr SPSR_EL1, x0
+
+        ldr x0, [x1, #execp_link]
+        msr ELR_EL1, x0
+
+        ldr x0, [x1, #execp_spel0_saved]
+        msr SP_EL0, x0
+
+        add sp, sp, #hart_regs
+
+        // restore x0-x30, walking up the frame (post-indexed)
+        ldr x0, [sp], #8
+        ldp x1, x2, [sp], #16
+        ldp x3, x4, [sp], #16
+        ldp x5, x6, [sp], #16
+        ldp x7, x8, [sp], #16
+        ldp x9, x10, [sp], #16
+        ldp x11, x12, [sp], #16
+        ldp x13, x14, [sp], #16
+        ldp x15, x16, [sp], #16
+        ldp x17, x18, [sp], #16
+        ldp x19, x20, [sp], #16
+        ldp x21, x22, [sp], #16
+        ldp x23, x24, [sp], #16
+        ldp x25, x26, [sp], #16
+        ldp x27, x28, [sp], #16
+        ldp fp, lr, [sp], #16
+
+        // sp now points to the start of exec_param
+
+        add sp, sp, #execp_end
+
+ eret
+
+ _aa64_switch_task:
+ // TODO
+ b do_eret
\ No newline at end of file
--- /dev/null
+#define __ASM__
+#include <asm/aa64_exception.h>
+#include <asm/aa64_msrs.h>
+
+.macro aa64_exception_entry type
+ .align 7
+ .type _exception_entry_t\type, @function
+
+ // each handler has at most 128 bytes (32 insts)
+ _exception_entry_t\type:
+        stp x1, x0, [sp, #-16]      // stash {x1, x0} below sp
+
+        // re-purpose bits [63:56] of ESR as the exception
+        // type identifier
+
+        mov x0, #\type
+        lsl x0, x0, #56
+        mrs x1, ESR_EL1
+        orr x1, x0, x1              // x1 = syndrome
+
+        ldr x0, [sp, #-8]           // restore original x0
+        str x1, [sp, #-8]!          // push syndrome
+        ldr x1, [sp, #-8]           // restore original x1
+
+ b _aa64_evec_prehandle
+.endm
+
+.section .text
+
+ .globl aa64_vbase
+    .align 11       // VBAR_EL1[10:0] is RES0: table must be 2K-aligned
+
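+    // NOTE: a complete VBAR_EL1 table has four groups of these four
+    // 128-byte entries (curr-EL/SP_EL0, curr-EL/SP_ELx, lower-EL
+    // AArch64, lower-EL AArch32); only the first group is populated
+    // for now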
+ aa64_vbase:
+ aa64_exception_entry EXCEPTION_SYNC
+ aa64_exception_entry EXCEPTION_IRQ
+ aa64_exception_entry EXCEPTION_FIQ
+ aa64_exception_entry EXCEPTION_SERR
--- /dev/null
+#include <lunaix/process.h>
+#include <asm/hart.h>
+#include <asm/aa64_exception.h>
+
+
+static inline void
+update_thread_context(struct hart_state* state)
+{
+ if (!current_thread) {
+ return;
+ }
+
+ struct hart_state* parent = current_thread->hstate;
+ hart_push_state(parent, state);
+
+ current_thread->hstate = state;
+ current_thread->ustack_top = state->execp.sp_el0;
+
+ if (parent) {
+ state->depth = parent->depth + 1;
+ }
+}
+
+extern void
+handle_mm_abort(struct hart_state* state);
+
+static void
+handle_sync_exception(struct hart_state* hstate)
+{
+ unsigned int ec;
+
+ ec = esr_ec(hstate->execp.syndrome);
+
+ switch (ec)
+ {
+ case EC_I_ABORT:
+ case EC_D_ABORT:
+ case EC_I_ABORT_EL:
+ case EC_D_ABORT_EL:
+ handle_mm_abort(hstate);
+ break;
+
+ default:
+            fail("unhandled exception (sync)");
+ break;
+ }
+}
+
+static void
+handle_async_exception(struct hart_state* hstate)
+{
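+    // TODO: acknowledge the interrupt at the GIC, dispatch the
+    // registered handler, then signal EOI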
+
+}
+
+struct hart_state*
+handle_exception(struct hart_state* hstate)
+{
+ update_thread_context(hstate);
+
+ if (hart_vector_stamp(hstate) == EXCEPTION_SYNC) {
+ handle_sync_exception(hstate);
+ } else {
+ handle_async_exception(hstate);
+ }
+
+ return hstate;
+}
\ No newline at end of file
--- /dev/null
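+// assembler-visible offsets mirroring struct regcontext,
+// struct exec_param and struct hart_state in <asm/hart.h>;
+// keep in sync with that header
+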
+ .struct 0
+reg_xn:
+ .struct reg_xn + 8 * 29
+reg_fp:
+ .struct reg_fp + 8
+reg_lr:
+ .struct reg_lr + 8
+reg_end:
+
+ .struct 0
+execp_parent:
+ .struct execp_parent + 8
+execp_spsr:
+ .struct execp_spsr + 8
+execp_link:
+ .struct execp_link + 8
+execp_spel0_saved:
+ .struct execp_spel0_saved + 8
+execp_rsvd:
+ .struct execp_rsvd + 8
+execp_syndrome:
+ .struct execp_syndrome + 8
+execp_end:
+
+ .struct 0
+hart_depth:
+ .struct hart_depth + 8
+hart_regs:
+ .struct hart_regs + reg_end
+hart_execp:
+ .struct hart_execp + execp_end
+hart_end:
--- /dev/null
+#include <lunaix/mm/fault.h>
+#include <asm/aa64_exception.h>
+#include <asm/hart.h>
+
+void
+handle_mm_abort(struct hart_state* state)
+{
+ // TODO
+}
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_AA64_H
+#define __LUNAIX_AA64_H
+
+#include "aa64_asm.h"
+#include "aa64_mmu.h"
+#include "aa64_msrs.h"
+#include "aa64_sysinst.h"
+
+#endif /* __LUNAIX_AA64_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_ASM_H
+#define __LUNAIX_AA64_ASM_H
+
+#define __const_expr_sign #
+#define __comma() ,
+#define __const_expr() __const_expr_sign
+
+#define __sr_encode(op0, op1, crn, crm, op2) \
+ s##op0##_##op1##_c##crn##_c##crm##_##op2
+#define __sysop_encode(op1, crn, crm, op2) \
+ "#" #op1 ",C" #crn ",C" #crm ",#" #op2
+
+#ifndef __ASM__
+
+#include <lunaix/compiler.h>
+
+#else
+#endif
+#endif /* __LUNAIX_AA64_ASM_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_EXCEPTION_H
+#define __LUNAIX_AA64_EXCEPTION_H
+
+#define EXCEPTION_SYNC 0
+#define EXCEPTION_FIQ 1
+#define EXCEPTION_IRQ 2
+#define EXCEPTION_SERR 3
+
+#ifndef __ASM__
+
+#include <lunaix/bits.h>
+#include <lunaix/types.h>
+
+#define ESR_ISS2 BITFIELD(55, 32)
+#define ESR_EC BITFIELD(31, 26)
+#define ESR_IL BIT(25)
+#define ESR_ISS BITFIELD(24, 0)
+
+#define EC_UNKNOWN 0b000000
+#define EC_WF 0b000001
+#define EC_SIMD 0b000111
+#define EC_LS64 0b001010
+#define EC_BTI 0b001101
+#define EC_EXEC_STATE 0b001110
+#define EC_SYS_INST 0b011000
+
+#define EC_I_ABORT 0b100000
+#define EC_I_ABORT_EL 0b100001
+
+#define EC_D_ABORT 0b100100
+#define EC_D_ABORT_EL 0b100101
+
+#define EC_PC_ALIGN 0b100010
+#define EC_SP_ALIGN 0b100110
+
+#define EC_SERROR 0b101111
+
+static inline bool
+esr_inst32(reg_t esr)
+{
+ return !!BITS_GET(esr, ESR_IL);
+}
+
+static inline unsigned int
+esr_ec(reg_t esr)
+{
+ return (unsigned int)BITS_GET(esr, ESR_EC);
+}
+
+static inline reg_t
+esr_iss(reg_t esr)
+{
+ return (reg_t)BITS_GET(esr, ESR_ISS);
+}
+
+#endif /* !__ASM__ */
+#endif /* __LUNAIX_AA64_EXCEPTION_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_GIC_H
+#define __LUNAIX_AA64_GIC_H
+
+#include <lunaix/bits.h>
+#include "aa64_msrs.h"
+
+#define FRAME_SIZE 0x10000
+
+typedef unsigned int gicreg_t;
+typedef unsigned long gicreg64_t;
+#define FRAME_LEN (FRAME_SIZE / sizeof(gicreg_t))
+#define REG_INDEX(addr) ((addr) / sizeof(gicreg_t))
+
+#define ICC_CTLR_EL1 __sr_encode(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1 __sr_encode(3, 0, 12, 12, 5)
+#define ICC_IGRPEN0_EL1 __sr_encode(3, 0, 12, 12, 6)
+#define ICC_IGRPEN1_EL1 __sr_encode(3, 0, 12, 12, 7)
+
+#define ICC_IAR1_EL1 __sr_encode(3, 0, 12, 12, 0)
+#define ICC_EOIR1_EL1 __sr_encode(3, 0, 12, 12, 1)
+
+#define INTID_ACKED_S 1020
+#define INTID_ACKED_NS 1021
+#define INTID_IAR1_NMI 1022
+#define INTID_NOTHING 1023
+#define check_special_intid(intid) \
+ ((intid) >= INTID_ACKED_S && (intid) <= INTID_NOTHING)
+
+#define LPI_PRIORITY BITFIELD(7, 2)
+#define LPI_EN 1UL
+
+#define ICC_SRE_SRE BITFLAG(0)
+#define ICC_SRE_DFB BITFLAG(1)
+#define ICC_SRE_DIB BITFLAG(2)
+
+#define ICC_CTRL_EXTRAN BITFLAG(19)
+#define ICC_CTRL_IDbits BITFIELD(13, 11)
+#define ICC_CTRL_PRIbits BITFIELD(10, 8)
+#define ICC_CTRL_PMHE BITFLAG(6)
+#define ICC_CTRL_EOImode BITFLAG(1)
+#define ICC_CTRL_CBPR BITFLAG(0)
+
+#define ICC_IGRPEN_ENABLE BITFLAG(0)
+
+#define GICD_CTLR REG_INDEX(0x0000)
+#define GICD_TYPER REG_INDEX(0x0004)
+#define GICD_IIDR REG_INDEX(0x0008)
+
+#define GICD_IGROUPRn REG_INDEX(0x0080)
+#define GICD_ISENABLER REG_INDEX(0x0100)
+#define GICD_ICENABLER REG_INDEX(0x0180)
+#define GICD_IPRIORITYR REG_INDEX(0x0400)
+#define GICD_ICFGR REG_INDEX(0x0C00)
+#define GICD_IGRPMODRn REG_INDEX(0x0D00)
+#define GICD_INMIR REG_INDEX(0x0F80)
+
+#define GICR_CTLR REG_INDEX(0x0000)
+#define GICR_TYPER REG_INDEX(0x0008)
+#define GICR_PROPBASER REG_INDEX(0x0070)
+#define GICR_PENDBASER REG_INDEX(0x0078)
+
+#define GICD_CTLR_G1SEN BITFLAG(2)
+#define GICD_CTLR_G1NSEN BITFLAG(1)
+#define GICD_CTLR_G0EN BITFLAG(0)
+
+#define GICD_TYPER_nESPI BITFIELD(31, 27)
+#define GICD_TYPER_No1N BITFLAG(25)
+#define GICD_TYPER_LPIS BITFLAG(17)
+#define GICD_TYPER_MBIS BITFLAG(16)
+#define GICD_TYPER_nLPI BITFIELD(15, 11)
+#define GICD_TYPER_NMI BITFLAG(9)
+#define GICD_TYPER_ESPI BITFLAG(8)
+#define GICD_TYPER_nSPI BITFIELD(4, 0)
+#define GICD_TYPER_IDbits BITFIELD(23, 19)
+
+#define GICR_TYPER_AffVal BITFIELD(63, 32)
+#define GICR_TYPER_PPInum BITFIELD(31, 27)
+
+#define GICR_BASER_PAddr BITFIELD(51, 12)
+#define GICR_BASER_Share BITFIELD(11, 10)
+#define GICR_PENDBASER_PTZ BITFLAG(62)
+#define GICR_PROPBASER_IDbits\
+ BITFIELD(4, 0)
+
+#endif /* __LUNAIX_AA64_GIC_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_ISRM_H
+#define __LUNAIX_AA64_ISRM_H
+
+#include <asm-generic/isrm.h>
+#include "soc/gic.h"
+
+struct gic_interrupt*
+aa64_isrm_ivalloc(struct gic_int_param* ivcfg, isr_cb handler);
+
+#endif /* __LUNAIX_AA64_ISRM_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_MMU_H
+#define __LUNAIX_AA64_MMU_H
+
+#include "aa64_asm.h"
+
+#if defined(CONFIG_AA64_PAGE_GRAN_4K)
+#define _MMU_TG 0b01
+#elif defined(CONFIG_AA64_PAGE_GRAN_16K)
+#define _MMU_TG 0b10
+#elif defined(CONFIG_AA64_PAGE_GRAN_64K)
+#define _MMU_TG 0b11
+#endif
+
+#if CONFIG_AA64_OA_SIZE == 52
+#define _MMU_USE_OA52
+#endif
+
+
+#define TCR_DS (1UL << 59)
+#define TCR_E0PD1 (1UL << 56)
+#define TCR_E0PD0 (1UL << 55)
+#define TCR_TBID1 (1UL << 52)
+#define TCR_TBID0 (1UL << 51)
+#define TCR_HPD1 (1UL << 42)
+#define TCR_HPD0 (1UL << 41)
+#define TCR_HD (1UL << 40)
+#define TCR_HA (1UL << 39)
+#define TCR_TBI1 (1UL << 38)
+#define TCR_TBI0 (1UL << 37)
+#define TCR_AS (1UL << 36)
+
+// TCR_EL1.TG0 encodings
+#define TCR_G4K         (0b00)
+#define TCR_G16K        (0b10)
+#define TCR_G64K        (0b01)
+
+// TCR_EL1.TG1 uses a different encoding
+#define TCR_TG1_G4K     (0b10)
+#define TCR_TG1_G16K    (0b01)
+#define TCR_TG1_G64K    (0b11)
+
+#define TCR_SHNS (0b00)
+#define TCR_SHOS (0b10)
+#define TCR_SHIS (0b11)
+
+#define TCR_TG1(g) (((g) & 0b11) << 30)
+#define TCR_TG0(g) (((g) & 0b11) << 14)
+
+#define TCR_T1SZ(sz) (((sz) & 0b111111) << 16)
+#define TCR_T0SZ(sz) (((sz) & 0b111111))
+
+#define TCR_EPD1 (1UL << 23)
+#define TCR_EPD0 (1UL << 7)
+#define TCR_A1 (1UL << 22)
+
+#endif /* __LUNAIX_AA64_MMU_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_MSRS_H
+#define __LUNAIX_AA64_MSRS_H
+
+#include "aa64_asm.h"
+
+#define SCTLR_EL1 __sr_encode(3, 0, 1, 0, 0)
+#define TCR_EL1 __sr_encode(3, 0, 2, 0, 2)
+#define TTBR0_EL1 __sr_encode(3, 0, 2, 0, 0)
+#define TTBR1_EL1 __sr_encode(3, 0, 2, 0, 1)
+#define VBAR_EL1        __sr_encode(3, 0, 12, 0, 0)
+#define CurrentEL __sr_encode(3, 0, 4, 2, 2)
+#define ELR_EL1         __sr_encode(3, 0, 4, 0, 1)
+#define SPSel __sr_encode(3, 0, 4, 2, 0)
+#define SPSR_EL1 __sr_encode(3, 0, 4, 0, 0)
+#define DAIF_EL1 __sr_encode(3, 3, 4, 2, 1)
+#define ALLINT_EL1 __sr_encode(3, 0, 4, 3, 0)
+#define SP_EL0 __sr_encode(3, 0, 4, 1, 0)
+#define SP_EL1 __sr_encode(3, 4, 4, 1, 0)
+
+#ifndef __ASM__
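+
+// `reg` must be an encoded name from __sr_encode, e.g.
+// read_sysreg(SCTLR_EL1) emits an mrs from s3_0_c1_c0_0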
+#define read_sysreg(reg) \
+ ({ unsigned long _x; \
+ asm ("mrs %0, " stringify(reg):"=r"(_x)); \
+ _x; \
+ })
+
+#define set_sysreg(reg, v) \
+ ({ unsigned long _x = v; \
+ asm ("msr " stringify(reg) ", %0"::"r"(_x)); \
+ _x; \
+ })
+
+#define SCTLR_SPINTMASK (1UL << 62)
+#define SCTLR_NMI       (1UL << 61)
+#define SCTLR_EE        (1UL << 25)
+#define SCTLR_E0E       (1UL << 24)
+#define SCTLR_WXN       (1UL << 19)
+#define SCTLR_nAA       (1UL << 6)
+#define SCTLR_SA0       (1UL << 4)
+#define SCTLR_SA        (1UL << 3)
+#define SCTLR_A         (1UL << 1)
+#define SCTLR_M         (1UL << 0)
+
+#define sysreg_flagging(reg, set, unset) \
+ ({ \
+ unsigned long _x; \
+ _x = read_sysreg(reg); \
+ _x = (_x & ~(unset)) | (set); \
+ set_sysreg(reg, _x); \
+ _x; \
+ })
+
+#endif
+#endif /* __LUNAIX_AA64_MSRS_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_SPSR_H
+#define __LUNAIX_AA64_SPSR_H
+
+#include <lunaix/types.h>
+#include <lunaix/bits.h>
+
+#define SPSR_EL BITFIELD(3, 2)
+#define SPSR_SP BIT(0)
+
+static inline bool
+spsr_from_el0(reg_t spsr)
+{
+ return BITS_GET(spsr, SPSR_EL) == 0;
+}
+
+#endif /* __LUNAIX_AA64_SPSR_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_SYSINST_H
+#define __LUNAIX_AA64_SYSINST_H
+
+#include "aa64_asm.h"
+
+#define tlbi_alle1 __sysop_encode(4, 8, 7, 4)
+#define tlbi_aside1 __sysop_encode(0, 8, 7, 2)
+#define tlbi_rvaae1 __sysop_encode(0, 8, 6, 3)
+#define tlbi_rvae1 __sysop_encode(0, 8, 6, 1)
+#define tlbi_vaae1 __sysop_encode(0, 8, 7, 3)
+#define tlbi_vae1 __sysop_encode(0, 8, 7, 1)
+
+#define sys_a0(op) \
+ ({ asm ("sys " op); })
+
+#define sys_a1(op, xt) \
+ ({ asm ("sys " op ", %0" :: "r"(xt)); })
+
+#define sysl(op) \
+ ({ unsigned long _x; \
+ asm ("sysl %0, " op :"=r"(_x)); \
+ _x; \
+ })
+
+#endif /* __LUNAIX_AA64_SYSINST_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_ABI_H
+#define __LUNAIX_AA64_ABI_H
+
+#include <lunaix/types.h>
+
+#ifndef __ASM__
+
+#define align_stack(ptr) ((ptr) & ~15)
+
+#define store_retval(retval) current_thread->hstate->registers.x[0] = (retval)
+#define store_retval_to(th, retval) (th)->hstate->registers.x[0] = (retval)
+
+
+static inline void must_inline noret
+switch_context() {
+ // TODO
+ asm ("b _aa64_switch_task");
+ unreachable;
+}
+
+
+static inline ptr_t
+abi_get_retaddr()
+{
+ reg_t lr;
+ asm ("mov %0, lr" : "=r"(lr));
+
+ return lr;
+}
+
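+// AAPCS64 frame record layout is {fp, lr}: the saved lr sits at fp + 8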
+static inline ptr_t
+abi_get_retaddrat(ptr_t fp)
+{
+ return ((ptr_t*)fp)[1];
+}
+
+static inline ptr_t must_inline
+abi_get_callframe()
+{
+ ptr_t val;
+ asm volatile("mov %0, fp" : "=r"(val));
+ return val;
+}
+
+static inline void must_inline
+j_usr(ptr_t sp, ptr_t pc)
+{
+ // TODO
+}
+
+#endif
+
+#endif /* __LUNAIX_AA64_ABI_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_BITS_H
+#define __LUNAIX_ARCH_BITS_H
+
+#include <asm-generic/bits.h>
+
+#undef _BITS_EXTRACT
+#undef _BITS_INSERT
+
+#define _BITS_EXTRACT(from, h, l) \
+ ({ \
+ unsigned long _r; \
+ asm ("ubfm %0, %1, %2, %3" \
+ : "=r"(_r) \
+ : "r"(from), \
+ "i"(l),"i"(h)); \
+ _r; \
+ })
+
+#define _BITS_INSERT(to, from, h, l) \
+ ({ \
+        unsigned long _r = (to); \
+        asm ("bfi %0, %1, %2, %3" \
+            : "+r"(_r) \
+            : "r"(from), \
+            "i"(l), \
+            "i"((h) - (l) + 1)); \
+ _r; \
+ })
+
+
+#endif /* __LUNAIX_ARCH_BITS_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_BOOT_STAGE_H
+#define __LUNAIX_ARCH_BOOT_STAGE_H
+
+#include <asm-generic/boot_stage.h>
+
+#endif /* __LUNAIX_ARCH_BOOT_STAGE_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_CPU_H
+#define __LUNAIX_ARCH_CPU_H
+
+#include "aa64.h"
+
+void
+cpu_trap_sched();
+
+static inline void
+cpu_enable_interrupt()
+{
+ set_sysreg(ALLINT_EL1, 0);
+}
+
+static inline void
+cpu_disable_interrupt()
+{
+    set_sysreg(ALLINT_EL1, 1 << 13);    // PSTATE.ALLINT is bit [13]
+}
+
+static inline void
+cpu_wait()
+{
+ asm volatile ( "wfi" );
+}
+
+#endif /* __LUNAIX_ARCH_CPU_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_HART_H
+#define __LUNAIX_ARCH_HART_H
+
+#ifndef __ASM__
+#include <lunaix/types.h>
+#include <lunaix/bits.h>
+#include <asm/aa64_spsr.h>
+
+#define SYNDROME_ETYPE BITFIELD(63, 56)
+
+struct hart_state;
+
+struct regcontext
+{
+ union {
+ reg_t x[31];
+ struct {
+ reg_t x_[29];
+ reg_t fp;
+ reg_t lr;
+ };
+ };
+} compact align(8);
+
+struct exec_param
+{
+ struct hart_state* parent_state;
+ reg_t spsr;
+ reg_t link;
+ struct {
+ reg_t sp_el0;
+ reg_t rsvd;
+ };
+
+ reg_t syndrome;
+} compact align(8);
+
+struct hart_state
+{
+ reg_t depth;
+ struct regcontext registers;
+ struct exec_param execp;
+} compact align(16);
+
+static inline int
+hart_vector_stamp(struct hart_state* hstate) {
+ return BITS_GET(hstate->execp.syndrome, SYNDROME_ETYPE);
+}
+
+static inline unsigned int
+hart_ecause(struct hart_state* hstate) {
+ return hstate->execp.syndrome;
+}
+
+static inline struct hart_state*
+hart_parent_state(struct hart_state* hstate)
+{
+ return hstate->execp.parent_state;
+}
+
+static inline void
+hart_push_state(struct hart_state* p_hstate, struct hart_state* hstate)
+{
+ hstate->execp.parent_state = p_hstate;
+}
+
+static inline ptr_t
+hart_pc(struct hart_state* hstate)
+{
+ return hstate->execp.link;
+}
+
+static inline ptr_t
+hart_sp(struct hart_state* hstate)
+{
+    // the pre-exception sp sits just past the saved state
+    return __ptr(&hstate[1]);
+}
+
+static inline bool
+kernel_context(struct hart_state* hstate)
+{
+    return !spsr_from_el0(hstate->execp.spsr);
+}
+
+static inline ptr_t
+hart_stack_frame(struct hart_state* hstate)
+{
+ return hstate->registers.fp;
+}
+
+#endif
+
+#endif /* __LUNAIX_ARCH_HART_H */
--- /dev/null
+#ifndef __LUNAIX_MEMPART_H
+#define __LUNAIX_MEMPART_H
+
+
+#define END_POINT(name) (name + name##_SIZE - 1)
+
+#ifdef __LD__
+#define __ulong(val) val
+#else
+#define __ulong(val) val##UL
+#endif
+
+#define KSTACK_AREA __ulong(0x0000000100000000)
+#define KSTACK_AREA_SIZE __ulong(0x0000000040000000)
+#define KSTACK_AREA_END END_POINT(KSTACK_AREA)
+
+#define USR_EXEC __ulong(0x0000008000000000)
+#define USR_EXEC_SIZE __ulong(0x0000002000000000)
+#define USR_EXEC_END END_POINT(USR_EXEC)
+
+#define USR_MMAP __ulong(0x0000010000000000)
+#define USR_MMAP_SIZE __ulong(0x0000008000000000)
+#define USR_MMAP_END END_POINT(USR_MMAP)
+
+#define USR_STACK __ulong(0x00007f8000000000)
+#define USR_STACK_SIZE __ulong(0x0000001fc0000000)
+#define USR_STACK_SIZE_THREAD __ulong(0x0000000000200000)
+#define USR_STACK_END END_POINT(USR_STACK)
+
+
+// the kernel's home
+
+#define KERNEL_RESIDENT __ulong(0xfffffd8000000000) // -2.5T
+#define VMAP KERNEL_RESIDENT // -2.5T
+#define VMAP_SIZE __ulong(0x0000010000000000)
+#define VMAP_END END_POINT(VMAP)
+
+#define VMS_MOUNT_1 __ulong(0xfffffe8000000000) // -1.5T
+#define VMS_MOUNT_1_SIZE __ulong(0x0000008000000000)
+#define VMS_MOUNT_1_END END_POINT(VMS_MOUNT_1)
+
+#define VMS_SELF_MOUNT __ulong(0xffffff0000000000) // -1T
+
+#define KMAP __ulong(0xffffff8000000000)
+#define PG_MOUNT_1 KMAP // -512G
+#define PG_MOUNT_1_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_1_END END_POINT(PG_MOUNT_1)
+
+#define PG_MOUNT_2 __ulong(0xffffff8000001000)
+#define PG_MOUNT_2_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_2_END END_POINT(PG_MOUNT_2)
+
+#define PG_MOUNT_3 __ulong(0xffffff8000002000)
+#define PG_MOUNT_3_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_3_END END_POINT(PG_MOUNT_3)
+
+#define PG_MOUNT_4 __ulong(0xffffff8000003000)
+#define PG_MOUNT_4_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_4_END END_POINT(PG_MOUNT_4)
+
+#define PG_MOUNT_VAR __ulong(0xffffff8000004000)
+#define PG_MOUNT_VAR_SIZE __ulong(0x000000003fffc000)
+#define PG_MOUNT_VAR_END END_POINT(PG_MOUNT_VAR)
+
+#define PMAP __ulong(0xffffff8040000000)
+
+#define KERNEL_IMG __ulong(0xffffffff80000000) // -2G
+#define KERNEL_IMG_SIZE __ulong(0x0000000080000000)
+#define KERNEL_IMG_END END_POINT(KERNEL_IMG)
+
+#endif /* __LUNAIX_MEMPART_H */
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_MM_DEFS_H
+#define __LUNAIX_MM_DEFS_H
+
+#include "mempart.h"
+#include "pagetable.h"
+
+/*
+    Regardless of architecture, we need to draw the line very carefully
+    and must take the size of the VM into account. In general, we aim
+    for a "sufficiently large" amount of memory for the kernel.
+
+    In terms of x86_32:
+     * #768~1022 PTEs of PD (0x00000000c0000000, ~1GiB)
+
+    In light of upcoming x86_64 support (for Level 4&5 Paging):
+     * #510 entry of PML4 (0x0000ff0000000000, ~512GiB)
+     * #510 entry of PML5 (0x01fe000000000000, ~256TiB)
+
+
+    KERNEL_RESIDENT - the high-mem region where the kernel resides
+    KSTACK_PAGES    - number of pages for each kernel stack
+    KEXEC_RSVD      - pages reserved for the kernel image
+*/
+
+#define KSTACK_PAGES 4
+#define KEXEC_RSVD 32
+
+#define KSTACK_SIZE (KSTACK_PAGES * PAGE_SIZE)
+
+#define kernel_addr(addr) ((addr) >= KERNEL_RESIDENT || (addr) < USR_EXEC)
+
+#define to_kphysical(k_va) ((ptr_t)(k_va) - KERNEL_IMG)
+#define to_kvirtual(k_pa) ((ptr_t)(k_pa) + KERNEL_IMG)
+
+#endif /* __LUNAIX_MM_DEFS_H */
--- /dev/null
+#include <asm-generic/muldiv64.h>
--- /dev/null
+#ifndef __LUNAIX_ARCH_PAGETABLE_H
+#define __LUNAIX_ARCH_PAGETABLE_H
+
+#include <lunaix/types.h>
+#include <lunaix/compiler.h>
+
+#include "aa64_mmu.h"
+
+/* ******** Page Table Manipulation ******** */
+
+#define _PTW_LEVEL 4
+
+
+
+
+#define VMS_BITS 48
+
+#define PMS_BITS CONFIG_AA64_OA_SIZE
+
+#define VMS_SIZE ( 1UL << VMS_BITS)
+#define VMS_MASK ( VMS_SIZE - 1 )
+#define PMS_SIZE ( 1UL << PMS_BITS )
+#define PMS_MASK ( PMS_SIZE - 1 )
+
+#define __index(va) ( (va) & VMS_MASK )
+#define __vaddr(va) \
+ ( (__index(va) ^ ((VMS_MASK + 1) >> 1)) - ((VMS_MASK + 1) >> 1) )
+#define __paddr(pa) ( (pa) & PMS_MASK )
+
+
+#if defined(CONFIG_AA64_PAGE_GRAN_4K)
+#define _PAGE_BASE_SHIFT 12
+#elif defined(CONFIG_AA64_PAGE_GRAN_16K)
+#define _PAGE_BASE_SHIFT 14
+#elif defined(CONFIG_AA64_PAGE_GRAN_64K)
+#define _PAGE_BASE_SHIFT 16
+#endif
+
+#define _PAGE_BASE_SIZE ( 1UL << _PAGE_BASE_SHIFT )
+#define _PAGE_BASE_MASK ( (_PAGE_BASE_SIZE - 1) & VMS_MASK )
+
+#define _PAGE_LEVEL_SHIFT 9
+#define _PAGE_LEVEL_SIZE ( 1UL << _PAGE_LEVEL_SHIFT )
+#define _PAGE_LEVEL_MASK ( _PAGE_LEVEL_SIZE - 1 )
+#define _PAGE_Ln_SIZE(n) \
+ ( 1UL << (_PAGE_BASE_SHIFT + _PAGE_LEVEL_SHIFT * (_PTW_LEVEL - (n) - 1)) )
+
+/* General size of a LnT huge page */
+
+#define L0T_SIZE _PAGE_Ln_SIZE(0)
+#define L1T_SIZE _PAGE_Ln_SIZE(1)
+#define L2T_SIZE _PAGE_Ln_SIZE(2)
+#define L3T_SIZE _PAGE_Ln_SIZE(3)
+#define LFT_SIZE _PAGE_Ln_SIZE(3)
+
+
+struct __pte {
+ unsigned long val;
+} align(8);
+
+// upper attributes
+
+#define _PTE_UXN (1UL << 54)
+#define _PTE_PXN (1UL << 53)
+#define _PTE_XN (_PTE_UXN | _PTE_PXN)
+#define _PTE_Contig (1UL << 52)
+#define _PTE_DBM (1UL << 51)
+
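+// with a 52-bit OA the high output-address bits do not fit the
+// regular PTE address field; __OA_HEAD/__OA_TAIL split a physical
+// address so the spare low bits ([15:12] for 64K granule, [9:8]
+// otherwise) carry the top OA bits
+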
+#ifdef _MMU_USE_OA52
+#if CONFIG_AA64_PAGE_GRAN_64K
+#define __OA_HIGH_MASK ( 0b1111 << 12 )
+#define __OA_HEAD(pa) ((pa) & ((1UL << 48) - 1) & ~PAGE_MASK)
+#define __OA_TAIL(pa) ((((pa) >> 48) & 0b1111) << 12)
+#else
+#define __OA_HIGH_MASK ( 0b0011 << 8 )
+#define __OA_HEAD(pa) ((pa) & ((1UL << 50) - 1) & ~PAGE_MASK)
+#define __OA_TAIL(pa) ((((pa) >> 50) & 0b0011) << 8)
+#endif
+#else
+#define __OA_HIGH_MASK (0)
+#define __OA_HEAD(pa) (__paddr(pa) & ~PAGE_MASK)
+#define __OA_TAIL(pa) (0)
+#endif
+
+#define _PTE_OA(pa) (__OA_HEAD(pa) | __OA_TAIL(pa))
+
+// lower attributes
+
+#define _PTE_nG (1UL << 11)
+#define _PTE_AF (1UL << 10)
+
+// AP[2:1] at bits [7:6]: AP[2] selects read-only, AP[1] grants EL0 access
+
+#define _PTE_AP(p, u) ((((p) & 1) << 1 | ((u) & 1)) << 6)
+#define _PTE_PRW _PTE_AP(0 , 0) // priv rw, unpriv none
+#define _PTE_PRWURW _PTE_AP(0 , 1) // priv rw, unpriv rw
+#define _PTE_U _PTE_AP(0 , 1) // generic unpriv flag
+#define _PTE_PRO _PTE_AP(1 , 0) // priv ro, unpriv none
+#define _PTE_PROURO _PTE_AP(1 , 1) // priv ro, unpriv ro
+
+#define _PTE_BLKDESC (0b01)
+#define _PTE_TABDESC (0b11)
+#define _PTE_LFTDESC (0b11)
+#define _PTE_VALID (0b01)
+#define _PTE_DESC_MASK (0b11)
+#define _PTE_SET_DESC(pte_val, desc) \
+ ( ((pte_val) & ~_PTE_DESC_MASK) | ((desc) & _PTE_DESC_MASK) )
+#define _PTE_GET_DESC(pte_val) \
+ ( (pte_val) & _PTE_DESC_MASK )
+
+#define __MEMGUARD 0xf0f0f0f0f0f0f0f0UL
+
+typedef unsigned long pte_attr_t;
+typedef unsigned long pfn_t;
+
+// note: __vaddr() sign-extends bit 47, giving canonical aarch64 VAs
+
+
+
+/* General mask to get page offset of a LnT huge page */
+
+#define L0T_MASK ( L0T_SIZE - 1 )
+#define L1T_MASK ( L1T_SIZE - 1 )
+#define L2T_MASK ( L2T_SIZE - 1 )
+#define L3T_MASK ( L3T_SIZE - 1 )
+#define LFT_MASK ( LFT_SIZE - 1 )
+
+/* Masks to get index of a LnTE */
+
+#define L0T_INDEX_MASK ( VMS_MASK ^ L0T_MASK )
+#define L1T_INDEX_MASK ( L0T_MASK ^ L1T_MASK )
+#define L2T_INDEX_MASK ( L1T_MASK ^ L2T_MASK )
+#define L3T_INDEX_MASK ( L2T_MASK ^ L3T_MASK )
+#define LFT_INDEX_MASK ( L3T_MASK ^ LFT_MASK )
+
+#define PAGE_SHIFT _PAGE_BASE_SHIFT
+#define PAGE_SIZE _PAGE_BASE_SIZE
+#define PAGE_MASK _PAGE_BASE_MASK
+
+#define LEVEL_SHIFT _PAGE_LEVEL_SHIFT
+#define LEVEL_SIZE _PAGE_LEVEL_SIZE
+#define LEVEL_MASK _PAGE_LEVEL_MASK
+
+// max PTEs number
+#define MAX_PTEN _PAGE_LEVEL_SIZE
+
+// max translation level supported
+#define MAX_LEVEL _PTW_LEVEL
+
+typedef struct __pte pte_t;
+
+// everything that is not part of the output address is protection
+#define _PTE_OA_MASK    ( __OA_HEAD(~0UL) | __OA_HIGH_MASK )
+#define _PTE_PROT_MASK  ( ~_PTE_OA_MASK )
+#define _PTE_PPFN_MASK  ( _PTE_OA_MASK )
+
+#define _PAGE_BASIC ( _PTE_VALID )
+
+#define KERNEL_EXEC ( _PAGE_BASIC | _PTE_PRO | _PTE_UXN )
+#define KERNEL_DATA ( _PAGE_BASIC | _PTE_PRW | _PTE_XN )
+#define KERNEL_RDONLY ( _PAGE_BASIC | _PTE_PRO | _PTE_XN )
+#define KERNEL_ROEXEC KERNEL_EXEC
+#define KERNEL_PGTAB ( _PAGE_BASIC | _PTE_TABDESC )
+
+#define USER_EXEC ( _PAGE_BASIC | _PTE_PROURO | _PTE_PXN )
+#define USER_DATA ( _PAGE_BASIC | _PTE_PRWURW | _PTE_XN )
+#define USER_RDONLY ( _PAGE_BASIC | _PTE_PROURO )
+#define USER_ROEXEC USER_EXEC
+#define USER_PGTAB ( _PAGE_BASIC | _PTE_TABDESC )
+
+#define SELF_MAP ( KERNEL_DATA | _PTE_TABDESC )
+
+#define __mkpte_from(pte_val) ((pte_t){ .val = (pte_val) })
+
+#define null_pte ( __mkpte_from(0) )
+#define guard_pte ( __mkpte_from(__MEMGUARD) )
+#define pte_val(pte)        ( (pte).val )
+
+
+static inline bool
+pte_isguardian(pte_t pte)
+{
+ return pte.val == __MEMGUARD;
+}
+
+static inline pte_t
+mkpte_prot(pte_attr_t prot)
+{
+ pte_attr_t attrs = (prot & _PTE_PROT_MASK) | _PTE_LFTDESC;
+ return __mkpte_from(attrs);
+}
+
+static inline pte_t
+mkpte(ptr_t paddr, pte_attr_t prot)
+{
+ pte_attr_t attrs = (prot & _PTE_PROT_MASK) | _PTE_LFTDESC;
+ return __mkpte_from((paddr & ~_PAGE_BASE_MASK) | attrs);
+}
+
+static inline pte_t
+mkpte_root(ptr_t paddr, pte_attr_t prot)
+{
+ pte_attr_t attrs = (prot & _PTE_PROT_MASK) | _PTE_TABDESC;
+ return __mkpte_from((paddr & ~_PAGE_BASE_MASK) | attrs);
+}
+
+static inline pte_t
+mkpte_raw(unsigned long pte_val)
+{
+ return __mkpte_from(pte_val);
+}
+
+static inline pte_t
+pte_setpaddr(pte_t pte, ptr_t paddr)
+{
+ return __mkpte_from((pte.val & _PTE_PROT_MASK) | (paddr & ~_PTE_PROT_MASK));
+}
+
+static inline pte_t
+pte_setppfn(pte_t pte, pfn_t ppfn)
+{
+ return pte_setpaddr(pte, ppfn * PAGE_SIZE);
+}
+
+static inline ptr_t
+pte_paddr(pte_t pte)
+{
+ return __paddr(pte.val) & ~_PTE_PROT_MASK;
+}
+
+static inline pfn_t
+pte_ppfn(pte_t pte)
+{
+ return pte_paddr(pte) >> _PAGE_BASE_SHIFT;
+}
+
+static inline pte_t
+pte_setprot(pte_t pte, ptr_t prot)
+{
+ return __mkpte_from((pte.val & ~_PTE_PROT_MASK) | (prot & _PTE_PROT_MASK));
+}
+
+static inline pte_attr_t
+pte_prot(pte_t pte)
+{
+ return (pte.val & _PTE_PROT_MASK);
+}
+
+static inline bool
+pte_isnull(pte_t pte)
+{
+ return !pte.val;
+}
+
+static inline pte_t
+pte_mkhuge(pte_t pte)
+{
+ return __mkpte_from(_PTE_SET_DESC(pte.val, _PTE_BLKDESC));
+}
+
+static inline pte_t
+pte_mkvolatile(pte_t pte)
+{
+ return __mkpte_from(pte.val);
+}
+
+static inline pte_t
+pte_mkroot(pte_t pte)
+{
+ return __mkpte_from(_PTE_SET_DESC(pte.val, _PTE_TABDESC));
+}
+
+static inline bool
+pte_huge(pte_t pte)
+{
+ return _PTE_GET_DESC(pte.val) == _PTE_BLKDESC;
+}
+
+static inline pte_t
+pte_mkloaded(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_VALID);
+}
+
+static inline pte_t
+pte_mkunloaded(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_VALID);
+}
+
+static inline bool
+pte_isloaded(pte_t pte)
+{
+ return !!(pte.val & _PTE_VALID);
+}
+
+static inline pte_t
+pte_mkwprotect(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_PRO);
+}
+
+static inline pte_t
+pte_mkwritable(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_PRO);
+}
+
+static inline bool
+pte_iswprotect(pte_t pte)
+{
+    return !!(pte.val & _PTE_PRO);
+}
+
+static inline pte_t
+pte_mkuser(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_U);
+}
+
+static inline pte_t
+pte_mkkernel(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_U);
+}
+
+static inline bool
+pte_allow_user(pte_t pte)
+{
+ return !!(pte.val & _PTE_U);
+}
+
+static inline pte_t
+pte_mkexec(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_PXN);
+}
+
+static inline pte_t
+pte_mknexec(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_PXN);
+}
+
+static inline pte_t
+pte_mkuexec(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_UXN);
+}
+
+static inline pte_t
+pte_mknuexec(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_UXN);
+}
+
+static inline bool
+pte_isexec(pte_t pte)
+{
+ return !(pte.val & _PTE_PXN);
+}
+
+static inline bool
+pte_isuexec(pte_t pte)
+{
+ return !(pte.val & _PTE_UXN);
+}
+
+static inline pte_t
+pte_mkuntouch(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_AF);
+}
+
+static inline bool
+pte_istouched(pte_t pte)
+{
+ return !!(pte.val & _PTE_AF);
+}
+
+static inline pte_t
+pte_mkclean(pte_t pte)
+{
+    // with hardware DBM, a clean page is write-protected (AP[2] set)
+    // until hardware marks it dirty again
+    return __mkpte_from(pte.val | _PTE_PRO);
+}
+
+static inline bool
+pte_dirty(pte_t pte)
+{
+    // dirty iff DBM is armed and hardware has cleared write-protection
+    return (pte.val & _PTE_DBM) && !(pte.val & _PTE_PRO);
+}
+
+static inline void
+set_pte(pte_t* ptep, pte_t pte)
+{
+ ptep->val = pte.val;
+}
+
+static inline pte_t
+pte_at(pte_t* ptep) {
+ return *ptep;
+}
+
+pte_t
+translate_vmr_prot(unsigned int vmr_prot, pte_t pte);
+
+#endif /* __LUNAIX_ARCH_PAGETABLE_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_PHYSICAL_H
+#define __LUNAIX_ARCH_PHYSICAL_H
+
+#include <lunaix/ds/llist.h>
+#include "mm_defs.h"
+
+#define PPLIST_STARTVM PMAP
+
+struct ppage_arch
+{
+
+};
+
+#endif /* __LUNAIX_ARCH_PHYSICAL_H */
--- /dev/null
+#ifndef __LUNAIX_GIC_H
+#define __LUNAIX_GIC_H
+
+#include <lunaix/types.h>
+#include <lunaix/ds/bitmap.h>
+#include <lunaix/ds/hashtable.h>
+#include <asm/aa64_gic.h>
+#include <asm-generic/isrm.h>
+
+#define NR_CPU 1
+#define gic_bmp PREP_BITMAP(gicreg_t, gic_intr, BMP_ORIENT_LSB)
+
+#define INITID_SGI_BASE 0
+#define INITID_SGI_END 15
+
+#define INITID_PPI_BASE 16
+#define INITID_PPI_END 31
+
+#define INITID_SPI_BASE 32
+#define INITID_SPI_END 1019
+
+#define INITID_SPEC_BASE 1020
+#define INITID_SPEC_END 1023
+
+#define INITID_ePPI_BASE 1056
+#define INITID_ePPI_END 1119
+
+#define INITID_eSPI_BASE 4096
+#define INITID_eSPI_END 5119
+
+#define INITID_LPI_BASE 8192
+
+#define INITID_SPI_NR (INITID_SPEC_BASE - INITID_SPI_BASE)
+
+enum gic_int_type
+{
+ GIC_IDL,
+ GIC_LPI,
+ GIC_SPI,
+ GIC_PPI,
+ GIC_SGI,
+ GIC_RSV
+} compact;
+
+enum gic_tri_type
+{
+ GIC_TRIG_EDGE,
+ GIC_TRIG_LEVEL
+} compact;
+
+enum gic_grp_type
+{
+ GIC_G0,
+ GIC_G1S,
+ GIC_G1NS
+} compact;
+
+DECLARE_BITMAP(gic_bmp);
+
+struct gic_int_param
+{
+ enum gic_int_type class;
+ enum gic_tri_type trigger;
+ enum gic_grp_type group;
+ unsigned int priority;
+ int cpu_id;
+ bool as_nmi;
+ bool ext_range;
+};
+
+struct gic_intcfg {
+ enum gic_int_type class;
+ enum gic_tri_type trigger;
+ enum gic_grp_type group;
+ bool as_nmi;
+};
+
+struct gic_idomain
+{
+ DECLARE_HASHTABLE(recs, 32);
+ BITMAP(gic_bmp) ivmap;
+ unsigned int base;
+ bool extended;
+};
+
+struct gic_interrupt
+{
+ struct hlist_node node;
+
+ struct gic_idomain* domain;
+
+ unsigned int intid;
+ struct gic_intcfg config;
+
+ isr_cb handler;
+ void* payload;
+};
+
+struct gic_distributor
+{
+ BITMAP(gic_bmp) group;
+ BITMAP(gic_bmp) grpmod;
+ BITMAP(gic_bmp) enable;
+ BITMAP(gic_bmp) disable;
+ BITMAP(gic_bmp) icfg;
+ BITMAP(gic_bmp) nmi;
+};
+
+struct gic_rd
+{
+ gicreg_t base[FRAME_LEN];
+ gicreg_t sgi_base[FRAME_LEN];
+} compact align(4);
+
+struct gic_cpuif
+{
+ gicreg_t base[FRAME_LEN];
+} compact align(4);
+
+#define gic_reg64(base, index) (*(gicreg64_t*)(&base[index]))
+#define gic_regptr(base, index) (__ptr(&base[index]))
+
+struct gic_pe
+{
+ struct gic_interrupt* active;
+ reg_t iar_val;
+ unsigned int priority;
+
+ struct gic_rd* _rd;
+ struct gic_cpuif* _if;
+
+ struct gic_distributor rdist;
+
+ struct {
+ struct gic_idomain* local_ints;
+ struct gic_idomain* eppi;
+ } idomain;
+
+ struct {
+ unsigned int affinity;
+ unsigned int ppi_nr;
+ bool eppi_ready;
+ };
+};
+
+struct gic_its
+{
+ gicreg_t base[FRAME_LEN]; // control regs
+ gicreg_t trn_space[FRAME_LEN]; // translation space
+} compact align(4);
+
+struct gic_its_v41
+{
+ gicreg_t base[FRAME_LEN]; // control regs
+ gicreg_t trn_space[FRAME_LEN]; // translation space
+ gicreg_t vsgi_space[FRAME_LEN]; // vSGI space (v4.1+)
+} compact align(4);
+
+typedef unsigned char lpi_entry_t;
+
+struct arm_gic
+{
+ unsigned int max_intid;
+ struct gic_pe pes[NR_CPU];
+
+ struct {
+ unsigned int lpi_nr;
+ unsigned int spi_nr;
+ unsigned int espi_nr;
+ bool lpi_ready;
+ bool nmi_ready;
+ bool has_espi;
+ bool msi_via_spi;
+ };
+
+ struct {
+ gicreg_t* dist_base;
+ union {
+ struct gic_its* its;
+ struct gic_its_v41* its_v41;
+ };
+ } mmrs;
+
+ struct {
+ union {
+ ptr_t prop;
+ lpi_entry_t* property;
+ };
+
+ ptr_t pend;
+ BITMAP(gic_bmp) pendings;
+ } lpi_tables;
+
+ struct gic_distributor dist;
+ struct gic_distributor dist_e;
+
+ struct {
+ struct gic_idomain* lpi;
+ struct gic_idomain* spi;
+ struct gic_idomain* espi;
+ } idomain;
+};
+
+#endif /* __LUNAIX_GIC_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_TLB_H
+#define __LUNAIX_ARCH_TLB_H
+
+#include <lunaix/types.h>
+
+#include <asm/aa64_mmu.h>
+#include <asm/aa64_sysinst.h>
+
+#define pack_va(asid, ttl, va)                  \
+    ((((asid) & 0xffffUL) << 48) |              \
+     (((ttl) & 0b1111UL) << 44) |               \
+     (pfn(va) & ((1UL << 44) - 1)))
+
+#define pack_rva(asid, ttl, base, n, scale)     \
+    ((((asid) & 0xffffUL) << 48) |              \
+     ((_MMU_TG & 0b11UL) << 46) |               \
+     (((scale) & 0b11UL) << 44) |               \
+     (((n) & 0x1fUL) << 39) |                   \
+     (((ttl) & 0b11UL) << 37) |                 \
+     (pfn(base) & ((1UL << 37) - 1)))
+
+/**
+ * @brief Invalidate an entry of all address space
+ *
+ * @param va
+ */
+static inline void must_inline
+__tlb_invalidate(ptr_t va)
+{
+ sys_a1(tlbi_vaae1, pack_va(0, 0, va));
+}
+
+/**
+ * @brief Invalidate an entry of an address space identified
+ * by ASID
+ *
+ * @param va
+ */
+static inline void must_inline
+__tlb_flush_asid(unsigned int asid, ptr_t va)
+{
+ sys_a1(tlbi_vae1, pack_va(asid, 0, va));
+}
+
+/**
+ * @brief Invalidate an entry of global address space
+ *
+ * @param va
+ */
+static inline void must_inline
+__tlb_flush_global(ptr_t va)
+{
+ __tlb_flush_asid(0, va);
+}
+
+/**
+ * @brief Invalidate the entire TLB
+ */
+static inline void must_inline
+__tlb_flush_all()
+{
+ sys_a0(tlbi_alle1);
+}
+
+/**
+ * @brief Invalidate an entire address space
+ *
+ * @param asid
+ */
+static inline void must_inline
+__tlb_flush_asid_all(unsigned int asid)
+{
+ sys_a1(tlbi_aside1, pack_va(asid, 0, 0));
+}
+
+
+/**
+ * @brief Invalidate entries of all address spaces
+ *
+ * @param addr
+ * @param npages
+ */
+static inline void
+tlb_flush_range(ptr_t addr, unsigned int npages)
+{
+#ifdef _MMU_USE_OA52
+ for (unsigned int i = 0; i < npages; i++)
+ {
+ __tlb_invalidate(addr + i * PAGE_SIZE);
+ }
+#else
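+    // NOTE: assumes npages fits the 5-bit NUM field (SCALE fixed at 0)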
+ sys_a1(tlbi_rvaae1, pack_rva(0, 0, addr, npages, 0));
+#endif
+}
+
+/**
+ * @brief Invalidate entries of an address space identified
+ * by ASID
+ *
+ * @param asid
+ * @param addr
+ * @param npages
+ */
+static inline void
+tlb_flush_asid_range(unsigned int asid, ptr_t addr, unsigned int npages)
+{
+#ifdef _MMU_USE_OA52
+ for (unsigned int i = 0; i < npages; i++)
+ {
+ __tlb_flush_asid(asid, addr + i * PAGE_SIZE);
+ }
+#else
+ sys_a1(tlbi_rvae1, pack_rva(asid, 0, addr, npages, 0));
+#endif
+}
+
+#include <asm-generic/tlb-shared.h>
+
+#endif /* __LUNAIX_VMTLB_H */
--- /dev/null
+#ifndef __LUNAIX_BASE_DEFS_LD_INC
+#define __LUNAIX_BASE_DEFS_LD_INC
+
+#define __LD__
+#include <asm/mempart.h>
+
+#define KEXEC_BASE KERNEL_IMG
+#define PAGE_GRAN 4K
+
+#define ENTRY_POINT start_
+
+#define LOAD_OFF 0x100000
+
+
+#endif /* __LUNAIX_BASE_DEFS_LD_INC */
--- /dev/null
+#ifndef __LUNAIX_BOOT_SECS_LD_INC
+#define __LUNAIX_BOOT_SECS_LD_INC
+
+#include "base_defs.ld.inc"
+
+.boot.text BLOCK(PAGE_GRAN) :
+{
+ *(.boot.text)
+} : boot_text
+
+.boot.data BLOCK(PAGE_GRAN) :
+{
+ *(.boot.data)
+ *(.boot.bss)
+} : boot_data
+
+#endif
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_ARCH_ELF_H
+#define __LUNAIX_ARCH_ELF_H
+
+#include <sys-generic/elf.h>
+
+#endif /* __LUNAIX_ARCH_ELF_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_FAILSAFE_H
+#define __LUNAIX_ARCH_FAILSAFE_H
+
+#define STACK_SANITY 0xbeefc0de
+
+#ifndef __ASM__
+
+#include <lunaix/types.h>
+
+static inline bool
+check_bootstack_sanity()
+{
+ extern unsigned int __kinit_stack_end[];
+
+    // every guard word must hold the sanity value; OR-folding
+    // would let a zeroed word slip through
+    return __kinit_stack_end[0] == STACK_SANITY
+        && __kinit_stack_end[1] == STACK_SANITY
+        && __kinit_stack_end[2] == STACK_SANITY
+        && __kinit_stack_end[3] == STACK_SANITY;
+}
+
+static inline void must_inline noret
+failsafe_diagnostic() {
+ // TODO
+ unreachable;
+}
+
+#endif
+
+#endif /* __LUNAIX_ARCH_FAILSAFE_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_SYSCALL_UTILS_H
+#define __LUNAIX_ARCH_SYSCALL_UTILS_H
+
+#include <klibc/string.h>
+
+static inline void
+convert_valist(va_list* ap_ref, sc_va_list syscall_ap)
+{
+ memcpy(ap_ref, syscall_ap, sizeof(*ap_ref));
+}
+
+#endif /* __LUNAIX_ARCH_SYSCALL_UTILS_H */
--- /dev/null
+#include <lunaix/types.h>
+#include <lunaix/device.h>
+#include <lunaix/spike.h>
+#include <lunaix/mm/valloc.h>
+#include <lunaix/mm/page.h>
+#include <lunaix/mm/mmio.h>
+#include <lunaix/syslog.h>
+
+#include <klibc/string.h>
+
+#include <hal/devtree.h>
+
+#include <asm/aa64_isrm.h>
+#include <asm/soc/gic.h>
+
+static struct arm_gic gic;
+
+LOG_MODULE("gic")
+
+DEFINE_BMP_INIT_OP(gic_bmp, valloc);
+
+DEFINE_BMP_QUERY_OP(gic_bmp);
+
+DEFINE_BMP_SET_OP(gic_bmp);
+
+DEFINE_BMP_ALLOCFROM_OP(gic_bmp);
+
+
+/* ++++++ GIC device-tree retrieval ++++++ */
+
+static void
+__setup_pe_rdist(struct dt_prop_iter* prop)
+{
+ ptr_t base;
+ size_t len, off;
+ int i;
+
+ base = dtprop_reg_nextaddr(prop);
+ len = dtprop_reg_nextlen(prop);
+
+ assert(len >= NR_CPU * FRAME_SIZE * 2);
+
+ i = 0;
+ base = ioremap(base, len);
+    off = 0;
+
+ for (; i < NR_CPU; i++) {
+ gic.pes[i]._rd = (struct gic_rd*) (base + off);
+ off += sizeof(struct gic_rd);
+ }
+}
+
+static void
+__create_its(struct dt_node* gic_node)
+{
+ struct dt_node* its_node;
+ struct dt_node_iter iter;
+ struct dt_prop_iter prop;
+ ptr_t its_base;
+ size_t its_size;
+
+ dt_begin_find(&iter, gic_node, "its");
+
+ if (!dt_find_next(&iter, (struct dt_node_base**)&its_node)) {
+ return;
+ }
+
+ dt_decode_reg(&prop, its_node, reg);
+
+ its_base = dtprop_reg_nextaddr(&prop);
+ its_size = dtprop_reg_nextlen(&prop);
+
+ assert(its_size >= sizeof(struct gic_its));
+
+ gic.mmrs.its = (struct gic_its*)ioremap(its_base, its_size);
+}
+
+static void
+gic_create_from_dt()
+{
+ struct dt_node* gic_node;
+ struct dt_node_iter iter;
+ struct dt_prop_iter prop;
+ ptr_t ptr;
+ size_t sz;
+
+ dt_begin_find(&iter, NULL, "interrupt-controller");
+
+ if (!dt_find_next(&iter, (struct dt_node_base**)&gic_node)) {
+ fail("expected /interrupt-controller node, but found none");
+ }
+
+ dt_decode_reg(&prop, gic_node, reg);
+
+ ptr = dtprop_reg_nextaddr(&prop);
+ sz = dtprop_reg_nextlen(&prop);
+ gic.mmrs.dist_base = (gicreg_t*)ioremap(ptr, sz);
+
+ __setup_pe_rdist(&prop);
+
+ // ignore cpu_if, as we use sysreg to access them
+ dtprop_next_n(&prop, 2);
+
+ // ignore vcpu_if, as we dont do any EL2 stuff
+
+ __create_its(gic_node);
+}
+
+
+/* ++++++ GIC driver ++++++ */
+
+static void
+__config_interrupt(struct arm_gic* gic, struct gic_distributor* dist,
+ struct gic_interrupt* ent)
+{
+ unsigned int intid_rel;
+ unsigned long trig_index;
+
+ intid_rel = ent->intid - ent->domain->base;
+
+ if (ent->config.class == GIC_LPI) {
+ lpi_entry_t entry = 0;
+
+ entry |= LPI_EN;
+
+ gic->lpi_tables.property[intid_rel] = entry;
+
+ // clear any pending when we (re-)configuring
+ bitmap_set(gic_bmp, &gic->lpi_tables.pendings, intid_rel, false);
+
+ return;
+ }
+
+
+    // {IGRPMODR, IGROUPR} = {0, 1} selects Non-secure Group 1
+    bitmap_set(gic_bmp, &dist->group, intid_rel, 1);
+    bitmap_set(gic_bmp, &dist->grpmod, intid_rel, 0);
+
+ trig_index = intid_rel * 2;
+ bitmap_set(gic_bmp, &dist->icfg, trig_index, 0);
+ if (ent->config.trigger == GIC_TRIG_EDGE) {
+ bitmap_set(gic_bmp, &dist->icfg, trig_index + 1, 1);
+ } else {
+ bitmap_set(gic_bmp, &dist->icfg, trig_index + 1, 0);
+ }
+
+ if (gic->nmi_ready) {
+ bitmap_set(gic_bmp, &dist->nmi, intid_rel, ent->config.as_nmi);
+ }
+
+ bitmap_set(gic_bmp, &dist->enable, intid_rel, true);
+}
+
+static void
+__undone_interrupt(struct arm_gic* gic, struct gic_distributor* dist,
+ struct gic_interrupt* ent)
+{
+ unsigned int intid_rel;
+
+ intid_rel = ent->intid - ent->domain->base;
+
+ if (ent->config.class == GIC_LPI) {
+ gic->lpi_tables.property[intid_rel] = 0;
+
+ // clear any pending when we (re-)configuring
+ bitmap_set(gic_bmp, &gic->lpi_tables.pendings, intid_rel, false);
+
+ return;
+ }
+
+ bitmap_set(gic_bmp, &dist->disable, intid_rel, true);
+
+ if (gic->nmi_ready) {
+ bitmap_set(gic_bmp, &dist->nmi, intid_rel, false);
+ }
+}
+
+static struct gic_idomain*
+__idomain(int nr_ints, unsigned int base, bool extended)
+{
+ struct gic_idomain* rec;
+
+ rec = valloc(sizeof(*rec));
+
+ bitmap_init(gic_bmp, &rec->ivmap, nr_ints);
+ hashtable_init(rec->recs);
+
+ rec->base = base;
+ rec->extended = extended;
+
+ return rec;
+}
+
+static inline void
+__init_distributor(struct gic_distributor* d,
+ gicreg_t* base, unsigned int nr_ints)
+{
+ bitmap_init_ptr(gic_bmp,
+ &d->group, nr_ints, gic_regptr(base, GICD_IGROUPRn));
+
+ bitmap_init_ptr(gic_bmp,
+ &d->grpmod, nr_ints, gic_regptr(base, GICD_IGRPMODRn));
+
+ bitmap_init_ptr(gic_bmp,
+ &d->enable, nr_ints, gic_regptr(base, GICD_ISENABLER));
+
+ bitmap_init_ptr(gic_bmp,
+ &d->disable, nr_ints, gic_regptr(base, GICD_ICENABLER));
+
+ bitmap_init_ptr(gic_bmp,
+ &d->icfg, nr_ints * 2, gic_regptr(base, GICD_ICFGR));
+
+ bitmap_init_ptr(gic_bmp,
+ &d->nmi, nr_ints, gic_regptr(base, GICD_INMIR));
+}
+
+static inline ptr_t
+__alloc_lpi_table(size_t table_sz)
+{
+ unsigned int val;
+ struct leaflet* tab;
+
+ val = page_aligned(table_sz);
+ tab = alloc_leaflet(count_order(leaf_count(val)));
+ leaflet_wipe(tab);
+
+ return leaflet_addr(tab);
+}
+
+static struct gic_idomain*
+__deduce_domain(unsigned int intid)
+{
+ if (intid <= INITID_SGI_END) {
+ return gic.pes[0].idomain.local_ints;
+ }
+
+ if (intid <= INITID_PPI_END) {
+ return gic.pes[0].idomain.local_ints;
+ }
+
+ if (intid <= INITID_SPI_END) {
+ return gic.idomain.spi;
+ }
+
+ if (INITID_ePPI_BASE <= intid && intid <= INITID_ePPI_END) {
+ return gic.pes[0].idomain.eppi;
+ }
+
+ if (INITID_eSPI_BASE <= intid && intid <= INITID_eSPI_END) {
+ return gic.idomain.espi;
+ }
+
+ if (intid >= INITID_LPI_BASE) {
+ return gic.idomain.lpi;
+ }
+
+ return NULL;
+}
+
+static struct gic_interrupt*
+__find_interrupt_record(unsigned int intid)
+{
+ struct gic_idomain* domain;
+
+ domain = __deduce_domain(intid);
+
+ if (!domain) {
+ return NULL;
+ }
+
+ struct gic_interrupt *pos, *n;
+
+ hashtable_hash_foreach(domain->recs, intid, pos, n, node)
+ {
+ if (pos->intid == intid) {
+ return pos;
+ }
+ }
+
+ return NULL;
+}
+
+static inline struct gic_interrupt*
+__register_interrupt(struct gic_idomain* domain,
+ unsigned int intid, struct gic_int_param* param)
+{
+ struct gic_interrupt* interrupt;
+
+ interrupt = valloc(sizeof(*interrupt));
+ interrupt->config = (struct gic_intcfg) {
+ .class = param->class,
+ .trigger = param->trigger,
+ .group = param->group,
+ .as_nmi = param->as_nmi
+ };
+
+ interrupt->intid = intid;
+ interrupt->domain = domain;
+
+ hashtable_hash_in(domain->recs, &interrupt->node, intid);
+
+ return interrupt;
+}
+
+static struct gic_distributor*
+__attached_distributor(int cpu, struct gic_interrupt* ent)
+{
+ enum gic_int_type iclass;
+
+ iclass = ent->config.class;
+
+ if (iclass == GIC_PPI || iclass == GIC_SGI) {
+ return &gic.pes[cpu].rdist;
+ }
+
+ if (ent->domain->extended) {
+ return &gic.dist_e;
+ }
+
+ return &gic.dist;
+}
+
+static void
+gic_configure_icc()
+{
+    sysreg_flagging(ICC_SRE_EL1,
+                    ICC_SRE_SRE | ICC_SRE_DFB | ICC_SRE_DIB,
+                    0);
+
+    sysreg_flagging(ICC_CTLR_EL1,
+                    ICC_CTRL_CBPR,
+                    ICC_CTRL_EOImode | ICC_CTRL_PMHE);
+
+    // disable all group 0 interrupts as those are meant for EL3
+    sysreg_flagging(ICC_IGRPEN0_EL1, 0, ICC_IGRPEN_ENABLE);
+
+    // enable all group 1 interrupts, we'll stick with EL1_NS
+    sysreg_flagging(ICC_IGRPEN1_EL1, ICC_IGRPEN_ENABLE, 0);
+}
+
+static void
+gic_configure_global(struct arm_gic* gic)
+{
+ gicreg64_t reg;
+ unsigned int val, max_nr_spi;
+
+ reg = gic->mmrs.dist_base[GICD_TYPER];
+
+ // check if eSPI supported
+ gic->has_espi = (reg & GICD_TYPER_ESPI);
+ if (gic->has_espi) {
+ val = BITS_GET(reg, GICD_TYPER_nESPI);
+ gic->espi_nr = 32 * (val + 1);
+ }
+
+ // Parse IDbits
+ val = BITS_GET(reg, GICD_TYPER_IDbits);
+    gic->max_intid = (1 << (val + 1)) - 1;
+
+ // LPI is supported
+ if (val + 1 >= 14) {
+ val = BITS_GET(reg, GICD_TYPER_nLPI);
+ if (val) {
+ gic->lpi_nr = 1 << (val + 1);
+ }
+ else {
+ gic->lpi_nr = gic->max_intid - INITID_LPI_BASE + 1;
+ }
+ }
+
+ // check if SPI supported
+ val = BITS_GET(reg, GICD_TYPER_nSPI);
+ if (val) {
+ max_nr_spi = 32 * (val + 1);
+ gic->spi_nr = MIN(max_nr_spi, INITID_SPEC_BASE);
+ gic->spi_nr -= INITID_SPI_BASE;
+ } else {
+ gic->spi_nr = 0;
+ }
+
+ gic->nmi_ready = (reg & GICD_TYPER_NMI);
+ gic->msi_via_spi = (reg & GICD_TYPER_MBIS);
+
+ __init_distributor(&gic->dist, gic->mmrs.dist_base, gic->spi_nr);
+ __init_distributor(&gic->dist_e, gic->mmrs.dist_base, gic->espi_nr);
+
+
+ if (gic->spi_nr) {
+ gic->idomain.spi = __idomain(gic->spi_nr, INITID_SPI_BASE, false);
+ }
+ if (gic->espi_nr) {
+ gic->idomain.espi = __idomain(gic->espi_nr, INITID_eSPI_BASE, true);
+ }
+ if (gic->lpi_nr) {
+ gic->idomain.lpi = __idomain(gic->lpi_nr, INITID_LPI_BASE, false);
+ }
+
+ gic->lpi_tables.prop = __alloc_lpi_table(gic->lpi_nr);
+ gic->lpi_tables.pend = __alloc_lpi_table(gic->lpi_nr / 8);
+
+ bitmap_init_ptr(gic_bmp,
+ &gic->lpi_tables.pendings, gic->lpi_nr, gic->lpi_tables.pend);
+}
+
+static void
+gic_configure_pe(struct arm_gic* gic, struct gic_pe* pe)
+{
+ unsigned int nr_local_ints;
+ gicreg64_t reg;
+
+    reg = gic_reg64(pe->_rd->base, GICR_TYPER);
+
+ pe->affinity = BITS_GET(reg, GICR_TYPER_AffVal);
+ pe->ppi_nr = INITID_PPI_BASE;
+ switch (BITS_GET(reg, GICR_TYPER_PPInum))
+ {
+ case 1:
+ pe->ppi_nr += 1088 - INITID_ePPI_BASE;
+ pe->eppi_ready = true;
+ break;
+ case 2:
+ pe->ppi_nr += 1120 - INITID_ePPI_BASE;
+ pe->eppi_ready = true;
+ break;
+ }
+
+ nr_local_ints = pe->ppi_nr + INITID_PPI_BASE;
+
+ pe->idomain.local_ints = __idomain(32, 0, false);
+ pe->idomain.eppi = __idomain(nr_local_ints - 32, INITID_ePPI_BASE, true);
+
+ __init_distributor(&pe->rdist, pe->_rd->sgi_base, nr_local_ints);
+
+ reg = 0;
+ BITS_SET(reg, GICR_BASER_PAddr, gic->lpi_tables.prop);
+ BITS_SET(reg, GICR_BASER_Share, 0b01);
+ BITS_SET(reg, GICR_PROPBASER_IDbits, ilog2(gic->max_intid));
+    gic_reg64(pe->_rd->base, GICR_PROPBASER) = reg;     // 64-bit, RD frame
+
+ reg = 0;
+ reg |= GICR_PENDBASER_PTZ;
+ BITS_SET(reg, GICR_BASER_PAddr, gic->lpi_tables.pend);
+ BITS_SET(reg, GICR_BASER_Share, 0b01);
+    gic_reg64(pe->_rd->base, GICR_PENDBASER) = reg;
+}
+
+struct gic_interrupt*
+aa64_isrm_ivalloc(struct gic_int_param* param, isr_cb handler)
+{
+ unsigned int iv;
+ struct gic_idomain* domain;
+ int cpu;
+
+ cpu = param->cpu_id;
+
+ assert(cpu == 0);
+
+ switch (param->class)
+ {
+ case GIC_PPI:
+ if (!param->ext_range) {
+ domain = gic.pes[cpu].idomain.local_ints;
+ }
+ else {
+ domain = gic.pes[cpu].idomain.eppi;
+ }
+ break;
+
+ case GIC_SGI:
+ domain = gic.pes[cpu].idomain.local_ints;
+ break;
+
+ case GIC_SPI:
+ if (!param->ext_range) {
+ assert(gic.spi_nr > 0);
+ domain = gic.idomain.spi;
+ }
+ else {
+ assert(gic.has_espi);
+ domain = gic.idomain.espi;
+ }
+ break;
+
+ case GIC_LPI:
+ assert(gic.lpi_ready);
+ domain = gic.idomain.lpi;
+ break;
+
+ default:
+ fail("unknown interrupt class");
+ break;
+ }
+
+ if (!bitmap_alloc(gic_bmp, &domain->ivmap, 0, &iv)) {
+ FATAL("out of usable iv for class=%d", param->class);
+ }
+
+ iv += domain->base;
+
+ if (param->class == GIC_SPI && !param->ext_range && iv >= INITID_ePPI_BASE)
+ {
+ WARN("PPI vector=%d falls in extended range, while not requested.", iv);
+ param->ext_range = true;
+ }
+
+ struct gic_interrupt* ent;
+ struct gic_distributor* dist;
+
+ ent = __register_interrupt(domain, iv, param);
+ dist = __attached_distributor(cpu, ent);
+
+ __config_interrupt(&gic, dist, ent);
+
+ ent->handler = handler;
+
+    return ent;
+}
+
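+// acknowledge the highest-priority pending interrupt by reading
+// ICC_IAR1_EL1 and record it as active; must be paired with a
+// write to ICC_EOIR1_EL1 (gic_signal_eoi) once handled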
+static void
+gic_update_active()
+{
+ reg_t val;
+ unsigned int intid;
+ struct gic_interrupt* intr;
+ struct gic_pe* pe;
+
+ pe = &gic.pes[0];
+ val = read_sysreg(ICC_IAR1_EL1);
+ intid = (unsigned int)val & ((1 << 24) - 1);
+
+ if (check_special_intid(intid)) {
+ return;
+ }
+
+ intr = __find_interrupt_record(intid);
+ pe->active = intr;
+ pe->iar_val = val;
+}
+
+static void
+gic_signal_eoi()
+{
+ struct gic_pe* pe;
+
+ pe = &gic.pes[0];
+ if (!pe->active) {
+ return;
+ }
+
+ pe->active = NULL;
+ set_sysreg(ICC_EOIR1_EL1, pe->iar_val);
+}
+
+void
+isrm_init()
+{
+ // nothing to do
+}
+
+void
+isrm_ivfree(int iv)
+{
+ struct gic_interrupt* ent;
+ struct gic_distributor* dist;
+
+ ent = __find_interrupt_record(iv);
+ if (!ent) {
+ return;
+ }
+
+ dist = __attached_distributor(0, ent);
+ __undone_interrupt(&gic, dist, ent);
+
+ hlist_delete(&ent->node);
+ vfree(ent);
+}
+
+int
+isrm_ivosalloc(isr_cb handler)
+{
+ return isrm_ivexalloc(handler);
+}
+
+int
+isrm_ivexalloc(isr_cb handler)
+{
+ struct gic_int_param param;
+ struct gic_interrupt* intr;
+
+ param = (struct gic_int_param) {
+ .class = GIC_SPI,
+ .group = GIC_G1NS,
+ .trigger = GIC_TRIG_EDGE,
+ };
+
+    intr = aa64_isrm_ivalloc(&param, handler);
+
+ return intr->intid;
+}
+
+int
+isrm_bindirq(int irq, isr_cb irq_handler)
+{
+    // Not supported
+    return -1;
+}
+
+void
+isrm_bindiv(int iv, isr_cb handler)
+{
+ // Not supported
+}
+
+isr_cb
+isrm_get(int iv)
+{
+ struct gic_interrupt* intr;
+
+ intr = __find_interrupt_record(iv);
+ if (!intr) {
+ return NULL;
+ }
+
+ return intr->handler;
+}
+
+ptr_t
+isrm_get_payload(const struct hart_state* state)
+{
+ struct gic_interrupt* active;
+
+ active = gic.pes[0].active;
+ assert(active);
+
+    return (ptr_t)active->payload;
+}
+
+void
+isrm_set_payload(int iv, ptr_t payload)
+{
+ struct gic_interrupt* intr;
+
+    intr = __find_interrupt_record(iv);
+    if (!intr) {
+        return;
+    }
+
+    intr->payload = (void*)payload;
+}
+
+void
+isrm_irq_attach(int irq, int iv, cpu_t dest, u32_t flags)
+{
+ // Not supported
+}
+
+void
+isrm_notify_eoi(cpu_t id, int iv)
+{
+    gic_signal_eoi();
+}
+
+void
+isrm_notify_eos(cpu_t id)
+{
+ isrm_notify_eoi(id, 0);
+}
+
+
+static void
+gic_init()
+{
+ memset(&gic, 0, sizeof(gic));
+
+ gic_create_from_dt();
+
+ // configure the system interfaces
+ gic_configure_icc();
+
+ // configure global distributor
+ gic_configure_global(&gic);
+
+ // configure per-PE local distributor (redistributor)
+ for (int i = 0; i < NR_CPU; i++)
+ {
+ gic_configure_pe(&gic, &gic.pes[i]);
+ }
+}
+
+static struct device_def dev_arm_gic = {
+ .name = "ARM Generic Interrupt Controller",
+ .class = DEVCLASS(DEVIF_SOC, DEVFN_CFG, DEV_INTC),
+ .init = gic_init
+};
+EXPORT_DEVICE(arm_gic, &dev_arm_gic, load_sysconf);
\ No newline at end of file
--- /dev/null
+#include <lunaix/trace.h>
+#include <asm/aa64_exception.h>
+#include <sys-generic/trace_arch.h>
+
+static inline char*
+__type_name(reg_t syndrome)
+{
+ switch (BITS_GET(syndrome, SYNDROME_ETYPE))
+ {
+ case EXCEPTION_SYNC:
+ return "sync";
+ case EXCEPTION_IRQ:
+ return "async (irq)";
+ case EXCEPTION_FIQ:
+ return "async (fiq)";
+ case EXCEPTION_SERR:
+ return "async (serr)";
+ }
+
+    return "unknown";
+}
+
+void
+trace_print_transistion_short(struct hart_state* hstate)
+{
+ struct exec_param* execp;
+ reg_t syndrome;
+
+ execp = &hstate->execp;
+ syndrome = execp->syndrome;
+
+ trace_log("%s from EL%d: ec=%04x, iss=%08lx, il=%d",
+              __type_name(syndrome), !spsr_from_el0(execp->spsr),
+ esr_ec(syndrome), esr_iss(syndrome), esr_inst32(syndrome));
+}
+
+void
+trace_print_transition_full(struct hart_state* hstate)
+{
+ struct exec_param* execp;
+ reg_t syndrome;
+
+ execp = &hstate->execp;
+ syndrome = execp->syndrome;
+
+ trace_log("exception %s from EL%d",
+              __type_name(syndrome), !spsr_from_el0(execp->spsr));
+ trace_log(" ec=0x%08lx, iss=0x%08lx, il=%d",
+ esr_ec(syndrome), esr_iss(syndrome), esr_inst32(syndrome));
+ trace_log(" esr=0x%016lx, spsr=0x%016lx",
+ syndrome, execp->spsr);
+ trace_log(" sp_el0=0x%016lx, sp_el1=0x%016lx",
+ execp->sp_el0, hart_sp(hstate));
+ trace_log(" pc=0x%016lx", execp->link);
+}
+
+void
+trace_dump_state(struct hart_state* hstate)
+{
+ struct regcontext* r;
+
+ r = &hstate->registers;
+
+ trace_log("hart state dump (depth=%d)", hstate->depth);
+
+ for (int i = 0; i < 30; i+=3)
+ {
+ trace_log(" x%02d=0x%016lx x%02d=0x%016lx x%02d=0x%016lx",
+ i, r->x[i],
+ i + 1, r->x[i + 1],
+ i + 2, r->x[i + 2]);
+ }
+
+ trace_log(" x30=0x%016lx x31=0x%016lx (sp)",
+ r->x[30], hart_sp(hstate));
+}
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_ARCH_GENERIC_BITS_H
+#define __LUNAIX_ARCH_GENERIC_BITS_H
+
+#define _BITS_GENMASK(h, l) \
+ (((1UL << ((h) + 1)) - 1) ^ ((1UL << (l)) - 1))
+
+#define _BITS_EXTRACT(from, h, l) \
+    (((from) & _BITS_GENMASK(h, l)) >> (l))
+
+#define _BITS_INSERT(to, from, h, l) \
+    (((to) & ~_BITS_GENMASK(h, l)) | (((from) << (l)) & _BITS_GENMASK(h, l)))
+
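+/*
+ * e.g. _BITS_GENMASK(7, 4) == 0xf0, _BITS_EXTRACT(0xab, 7, 4) == 0xa,
+ * _BITS_INSERT(0x0b, 0xa, 7, 4) == 0xab
+ */
+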
+#endif /* __LUNAIX_ARCH_GENERIC_BITS_H */
--- /dev/null
+#ifndef __LUNAIX_TLB_SHARED_H
+#define __LUNAIX_TLB_SHARED_H
+
+#include <lunaix/types.h>
+#include <lunaix/mm/procvm.h>
+
+/**
+ * @brief Invalidate a single entry of the kernel address space
+ *
+ * @param addr virtual address of the entry to invalidate
+ */
+static inline void
+tlb_flush_kernel(ptr_t addr)
+{
+ __tlb_flush_global(addr);
+}
+
+/**
+ * @brief Invalidate a range of entries of the kernel address space
+ *
+ * @param addr virtual address where the range starts
+ * @param npages number of pages covered by the range
+ */
+static inline void
+tlb_flush_kernel_ranged(ptr_t addr, unsigned int npages)
+{
+ for (unsigned int i = 0; i < npages; i++)
+ {
+ tlb_flush_kernel(addr + i * PAGE_SIZE);
+ }
+}
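+
+/* e.g. tlb_flush_kernel_ranged(va, 2) invalidates the two page
+   mappings starting at va (hypothetical usage) */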
+
+/**
+ * @brief Invalidate an entry within a process memory space
+ *
+ * @param mm the process memory space
+ * @param addr virtual address of the entry to invalidate
+ */
+void
+tlb_flush_mm(struct proc_mm* mm, ptr_t addr);
+
+/**
+ * @brief Invalidate a range of entries within a process memory space
+ *
+ * @param mm the process memory space
+ * @param addr virtual address where the range starts
+ * @param npages number of pages covered by the range
+ */
+void
+tlb_flush_mm_range(struct proc_mm* mm, ptr_t addr, unsigned int npages);
+
+/**
+ * @brief Invalidate an entry within a vm region
+ *
+ * @param vmr the vm region
+ * @param va virtual address of the entry to invalidate
+ */
+void
+tlb_flush_vmr(struct mm_region* vmr, ptr_t va);
+
+/**
+ * @brief Invalidate all entries within a vm region
+ *
+ * @param vmr the vm region
+ */
+void
+tlb_flush_vmr_all(struct mm_region* vmr);
+
+/**
+ * @brief Invalidate a range of entries within a vm region
+ *
+ * @param vmr the vm region
+ * @param addr virtual address where the range starts
+ * @param npages number of pages covered by the range
+ */
+void
+tlb_flush_vmr_range(struct mm_region* vmr, ptr_t addr, unsigned int npages);
+
+#endif /* __LUNAIX_TLB_SHARED_H */
#include <lunaix/hart_state.h>
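+/* a frame pointer is considered valid only if it falls within the
+   statically allocated kernel stack (__bsskstack) bounds */
+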
+static inline bool
+arch_valid_fp(ptr_t ptr) {
+ extern int __bsskstack_end[];
+ extern int __bsskstack_start[];
+ return ((ptr_t)__bsskstack_start <= ptr && ptr <= (ptr_t)__bsskstack_end);
+}
+
void
trace_print_transition_short(struct hart_state* hstate);
#include <asm/tlb.h>
#include <lunaix/process.h>
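+/* _default (weak linkage) lets an architecture override these generic,
+   ASID-based implementations with its own */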
-void
+_default void
tlb_flush_mm(struct proc_mm* mm, ptr_t addr)
{
__tlb_flush_asid(procvm_asid(mm), addr);
}
-void
+_default void
tlb_flush_mm_range(struct proc_mm* mm, ptr_t addr, unsigned int npages)
{
tlb_flush_asid_range(procvm_asid(mm), addr, npages);
}
-void
+_default void
tlb_flush_vmr(struct mm_region* vmr, ptr_t va)
{
__tlb_flush_asid(procvm_asid(vmr->proc_vms), va);
}
-void
+_default void
tlb_flush_vmr_all(struct mm_region* vmr)
{
tlb_flush_asid_range(procvm_asid(vmr->proc_vms),
vmr->start, leaf_count(vmr->end - vmr->start));
}
-void
+_default void
tlb_flush_vmr_range(struct mm_region* vmr, ptr_t addr, unsigned int npages)
{
tlb_flush_asid_range(procvm_asid(vmr->proc_vms), addr, npages);
#define KERNEL_RDONLY ( KERNEL_PAGE | _PTE_NX )
#define KERNEL_ROEXEC ( KERNEL_PAGE | _PTE_X )
#define KERNEL_PGTAB ( KERNEL_PAGE | _PTE_W )
-#define KERNEL_DEFAULT KERNEL_PGTAB
#define USER_PAGE ( _PTE_P | _PTE_U )
#define USER_EXEC ( USER_PAGE | _PTE_X )
#define __LUNAIX_ARCH_TLB_H
#include <lunaix/compiler.h>
-#include <lunaix/mm/procvm.h>
-#include <lunaix/mm/physical.h>
+#include <asm/mm_defs.h>
/**
* @brief Invalidate an entry of all address space
}
}
-/**
- * @brief Invalidate an entry within a process memory space
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_mm(struct proc_mm* mm, ptr_t addr);
-
-/**
- * @brief Invalidate entries within a process memory space
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_mm_range(struct proc_mm* mm, ptr_t addr, unsigned int npages);
-
-/**
- * @brief Invalidate an entry within a vm region
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_vmr(struct mm_region* vmr, ptr_t va);
-
-/**
- * @brief Invalidate all entries within a vm region
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_vmr_all(struct mm_region* vmr);
-
-/**
- * @brief Invalidate entries within a vm region
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_vmr_range(struct mm_region* vmr, ptr_t addr, unsigned int npages);
+#include <asm-generic/tlb-shared.h>
#endif /* __LUNAIX_VMTLB_H */
"""
type(bool)
- default(not v(arch).startswith("x86"))
+ default(False)
+
+ if v(arch) == "aarch64":
+ set_value(True)
@ReadOnly
@Term("Maximum size of device tree blob (in KiB)")
#include <hal/ahci/scsi.h>
#include <hal/pci.h>
-#include <asm/x86_pmio.h>
-
#include <klibc/string.h>
#include <lunaix/block.h>
#include <asm-generic/isrm.h>
}
 // if the port did not respond, proceed with the reset anyway
port_reg[HBA_RPxSCTL] = (port_reg[HBA_RPxSCTL] & ~0xf) | 1;
- port_delay(100000); // wait at least one millisecond, roughly enough
+
port_reg[HBA_RPxSCTL] &= ~0xf;
}
sources([
"16x50_base.c",
- "16x50_pmio.c",
"16x50_mmio.c",
])
if config("xt_16x50"):
- sources("16x50_isa.c")
+ sources([
+ "16x50_pmio.c",
+ "16x50_isa.c"
+ ])
if config("pci_16x50"):
- sources("16x50_pci.c")
\ No newline at end of file
+ sources([
+ "16x50_pmio.c",
+ "16x50_pci.c"
+ ])
\ No newline at end of file
-use("vga")
+# use("vga")
sources([
"gfxm.c"
}
#define dt_decode_reg(dtpi, node, field) \
- dt_decode(dtpi, &(node)->base, (node)->(field), \
+ dt_decode(dtpi, &(node)->base, &(node)->field, \
(node)->base.sz_c + (node)->base.addr_c);
#define dt_decode_range(dtpi, node, field) \
- dt_decode(dtpi, &(node)->base, (node)->field, \
+ dt_decode(dtpi, &(node)->base, &(node)->field, \
(node)->base.sz_c * 2 + (node)->base.addr_c);
static inline void
return dtprop_extract(dtpi, 0);
}
+static inline ptr_t
+dtprop_reg_nextaddr(struct dt_prop_iter* dtpi)
+{
+ ptr_t t;
+
+ t = (ptr_t)dtprop_to_u64(dtprop_reg_addr(dtpi));
+ dtprop_next(dtpi);
+
+ return t;
+}
+
static inline dt_enc_t
dtprop_reg_len(struct dt_prop_iter* dtpi)
{
return dtprop_extract(dtpi, dtpi->node->addr_c);
}
+static inline size_t
+dtprop_reg_nextlen(struct dt_prop_iter* dtpi)
+{
+ size_t t;
+
+ t = (size_t)dtprop_to_u64(dtprop_reg_len(dtpi));
+ dtprop_next(dtpi);
+
+ return t;
+}
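+
+/* the next*() variants read the current field and then advance the
+   iterator (dtprop_next), easing sequential decoding of "reg" entries */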
+
static inline dt_enc_t
dtprop_range_childbus(struct dt_prop_iter* dtpi)
{
--- /dev/null
+#ifndef __LUNAIX_BITS_H
+#define __LUNAIX_BITS_H
+
+#include <lunaix/compiler.h>
+#include <asm/bits.h>
+
+#define BITFIELD(h, l) (h), (l)
+
+#define BIT(p) BITFIELD(p, p)
+#define BITFLAG(p) (1UL << (p))
+
+#define BITS_GENMASK(bitfield) _BITS_GENMASK(bitfield)
+
+#define BITS_GET(from, bitfield) _BITS_EXTRACT(from, bitfield)
+
+#define BITS_SET(to, bitfield, val) _BITS_INSERT(to, val, bitfield)
+
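+/*
+ * Typical use (hypothetical field): name the field once, then operate on it.
+ *   #define ESR_EC BITFIELD(31, 26)
+ *   ec = BITS_GET(esr, ESR_EC);
+ */
+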
+#endif /* __LUNAIX_BITS_H */
#define must_emit __attribute__((used))
#define unreachable __builtin_unreachable()
#define no_inline __attribute__((noinline))
+#define asmlinkage
#define _default weak
#define msbiti (sizeof(int) * 8 - 1)
#define clz(bits) __builtin_clz(bits)
+#define ctz(bits) __builtin_ctz(bits)
#ifdef CONFIG_ARCH_BITS_64
#define msbitl (sizeof(long) * 8 - 1)
#define clzl(bits) __builtin_clzl(bits)
+#define ctzl(bits) __builtin_ctzl(bits)
#else
#define msbitl msbiti
#define clzl(bits) clz(bits)
+#define ctzl(bits) ctz(bits)
#endif
#define sadd_of(a, b, of) __builtin_sadd_overflow(a, b, of)
#define umul_of(a, b, of) __builtin_umul_overflow(a, b, of)
#define umull_of(a, b, of) __builtin_umull_overflow(a, b, of)
#define offsetof(f, m) __builtin_offsetof(f, m)
+#define select(cond, y, n) __builtin_choose_expr(cond, y, n)
+#define is_const(v) __builtin_constant_p(v)
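+/* select() resolves to one arm at compile time (__builtin_choose_expr);
+   is_const() tests for a compile-time constant (__builtin_constant_p) */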
#define prefetch_rd(ptr, ll) __builtin_prefetch((ptr), 0, ll)
#define prefetch_wr(ptr, ll) __builtin_prefetch((ptr), 1, ll)
unreachable;
}
+#ifdef CONFIG_ARCH_X86_32
+#undef asmlinkage
+#define asmlinkage __attribute__((regparm(0)))
+#endif
+
#endif /* __LUNAIX_COMPILER_H */
#define DEV_GFXA 12
#define DEV_VGA 13
#define DEV_ACPI 14
+#define DEV_INTC 15
struct devident
{
--- /dev/null
+#ifndef __LUNAIX_BITMAP_H
+#define __LUNAIX_BITMAP_H
+
+#include <lunaix/types.h>
+#include <lunaix/spike.h>
+#include <klibc/string.h>
+
+// first bit of a bitmap chunk placed at the most significant bit
+#define BMP_ORIENT_MSB 0
+// first bit of a bitmap chunk placed at the least significant bit
+#define BMP_ORIENT_LSB 1
+
+#define BMP_NAME(name) bitmap_##name
+#define BMP_PARAM_NAME bmp
+#define BMP_PARAM(name) struct BMP_NAME(name) *BMP_PARAM_NAME
+#define BMP_RAWBYTES(bits) (((bits) + 7) / 8)
+#define BMP_SIZE(t, bits) \
+    (((BMP_RAWBYTES(bits) + sizeof(t) - 1) / sizeof(t)) * sizeof(t))
+
+#define BMP_LEN(t, bits) (BMP_SIZE(t, bits) / sizeof(t))
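+/* e.g. BMP_SIZE(u32_t, 70) == 12 (9 raw bytes padded to 3 chunks);
+   BMP_LEN(u32_t, 70) == 3 */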
+
+#define _BITMAP_STRUCT(type, size, name, orient) struct BMP_NAME(name)
+
+
+#define _DECLARE_BMP(type, size, name, orient) \
+struct BMP_NAME(name) { \
+ type *_map; \
+ unsigned long nr_bits; \
+}
+
+#define _DECLARE_BMP_STATIC(type, nr_bits, name, orient) \
+struct BMP_NAME(name) { \
+ type _map[BMP_LEN(type, nr_bits)]; \
+}
+
+#define BMP_STRUCT_MAP BMP_PARAM_NAME->_map
+#define BMP_STRUCT_NRB BMP_PARAM_NAME->nr_bits
+
+#define _BMP_OP_CALL(type, size, name, orient, suffix, ...) \
+bitmap_##name##_##suffix(__VA_ARGS__)
+
+#define _DEFINE_BMP_INIT_OP(type, size, name, orient, allocfn) \
+static inline void \
+bitmap_##name##_init_with(BMP_PARAM(name), unsigned long nr_bits, ptr_t map) \
+{ \
+(void)(is_const(size) ? ({ \
+    /* static bitmap: storage is embedded, just clear it */ \
+    memset(BMP_STRUCT_MAP, 0, sizeof(BMP_STRUCT_MAP));0; \
+}) : ({ \
+    /* dynamic bitmap: adopt and clear the given backing store */ \
+    BMP_STRUCT_MAP = (type*)map; \
+    BMP_STRUCT_NRB = nr_bits; \
+    memset(BMP_STRUCT_MAP, 0, BMP_SIZE(type, nr_bits));0; \
+})); \
+} \
+static inline void \
+bitmap_##name##_init(BMP_PARAM(name), unsigned long nr_bits) \
+{ \
+(void)(is_const(size) ? ({ \
+    bitmap_##name##_init_with(BMP_PARAM_NAME, nr_bits, NULL);0; \
+}) : ({ \
+    bitmap_##name##_init_with(BMP_PARAM_NAME, nr_bits, \
+                              allocfn(BMP_SIZE(type, nr_bits)));0; \
+})); \
+}
+
+
+#define _DEFINE_BMP_QUERY_OP(type, size, name, orient) \
+static inline bool \
+bitmap_##name##_query(BMP_PARAM(name), unsigned long pos) \
+{ \
+    assert(pos < size); \
+    unsigned long n = pos / (sizeof(type) * 8); \
+    int i = pos % (sizeof(type) * 8); \
+    type at = BMP_STRUCT_MAP[n]; \
+    type msk = (type)1 << select(orient == BMP_ORIENT_MSB, \
+                                 sizeof(type) * 8 - 1 - i, i ); \
+    return !!(at & msk); \
+}
+
+
+#define _DEFINE_BMP_SET_OP(type, size, name, orient) \
+static inline void \
+bitmap_##name##_set(BMP_PARAM(name), unsigned long pos, bool val) \
+{ \
+    assert(pos < size); \
+    unsigned long n = pos / (sizeof(type) * 8); \
+    int i = pos % (sizeof(type) * 8); \
+    type at = BMP_STRUCT_MAP[n]; \
+    unsigned int off = select(orient == BMP_ORIENT_MSB, \
+                              sizeof(type) * 8 - 1 - i, i ); \
+    BMP_STRUCT_MAP[n] = (at & ~((type)1 << off)) | ((type)(!!val) << off); \
+}
+
+
+#define _DEFINE_BMP_ALLOCFROM_OP(type, size, name, orient) \
+static inline bool \
+bitmap_##name##_alloc_from(BMP_PARAM(name), unsigned long start, \
+                           unsigned long* _out) \
+{ \
+    unsigned long i, p = 0; \
+    unsigned int off = 0; \
+    type u; \
+    /* start is rounded down to chunk granularity */ \
+    i = start / (sizeof(type) * 8); \
+    /* skip fully occupied chunks, honouring the map boundary */ \
+    while (i < BMP_LEN(type, size) && BMP_STRUCT_MAP[i] == (type)-1) \
+        i++; \
+    if (i >= BMP_LEN(type, size)) \
+        return false; \
+    u = BMP_STRUCT_MAP[i]; \
+    /* locate the first clear bit within the chunk */ \
+    while (p < sizeof(type) * 8) { \
+        off = select(orient == BMP_ORIENT_MSB, \
+                     sizeof(type) * 8 - 1 - p, p); \
+        if (!(u & ((type)1 << off))) \
+            break; \
+        p++; \
+    } \
+    if (p >= sizeof(type) * 8) \
+        return false; \
+    BMP_STRUCT_MAP[i] |= (type)1 << off; \
+    *_out = i * sizeof(type) * 8 + p; \
+    return true; \
+}
+
+
+#define PREP_STATIC_BITMAP(type, name, nr_bits, orient) \
+    type, (nr_bits), name, orient
+#define PREP_BITMAP(type, name, orient) \
+    type, (BMP_STRUCT_NRB), name, orient
+
+
+#define DECLARE_BITMAP(bmpdef) _DECLARE_BMP(bmpdef)
+#define DECLARE_STATIC_BITMAP(bmpdef) _DECLARE_BMP_STATIC(bmpdef)
+#define BITMAP(bmpdef) _BITMAP_STRUCT(bmpdef)
+
+#define DEFINE_BMP_INIT_OP(bmpdef, allocfn) _DEFINE_BMP_INIT_OP(bmpdef, allocfn)
+
+#define DEFINE_BMP_QUERY_OP(bmpdef) _DEFINE_BMP_QUERY_OP(bmpdef)
+#define DEFINE_BMP_SET_OP(bmpdef) _DEFINE_BMP_SET_OP(bmpdef)
+#define DEFINE_BMP_ALLOCFROM_OP(bmpdef) _DEFINE_BMP_ALLOCFROM_OP(bmpdef)
+
+#define bitmap_query(bitmap, bmp, pos) \
+ _BMP_OP_CALL(bitmap, query, bmp, pos)
+
+#define bitmap_set(bitmap, bmp, pos, val) \
+ _BMP_OP_CALL(bitmap, set, bmp, pos, val)
+
+#define bitmap_alloc(bitmap, bmp, start, out) \
+ _BMP_OP_CALL(bitmap, alloc_from, bmp, start, out)
+
+#define bitmap_init(bitmap, bmp, nr_bits) \
+ _BMP_OP_CALL(bitmap, init, bmp, nr_bits)
+
+#define bitmap_init_ptr(bitmap, bmp, nr_bits, ptr) \
+ _BMP_OP_CALL(bitmap, init_with, bmp, nr_bits, ptr)
+
+
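+/*
+ * Usage sketch (hypothetical names): describe a bitmap once, emit the op
+ * set for it, then go through the generic wrappers.
+ *
+ *   #define ivmap_def PREP_STATIC_BITMAP(u32_t, ivmap, 1024, BMP_ORIENT_LSB)
+ *   DECLARE_STATIC_BITMAP(ivmap_def);
+ *   DEFINE_BMP_QUERY_OP(ivmap_def)
+ *   DEFINE_BMP_SET_OP(ivmap_def)
+ *
+ *   struct bitmap_ivmap map;
+ *   bitmap_set(ivmap_def, &map, 42, true);
+ *   assert(bitmap_query(ivmap_def, &map, 42));
+ */
+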
+#endif /* __LUNAIX_BITMAP_H */
struct {
struct hart_state state;
struct exec_param eret;
- } compact transfer;
+ } transfer;
};
bool
#define reclaimable_start __section_mark(bssreclaim, start)
#define reclaimable_end __section_mark(bssreclaim, end)
+#define bootsec_start __section_mark(kboot, start)
+#define bootsec_end __section_mark(kboot, end)
+
#define kernel_start __section_mark(kexec, start)
#define kernel_load_end __section_mark(kexec, end)
#define kernel_end __section_mark(kimg, end)
#ifndef __ASM__
-#define SYSCALL_ESTATUS(errno) -((errno) != 0)
+#include <lunaix/compiler.h>
-#define asmlinkage __attribute__((regparm(0)))
+#define SYSCALL_ESTATUS(errno) -((errno) != 0)
#define __PARAM_MAP1(t1, p1) t1 p1
#define __PARAM_MAP2(t1, p1, ...) t1 p1, __PARAM_MAP1(__VA_ARGS__)
#include <asm/abi.h>
#include <asm/mm_defs.h>
-#include <sys/trace.h>
#include <klibc/string.h>
#include <lunaix/trace.h>
#include <lunaix/failsafe.h>
+#include <asm/cpu.h>
+
LOG_MODULE("spike")
void noret