config("arch"): {
"i386": "x86",
"x86_64": "x86",
- "aarch64": "arm",
+ "aarch64": "aarch64",
"rv64": "riscv"
}
})
\ No newline at end of file
include("x86/LConfig")
+include("aarch64/LConfig")
@Collection("Platform")
def architecture_support():
"""
Config ISA support
"""
# type(["i386", "x86_64", "aarch64", "rv64"])
- type(["i386", "x86_64"])
+ type(["i386", "x86_64", "aarch64"])
default("x86_64")
env_val = env("ARCH")
--- /dev/null
+
+compile_opts([
+    "-mlittle-endian",              # match SCTLR_EL1 EE=0/E0E=0
+    "-mgeneral-regs-only",          # no FP/SIMD registers in kernel code
+    "-mabi=lp64",
+    "-mno-omit-leaf-frame-pointer"  # keep fp usable for stack walking
+])
\ No newline at end of file
--- /dev/null
+
+@Group()
+def aarch64():
+ add_to_collection(architecture_support)
+
+ @Collection("MMU")
+ def mmu_feature():
+
+ @Term("Page Granularity")
+ def aa64_page_gran():
+ type(["4K", "16K", "64K"])
+
+ default("4K")
+
+ @Term("OA Size")
+ def aa64_oa_size():
+ type([52, 48])
+
+ default(48)
+
+ return v(arch) == "aarch64"
\ No newline at end of file
--- /dev/null
+#include <lunaix/boot_generic.h>
+#include <asm/aa64_mmu.h>
+#include <asm/aa64_msrs.h>
+
+#include "init.h"
+
+static inline void
+setup_pstate()
+{
+ /*
+ SCTLR_EL1
+ EE=0, E0E=0 // all little endian
+ WXN=1 // writable implies execute-never
+ nAA=1 // relax alignment check on LDAPR/STLR-family accesses
+ SA0=1, SA=1 // alignment check on SP
+ A=1 // alignment check on memref
+ NMI=1 // enable FEAT_NMI (ALLINT) interrupt masking
+ M=1 // enable mmu
+ */
+
+ unsigned long sctlr = 0;
+
+ sctlr |= SCTLR_NMI;
+ sctlr |= SCTLR_WXN | SCTLR_nAA;
+ sctlr |= SCTLR_SA | SCTLR_SA0;
+ sctlr |= SCTLR_A | SCTLR_M;
+
+ set_sysreg(SCTLR_EL1, sctlr);
+ set_sysreg(SPSel, 1);
+}
+
+static inline void
+setup_evbar()
+{
+ // TODO install exception vectors, setup VBAR
+}
+
+static inline void
+setup_ttbr()
+{
+ /*
+
+ TCR_EL1
+ SH0=3 // Inner shareable
+ ORGN0=0 // Normal memory, Outer Non-cacheable.
+ IRGN0=1 // Normal memory, Inner Write-Back Read-Allocate Write-Allocate Cacheable.
+ A1=0 // TTBR0 defines ASID
+ EPD1=1 // disable TTBR1 walks
+ T1SZ=0
+ EPD0=0
+ T0SZ=16 // use TTBR0 for all translation (48-bit VA, 256T)
+ TG0=0 // 4K granule
+ TBI1=0,
+ TBI0=0 // no top-byte ignore
+ AS=1 // 16-bit asid
+ HA=1
+ HD=1 // hardware managed dirty and access
+
+
+ We may adopt the following practice later:
+ TTBR0: translation for user-land (lowmem)
+ TTBR1: translation for kernel-land (highmem)
+ */
+
+ unsigned long tcr = 0;
+ ptr_t ttb;
+
+ tcr |= TCR_T1SZ(0) | TCR_T0SZ(16);
+ tcr |= TCR_TG0(TCR_G4K);
+ tcr |= TCR_SH0(TCR_SHIS) | TCR_IRGN0(1);
+ tcr |= TCR_AS | TCR_HA | TCR_HD;
+ tcr |= TCR_EPD1;
+
+ ttb = kremap();
+
+ set_sysreg(TTBR0_EL1, ttb);
+ set_sysreg(TCR_EL1, tcr);
+}
+
+static inline void
+extract_dtb_bootinfo(ptr_t dtb, struct boot_handoff* handoff)
+{
+ handoff->kexec.dtb_pa = dtb;
+
+ // TODO extract /memory and /reserved-memory nodes from dtb
+}
+
+struct boot_handoff*
+aarch64_init(ptr_t dtb)
+{
+ setup_evbar();
+ setup_ttbr();
+ setup_pstate();
+
+ struct boot_handoff* handoff;
+
+ handoff = bootmem_alloc(sizeof(*handoff));
+
+ extract_dtb_bootinfo(dtb, handoff);
+
+ return handoff;
+}
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_AA64_INIT_H
+#define __LUNAIX_AA64_INIT_H
+
+#include <lunaix/types.h>
+#include <sys-generic/bootmem.h>
+
+#define boot_text __attribute__((section(".boot.text")))
+#define boot_data __attribute__((section(".boot.data")))
+#define boot_bss __attribute__((section(".boot.bss")))
+
+ptr_t
+kremap();
+
+#endif /* __LUNAIX_AA64_INIT_H */
--- /dev/null
+#include "init.h"
+#include <lunaix/sections.h>
+#include <lunaix/spike.h>
+
+#define BOOTMEM_SIZE (4 * 4096)
+
+static reclaimable char bootmem_pool[BOOTMEM_SIZE] __attribute__((aligned(16)));
+static unsigned int pos;
+
+void*
+bootmem_alloc(unsigned int size)
+{
+ ptr_t res;
+
+ res = __ptr(bootmem_pool) + pos;
+
+ // keep 8-byte alignment: SCTLR_EL1.A enables strict alignment
+ // checking, so returned objects must be naturally aligned
+ size = ROUNDUP(size, 8);
+ pos += size;
+
+ if (pos > BOOTMEM_SIZE) {
+ spin();
+ }
+
+ return (void*)res;
+}
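+
+/*
+ * usage sketch (illustrative):
+ *
+ *     struct boot_handoff* h = bootmem_alloc(sizeof(*h));
+ *
+ * allocations are one-shot; bootmem_free() is a no-op
+ */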
+
+void
+bootmem_free(void* ptr)
+{
+ // no need to support freeing: boot-time allocations are one-shot
+ return;
+}
\ No newline at end of file
--- /dev/null
+.section .boot.data
+ /* 16-byte aligned (.balign takes bytes; .align on aarch64 is power-of-two) */
+ .balign 16
+ stack_end:
+ .skip 512
+ stack_top:
+
+.section .boot.text
+ .global start_
+
+ /*
+ We follow the Linux arm64 boot protocol; on entry:
+ x0 = physical address of the dtb
+ x1 = x2 = x3 = 0 (reserved)
+ */
+ start_:
+ ldr x4, =stack_top
+ mov sp, x4
+ mov fp, xzr
+
+ // x0 (dtb) is passed through to aarch64_init
+ ldr x4, =aarch64_init
+ blr x4
+
+ // x0: ptr to boot_handoff
+ ldr x4, =kernel_bootstrap
+ blr x4
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_AA64_H
+#define __LUNAIX_AA64_H
+
+#include "aa64_asm.h"
+#include "aa64_mmu.h"
+#include "aa64_msrs.h"
+#include "aa64_sysinst.h"
+
+#endif /* __LUNAIX_AA64_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_ASM_H
+#define __LUNAIX_AA64_ASM_H
+
+#include <lunaix/compiler.h>
+
+#define __sr_encode(op0, op1, crn, crm, op2) \
+ s##op0##_##op1##_c##crn##_c##crm##_##op2
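+
+// e.g. __sr_encode(3, 0, 1, 0, 0) -> s3_0_c1_c0_0, the generic
+// (op0, op1, CRn, CRm, op2) name gas accepts as an msr/mrs operand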
+
+#endif /* __LUNAIX_AA64_ASM_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_MMU_H
+#define __LUNAIX_AA64_MMU_H
+
+#include "aa64_asm.h"
+
+// granule encoding used by the ranged TLBI Xt TG field
+#if defined(CONFIG_AA64_PAGE_GRAN_4K)
+#define _MMU_TG 0b01
+#elif defined(CONFIG_AA64_PAGE_GRAN_16K)
+#define _MMU_TG 0b10
+#elif defined(CONFIG_AA64_PAGE_GRAN_64K)
+#define _MMU_TG 0b11
+#endif
+
+#if CONFIG_AA64_OA_SIZE == 52
+#define _MMU_USE_OA52
+#endif
+
+
+#define TCR_DS (1UL << 59)
+#define TCR_E0PD1 (1UL << 56)
+#define TCR_E0PD0 (1UL << 55)
+#define TCR_TBID1 (1UL << 52)
+#define TCR_TBID0 (1UL << 51)
+#define TCR_HPD1 (1UL << 42)
+#define TCR_HPD0 (1UL << 41)
+#define TCR_HD (1UL << 40)
+#define TCR_HA (1UL << 39)
+#define TCR_TBI1 (1UL << 38)
+#define TCR_TBI0 (1UL << 37)
+#define TCR_AS (1UL << 36)
+
+// TCR_EL1.TG0 granule encoding (TG1 uses a different one, see TCR_G1_*)
+#define TCR_G4K (0b00)
+#define TCR_G16K (0b10)
+#define TCR_G64K (0b01)
+
+// TCR_EL1.TG1 granule encoding
+#define TCR_G1_4K (0b10)
+#define TCR_G1_16K (0b01)
+#define TCR_G1_64K (0b11)
+
+#define TCR_SHNS (0b00)
+#define TCR_SHOS (0b10)
+#define TCR_SHIS (0b11)
+
+#define TCR_TG1(g) (((g) & 0b11) << 30)
+#define TCR_TG0(g) (((g) & 0b11) << 14)
+
+#define TCR_SH1(s) (((s) & 0b11) << 28)
+#define TCR_SH0(s) (((s) & 0b11) << 12)
+#define TCR_ORGN0(v) (((v) & 0b11) << 10)
+#define TCR_IRGN0(v) (((v) & 0b11) << 8)
+
+#define TCR_T1SZ(sz) (((sz) & 0b111111) << 16)
+#define TCR_T0SZ(sz) (((sz) & 0b111111))
+
+#define TCR_EPD1 (1UL << 23)
+#define TCR_EPD0 (1UL << 7)
+#define TCR_A1 (1UL << 22)
+
+#endif /* __LUNAIX_AA64_MMU_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_MSRS_H
+#define __LUNAIX_AA64_MSRS_H
+
+#include "aa64_asm.h"
+
+#define SCTLR_EL1 __sr_encode(3, 0, 1, 0, 0)
+#define TCR_EL1 __sr_encode(3, 0, 2, 0, 2)
+#define TTBR0_EL1 __sr_encode(3, 0, 2, 0, 0)
+#define TTBR1_EL1 __sr_encode(3, 0, 2, 0, 1)
+#define VBAR_EL1 __sr_encode(3, 0, 12, 0, 0)
+#define CurrentEL __sr_encode(3, 0, 4, 2, 2)
+#define ELR_EL1 __sr_encode(3, 0, 4, 0, 1)
+#define SPSel __sr_encode(3, 0, 4, 2, 0)
+#define SPSR_EL1 __sr_encode(3, 0, 4, 0, 0)
+#define DAIF_EL1 __sr_encode(3, 3, 4, 2, 1)
+#define ALLINT_EL1 __sr_encode(3, 0, 4, 3, 0)
+#define SP_EL0 __sr_encode(3, 0, 4, 1, 0)
+#define SP_EL1 __sr_encode(3, 4, 4, 1, 0)
+
+#define read_sysreg(reg) \
+    ({ unsigned long _x; \
+       asm volatile ("mrs %0, " stringify(reg) : "=r"(_x)); \
+       _x; \
+    })
+
+#define set_sysreg(reg, v) \
+    ({ unsigned long _x = (v); \
+       asm volatile ("msr " stringify(reg) ", %0" :: "r"(_x)); \
+       _x; \
+    })
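+
+/*
+ * usage sketch (illustrative): read-modify-write a system register
+ *
+ *     unsigned long tcr = read_sysreg(TCR_EL1);
+ *     set_sysreg(TCR_EL1, tcr | (1UL << 39));   // set TCR_EL1.HA
+ */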
+
+#define SCTLR_SPINTMASK (1UL << 62)
+#define SCTLR_NMI (1UL << 61)
+#define SCTLR_EE (1UL << 25)
+#define SCTLR_E0E (1UL << 24)
+#define SCTLR_WXN (1UL << 19)
+#define SCTLR_nAA (1UL << 6)
+#define SCTLR_SA0 (1UL << 4)
+#define SCTLR_SA (1UL << 3)
+#define SCTLR_A (1UL << 1)
+#define SCTLR_M (1UL << 0)
+
+
+#endif /* __LUNAIX_AA64_MSRS_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_SYSINST_H
+#define __LUNAIX_AA64_SYSINST_H
+
+#include "aa64_asm.h"
+
+/*
+   TLBI ops expressed as "op1, Cn, Cm, op2" operands of the generic
+   sys/sysl instructions (op0 is implied by sys itself)
+*/
+#define tlbi_alle1 "#4, c8, c7, #4" /* EL2 only */
+#define tlbi_vmalle1 "#0, c8, c7, #0"
+#define tlbi_aside1 "#0, c8, c7, #2"
+#define tlbi_rvaae1 "#0, c8, c6, #3"
+#define tlbi_rvae1 "#0, c8, c6, #1"
+#define tlbi_vaae1 "#0, c8, c7, #3"
+#define tlbi_vae1 "#0, c8, c7, #1"
+
+#define sys_a0(op) \
+    ({ asm volatile ("sys " op); })
+
+#define sys_a1(op, xt) \
+    ({ asm volatile ("sys " op ", %0" :: "r"(xt)); })
+
+#define sysl(op) \
+    ({ unsigned long _x; \
+       asm volatile ("sysl %0, " op : "=r"(_x)); \
+       _x; \
+    })
+
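+/*
+ * usage sketch (illustrative): invalidate one VA across all ASIDs,
+ * using pack_va() from asm/tlb.h to build the Xt operand
+ *
+ *     sys_a1(tlbi_vaae1, pack_va(0, 0, va));
+ */
+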
+#endif /* __LUNAIX_AA64_SYSINST_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_ABI_H
+#define __LUNAIX_AA64_ABI_H
+
+#include <lunaix/types.h>
+
+#ifndef __ASM__
+#define align_stack(ptr) ((ptr) & stack_alignment)
+
+static inline void must_inline noret
+switch_context() {
+ // TODO
+ unreachable;
+}
+
+
+static inline ptr_t
+abi_get_retaddr()
+{
+ reg_t lr;
+ asm ("mov %0, lr" : "=r"(lr));
+
+ return lr;
+}
+
+static inline ptr_t
+abi_get_retaddrat(ptr_t fp)
+{
+ return ((ptr_t*)fp)[1];
+}
+
+#endif
+
+#endif /* __LUNAIX_AA64_ABI_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_CPU_H
+#define __LUNAIX_ARCH_CPU_H
+
+#include "aa64.h"
+
+void
+cpu_trap_sched();
+
+static inline void
+cpu_enable_interrupt()
+{
+ set_sysreg(ALLINT_EL1, 0);
+}
+
+static inline void
+cpu_disable_interrupt()
+{
+ // PSTATE.ALLINT lives at bit 13 of the ALLINT register
+ set_sysreg(ALLINT_EL1, 1UL << 13);
+}
+
+static inline void
+cpu_wait()
+{
+ asm volatile ( "wfi" );
+}
+
+#endif /* __LUNAIX_ARCH_CPU_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_HART_H
+#define __LUNAIX_ARCH_HART_H
+
+#ifndef __ASM__
+#include <lunaix/types.h>
+
+struct hart_state;
+
+struct regcontext
+{
+    union {
+        reg_t x[32];
+        struct {
+            reg_t _x[29]; // x0-x28; '_x' avoids clashing with x[32] above
+            reg_t fp;     // x29
+            reg_t lr;     // x30
+            reg_t sp;
+        };
+    };
+} compact;
+
+struct exec_param
+{
+ struct hart_state* parent_state;
+ reg_t vector;
+ reg_t syndrome;
+ reg_t elink;
+ reg_t sp;
+} compact;
+
+struct hart_state
+{
+ reg_t depth;
+ struct regcontext registers;
+ union
+ {
+ reg_t sp;
+ volatile struct exec_param* execp;
+ };
+} compact;
+
+static inline int
+hart_vector_stamp(struct hart_state* hstate) {
+ return hstate->execp->vector;
+}
+
+static inline unsigned int
+hart_ecause(struct hart_state* hstate) {
+ return hstate->execp->syndrome;
+}
+
+static inline struct hart_state*
+hart_parent_state(struct hart_state* hstate)
+{
+ return hstate->execp->parent_state;
+}
+
+static inline void
+hart_push_state(struct hart_state* p_hstate, struct hart_state* hstate)
+{
+ hstate->execp->parent_state = p_hstate;
+}
+
+static inline ptr_t
+hart_pc(struct hart_state* hstate)
+{
+ return hstate->execp->elink;
+}
+
+static inline ptr_t
+hart_sp(struct hart_state* hstate)
+{
+ return hstate->execp->sp;
+}
+
+static inline bool
+kernel_context(struct hart_state* hstate)
+{
+ // TODO
+ return false;
+}
+
+static inline ptr_t
+hart_stack_frame(struct hart_state* hstate)
+{
+ return hstate->registers.fp;
+}
+
+#endif
+
+#endif /* __LUNAIX_ARCH_HART_H */
--- /dev/null
+#ifndef __LUNAIX_MEMPART_H
+#define __LUNAIX_MEMPART_H
+
+
+#define END_POINT(name) (name + name##_SIZE - 1)
+
+#ifdef __LD__
+#define __ulong(val) val
+#else
+#define __ulong(val) val##UL
+#endif
+
+#define KSTACK_AREA __ulong(0x0000000100000000)
+#define KSTACK_AREA_SIZE __ulong(0x0000000040000000)
+#define KSTACK_AREA_END END_POINT(KSTACK_AREA)
+
+#define USR_EXEC __ulong(0x0000008000000000)
+#define USR_EXEC_SIZE __ulong(0x0000002000000000)
+#define USR_EXEC_END END_POINT(USR_EXEC)
+
+#define USR_MMAP __ulong(0x0000010000000000)
+#define USR_MMAP_SIZE __ulong(0x0000008000000000)
+#define USR_MMAP_END END_POINT(USR_MMAP)
+
+#define USR_STACK __ulong(0x00007f8000000000)
+#define USR_STACK_SIZE __ulong(0x0000001fc0000000)
+#define USR_STACK_SIZE_THREAD __ulong(0x0000000000200000)
+#define USR_STACK_END END_POINT(USR_STACK)
+
+
+// the kernel's home
+
+#define KERNEL_RESIDENT __ulong(0xfffffd8000000000) // -2.5T
+#define VMAP KERNEL_RESIDENT // -2.5T
+#define VMAP_SIZE __ulong(0x0000010000000000)
+#define VMAP_END END_POINT(VMAP)
+
+#define VMS_MOUNT_1 __ulong(0xfffffe8000000000) // -1.5T
+#define VMS_MOUNT_1_SIZE __ulong(0x0000008000000000)
+#define VMS_MOUNT_1_END END_POINT(VMS_MOUNT_1)
+
+#define VMS_SELF_MOUNT __ulong(0xffffff0000000000) // -1T
+
+#define KMAP __ulong(0xffffff8000000000)
+#define PG_MOUNT_1 KMAP // -512G
+#define PG_MOUNT_1_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_1_END END_POINT(PG_MOUNT_1)
+
+#define PG_MOUNT_2 __ulong(0xffffff8000001000)
+#define PG_MOUNT_2_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_2_END END_POINT(PG_MOUNT_2)
+
+#define PG_MOUNT_3 __ulong(0xffffff8000002000)
+#define PG_MOUNT_3_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_3_END END_POINT(PG_MOUNT_3)
+
+#define PG_MOUNT_4 __ulong(0xffffff8000003000)
+#define PG_MOUNT_4_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_4_END END_POINT(PG_MOUNT_4)
+
+#define PG_MOUNT_VAR __ulong(0xffffff8000004000)
+#define PG_MOUNT_VAR_SIZE __ulong(0x000000003fffc000)
+#define PG_MOUNT_VAR_END END_POINT(PG_MOUNT_VAR)
+
+#define PMAP __ulong(0xffffff8040000000)
+
+#define KERNEL_IMG __ulong(0xffffffff80000000) // -2G
+#define KERNEL_IMG_SIZE __ulong(0x0000000080000000)
+#define KERNEL_IMG_END END_POINT(KERNEL_IMG)
+
+#endif /* __LUNAIX_MEMPART_H */
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_MM_DEFS_H
+#define __LUNAIX_MM_DEFS_H
+
+#include "mempart.h"
+#include "pagetable.h"
+
+/*
+ Regardless of the architecture, we must draw this line very carefully
+ and take the size of the VM space into account. In general, we aim to
+ reserve a "sufficiently large" region of memory for the kernel.
+
+ In terms of x86_32:
+ * PTEs #768~1022 of the PD (0x00000000c0000000, ~1GiB)
+
+ In light of upcoming x86_64 support (for Level 4&5 Paging):
+ * entry #510 of PML4 (0x0000ff0000000000, ~512GiB)
+ * entry #510 of PML5 (0x01fe000000000000, ~256TiB)
+
+
+ KERNEL_RESIDENT - the high-mem region where the kernel resides
+ KSTACK_PAGES - number of pages allocated to each kernel stack
+ KEXEC_RSVD - number of pages reserved for the kernel image
+*/
+
+#define KSTACK_PAGES 4
+#define KEXEC_RSVD 32
+
+#define KSTACK_SIZE (KSTACK_PAGES * PAGE_SIZE)
+
+#define kernel_addr(addr) ((addr) >= KERNEL_RESIDENT || (addr) < USR_EXEC)
+
+#define to_kphysical(k_va) ((ptr_t)(k_va) - KERNEL_IMG)
+#define to_kvirtual(k_pa) ((ptr_t)(k_pa) + KERNEL_IMG)
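+
+// e.g. with KERNEL_IMG = 0xffffffff80000000,
+// to_kphysical(KERNEL_IMG + 0x1000) == 0x1000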
+
+#endif /* __LUNAIX_MM_DEFS_H */
--- /dev/null
+#include <asm-generic/muldiv64.h>
--- /dev/null
+#ifndef __LUNAIX_ARCH_PAGETABLE_H
+#define __LUNAIX_ARCH_PAGETABLE_H
+
+#include <lunaix/types.h>
+#include <lunaix/compiler.h>
+
+#include "aa64_mmu.h"
+
+/* ******** Page Table Manipulation ******** */
+
+// 48-bit VA resolves in 4 lookup levels for 4K/16K granules
+// (64K granule would take 3 levels; not handled yet)
+#define _PTW_LEVEL 4
+
+
+#define VMS_BITS 48
+
+#define PMS_BITS CONFIG_AA64_OA_SIZE
+
+#define VMS_SIZE ( 1UL << VMS_BITS)
+#define VMS_MASK ( VMS_SIZE - 1 )
+#define PMS_SIZE ( 1UL << PMS_BITS )
+#define PMS_MASK ( PMS_SIZE - 1 )
+
+#define __index(va) ( (va) & VMS_MASK )
+#define __vaddr(va) \
+ ( (__index(va) ^ ((VMS_MASK + 1) >> 1)) - ((VMS_MASK + 1) >> 1) )
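+// with m = 1UL << 47, ((x ^ m) - m) sign-extends bit 47 of the index,
+// e.g. __vaddr(0x0000ffffffffff00) == 0xffffffffffffff00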
+#define __paddr(pa) ( (pa) & PMS_MASK )
+
+
+#if defined(CONFIG_AA64_PAGE_GRAN_4K)
+#define _PAGE_BASE_SHIFT 12
+#elif defined(CONFIG_AA64_PAGE_GRAN_16K)
+#define _PAGE_BASE_SHIFT 14
+#elif defined(CONFIG_AA64_PAGE_GRAN_64K)
+#define _PAGE_BASE_SHIFT 16
+#endif
+
+#define _PAGE_BASE_SIZE ( 1UL << _PAGE_BASE_SHIFT )
+#define _PAGE_BASE_MASK ( (_PAGE_BASE_SIZE - 1) & VMS_MASK )
+
+// 8-byte descriptors: each table level indexes (base shift - 3) bits
+#define _PAGE_LEVEL_SHIFT ( _PAGE_BASE_SHIFT - 3 )
+#define _PAGE_LEVEL_SIZE ( 1UL << _PAGE_LEVEL_SHIFT )
+#define _PAGE_LEVEL_MASK ( _PAGE_LEVEL_SIZE - 1 )
+#define _PAGE_Ln_SIZE(n) \
+ ( 1UL << (_PAGE_BASE_SHIFT + _PAGE_LEVEL_SHIFT * (_PTW_LEVEL - (n) - 1)) )
+
+/* General size of a LnT huge page */
+
+#define L0T_SIZE _PAGE_Ln_SIZE(0)
+#define L1T_SIZE _PAGE_Ln_SIZE(1)
+#define L2T_SIZE _PAGE_Ln_SIZE(2)
+#define L3T_SIZE _PAGE_Ln_SIZE(3)
+#define LFT_SIZE _PAGE_Ln_SIZE(3)
+
+
+struct __pte {
+ unsigned long val;
+} align(8);
+
+// upper attributes
+
+#define _PTE_UXN (1UL << 54)
+#define _PTE_PXN (1UL << 53)
+#define _PTE_XN (_PTE_UXN | _PTE_PXN)
+#define _PTE_Contig (1UL << 52)
+#define _PTE_DBM (1UL << 51)
+
+#ifdef _MMU_USE_OA52
+#if defined(CONFIG_AA64_PAGE_GRAN_64K)
+#define __OA_HIGH_MASK ( 0b1111UL << 12 )
+#define __OA_HEAD(pa) ((pa) & ((1UL << 48) - 1) & ~PAGE_MASK)
+#define __OA_TAIL(pa) ((((pa) >> 48) & 0b1111) << 12)
+#else
+#define __OA_HIGH_MASK ( 0b0011UL << 8 )
+#define __OA_HEAD(pa) ((pa) & ((1UL << 50) - 1) & ~PAGE_MASK)
+#define __OA_TAIL(pa) ((((pa) >> 50) & 0b0011) << 8)
+#endif
+#else
+#define __OA_HIGH_MASK (0UL)
+#define __OA_HEAD(pa) (__paddr(pa) & ~PAGE_MASK)
+#define __OA_TAIL(pa) (0UL)
+#endif
+
+#define _PTE_OA(pa) (__OA_HEAD(pa) | __OA_TAIL(pa))
+
+// lower attributes
+
+#define _PTE_nG (1UL << 11)
+#define _PTE_AF (1UL << 10)
+
+// AP[2:1] access permissions: AP[2] selects read-only, AP[1] allows EL0
+
+#define _PTE_AP(ro, u) ((((ro) & 1) << 1 | ((u) & 1)) << 6)
+#define _PTE_PRW _PTE_AP(0 , 0) // priv rw, unpriv none
+#define _PTE_PRWURW _PTE_AP(0 , 1) // priv rw, unpriv rw
+#define _PTE_U _PTE_AP(0 , 1) // generic unpriv flag
+#define _PTE_PRO _PTE_AP(1 , 0) // priv ro, unpriv none
+#define _PTE_PROURO _PTE_AP(1 , 1) // priv ro, unpriv ro
+
+#define _PTE_BLKDESC (0b01)
+#define _PTE_TABDESC (0b11)
+#define _PTE_LFTDESC (0b11)
+#define _PTE_VALID (0b01)
+#define _PTE_DESC_MASK (0b11)
+#define _PTE_SET_DESC(pte_val, desc) \
+ ( ((pte_val) & ~_PTE_DESC_MASK) | ((desc) & _PTE_DESC_MASK) )
+#define _PTE_GET_DESC(pte_val) \
+ ( (pte_val) & _PTE_DESC_MASK )
+
+#define __MEMGUARD 0xf0f0f0f0f0f0f0f0UL
+
+typedef unsigned long pte_attr_t;
+typedef unsigned long pfn_t;
+
+
+/* General mask to get page offset of a LnT huge page */
+
+#define L0T_MASK ( L0T_SIZE - 1 )
+#define L1T_MASK ( L1T_SIZE - 1 )
+#define L2T_MASK ( L2T_SIZE - 1 )
+#define L3T_MASK ( L3T_SIZE - 1 )
+#define LFT_MASK ( LFT_SIZE - 1 )
+
+/* Masks to get index of a LnTE */
+
+#define L0T_INDEX_MASK ( VMS_MASK ^ L0T_MASK )
+#define L1T_INDEX_MASK ( L0T_MASK ^ L1T_MASK )
+#define L2T_INDEX_MASK ( L1T_MASK ^ L2T_MASK )
+#define L3T_INDEX_MASK ( L2T_MASK ^ L3T_MASK )
+#define LFT_INDEX_MASK ( L3T_MASK ^ LFT_MASK )
+
+#define PAGE_SHIFT _PAGE_BASE_SHIFT
+#define PAGE_SIZE _PAGE_BASE_SIZE
+#define PAGE_MASK _PAGE_BASE_MASK
+
+#define LEVEL_SHIFT _PAGE_LEVEL_SHIFT
+#define LEVEL_SIZE _PAGE_LEVEL_SIZE
+#define LEVEL_MASK _PAGE_LEVEL_MASK
+
+// max PTEs number
+#define MAX_PTEN _PAGE_LEVEL_SIZE
+
+// max translation level supported
+#define MAX_LEVEL _PTW_LEVEL
+
+typedef struct __pte pte_t;
+
+#define _PTE_PROT_MASK ( ~((1UL << 50) - 1) | (PAGE_MASK & ~__OA_HIGH_MASK) )
+#define _PTE_PPFN_MASK ( ~_PTE_PROT_MASK )
+
+#define _PAGE_BASIC ( _PTE_VALID )
+
+#define KERNEL_EXEC ( _PAGE_BASIC | _PTE_PRO | _PTE_UXN )
+#define KERNEL_DATA ( _PAGE_BASIC | _PTE_PRW | _PTE_XN )
+#define KERNEL_RDONLY ( _PAGE_BASIC | _PTE_PRO | _PTE_XN )
+#define KERNEL_ROEXEC KERNEL_EXEC
+#define KERNEL_PGTAB ( _PAGE_BASIC | _PTE_TABDESC )
+
+#define USER_EXEC ( _PAGE_BASIC | _PTE_PROURO | _PTE_PXN )
+#define USER_DATA ( _PAGE_BASIC | _PTE_PRWURW | _PTE_XN )
+#define USER_RDONLY ( _PAGE_BASIC | _PTE_PROURO )
+#define USER_ROEXEC USER_EXEC
+#define USER_PGTAB ( _PAGE_BASIC | _PTE_TABDESC )
+
+#define SELF_MAP ( KERNEL_DATA | _PTE_TABDESC )
+
+#define __mkpte_from(pte_val) ((pte_t){ .val = (pte_val) })
+
+#define null_pte ( __mkpte_from(0) )
+#define guard_pte ( __mkpte_from(__MEMGUARD) )
+#define pte_val(pte) ( pte.val )
+
+
+static inline bool
+pte_isguardian(pte_t pte)
+{
+ return pte.val == __MEMGUARD;
+}
+
+static inline pte_t
+mkpte_prot(pte_attr_t prot)
+{
+ pte_attr_t attrs = (prot & _PTE_PROT_MASK) | _PTE_LFTDESC;
+ return __mkpte_from(attrs);
+}
+
+static inline pte_t
+mkpte(ptr_t paddr, pte_attr_t prot)
+{
+ pte_attr_t attrs = (prot & _PTE_PROT_MASK) | _PTE_LFTDESC;
+ return __mkpte_from((paddr & ~_PAGE_BASE_MASK) | attrs);
+}
+
+static inline pte_t
+mkpte_root(ptr_t paddr, pte_attr_t prot)
+{
+ pte_attr_t attrs = (prot & _PTE_PROT_MASK) | _PTE_TABDESC;
+ return __mkpte_from((paddr & ~_PAGE_BASE_MASK) | attrs);
+}
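+
+/*
+ * usage sketch (illustrative): install a kernel data mapping
+ *
+ *     set_pte(ptep, mkpte(page_pa, KERNEL_DATA));
+ *     tlb_flush_kernel(va);
+ */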
+
+static inline pte_t
+mkpte_raw(unsigned long pte_val)
+{
+ return __mkpte_from(pte_val);
+}
+
+static inline pte_t
+pte_setpaddr(pte_t pte, ptr_t paddr)
+{
+ return __mkpte_from((pte.val & _PTE_PROT_MASK) | (paddr & ~_PTE_PROT_MASK));
+}
+
+static inline pte_t
+pte_setppfn(pte_t pte, pfn_t ppfn)
+{
+ return pte_setpaddr(pte, ppfn * PAGE_SIZE);
+}
+
+static inline ptr_t
+pte_paddr(pte_t pte)
+{
+ return __paddr(pte.val) & ~_PTE_PROT_MASK;
+}
+
+static inline pfn_t
+pte_ppfn(pte_t pte)
+{
+ return pte_paddr(pte) >> _PAGE_BASE_SHIFT;
+}
+
+static inline pte_t
+pte_setprot(pte_t pte, ptr_t prot)
+{
+ return __mkpte_from((pte.val & ~_PTE_PROT_MASK) | (prot & _PTE_PROT_MASK));
+}
+
+static inline pte_attr_t
+pte_prot(pte_t pte)
+{
+ return (pte.val & _PTE_PROT_MASK);
+}
+
+static inline bool
+pte_isnull(pte_t pte)
+{
+ return !pte.val;
+}
+
+static inline pte_t
+pte_mkhuge(pte_t pte)
+{
+ return __mkpte_from(_PTE_SET_DESC(pte.val, _PTE_BLKDESC));
+}
+
+static inline pte_t
+pte_mkvolatile(pte_t pte)
+{
+ return __mkpte_from(pte.val);
+}
+
+static inline pte_t
+pte_mkroot(pte_t pte)
+{
+ return __mkpte_from(_PTE_SET_DESC(pte.val, _PTE_TABDESC));
+}
+
+static inline bool
+pte_huge(pte_t pte)
+{
+ return _PTE_GET_DESC(pte.val) == _PTE_BLKDESC;
+}
+
+static inline pte_t
+pte_mkloaded(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_VALID);
+}
+
+static inline pte_t
+pte_mkunloaded(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_VALID);
+}
+
+static inline bool
+pte_isloaded(pte_t pte)
+{
+ return !!(pte.val & _PTE_VALID);
+}
+
+static inline pte_t
+pte_mkwprotect(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_PRO);
+}
+
+static inline pte_t
+pte_mkwritable(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_PRO);
+}
+
+static inline bool
+pte_iswprotect(pte_t pte)
+{
+ // AP[2] (_PTE_PRO) set means the page is read-only
+ return !!(pte.val & _PTE_PRO);
+}
+
+static inline pte_t
+pte_mkuser(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_U);
+}
+
+static inline pte_t
+pte_mkkernel(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_U);
+}
+
+static inline bool
+pte_allow_user(pte_t pte)
+{
+ return !!(pte.val & _PTE_U);
+}
+
+static inline pte_t
+pte_mkexec(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_PXN);
+}
+
+static inline pte_t
+pte_mknexec(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_PXN);
+}
+
+static inline pte_t
+pte_mkuexec(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_UXN);
+}
+
+static inline pte_t
+pte_mknuexec(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_UXN);
+}
+
+static inline bool
+pte_isexec(pte_t pte)
+{
+ return !(pte.val & _PTE_PXN);
+}
+
+static inline bool
+pte_isuexec(pte_t pte)
+{
+ return !(pte.val & _PTE_UXN);
+}
+
+static inline pte_t
+pte_mkuntouch(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_AF);
+}
+
+static inline bool
+pte_istouched(pte_t pte)
+{
+ return !!(pte.val & _PTE_AF);
+}
+
+static inline pte_t
+pte_mkclean(pte_t pte)
+{
+ // with hardware dirty state (DBM=1, HD=1), "clean" re-arms the
+ // write permission: AP[2]=1 until the next tracked write
+ return __mkpte_from(pte.val | _PTE_PRO);
+}
+
+static inline bool
+pte_dirty(pte_t pte)
+{
+ // dirty iff hardware cleared AP[2] on a DBM-tracked page
+ return (pte.val & _PTE_DBM) && !(pte.val & _PTE_PRO);
+}
+
+static inline void
+set_pte(pte_t* ptep, pte_t pte)
+{
+ ptep->val = pte.val;
+}
+
+static inline pte_t
+pte_at(pte_t* ptep) {
+ return *ptep;
+}
+
+pte_t
+translate_vmr_prot(unsigned int vmr_prot, pte_t pte);
+
+#endif /* __LUNAIX_ARCH_PAGETABLE_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_PHYSICAL_H
+#define __LUNAIX_ARCH_PHYSICAL_H
+
+#include <lunaix/ds/llist.h>
+#include "mm_defs.h"
+
+#define PPLIST_STARTVM PMAP
+
+struct ppage_arch
+{
+
+};
+
+#endif /* __LUNAIX_ARCH_PHYSICAL_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_TLB_H
+#define __LUNAIX_ARCH_TLB_H
+
+#include <lunaix/types.h>
+
+#include <asm/mm_defs.h>
+#include <asm/aa64_sysinst.h>
+
+/*
+   Xt layout of by-VA TLBI:
+       ASID[63:48] TTL[47:44] VA[43:0] (VA >> 12)
+*/
+#define pack_va(asid, ttl, va) \
+        ((((unsigned long)(asid) & 0xffff) << 48) | \
+         (((unsigned long)(ttl) & 0b1111) << 44) | \
+         ((unsigned long)pfn(va) & ((1UL << 44) - 1)))
+
+/*
+   Xt layout of ranged TLBI (TLBI R*):
+       ASID[63:48] TG[47:46] SCALE[45:44] NUM[43:39] TTL[38:37] BaseADDR[36:0]
+*/
+#define pack_rva(asid, ttl, base, n, scale) \
+        ((((unsigned long)(asid) & 0xffff) << 48) | \
+         (((unsigned long)_MMU_TG & 0b11) << 46) | \
+         (((unsigned long)(scale) & 0b11) << 44) | \
+         (((unsigned long)(n) & 0x1f) << 39) | \
+         (((unsigned long)(ttl) & 0b11) << 37) | \
+         ((unsigned long)pfn(base) & ((1UL << 37) - 1)))
+
+/**
+ * @brief Invalidate an entry across all address spaces
+ *
+ * @param va
+ */
+static inline void must_inline
+__tlb_invalidate(ptr_t va)
+{
+ sys_a1(tlbi_vaae1, pack_va(0, 0, va));
+}
+
+/**
+ * @brief Invalidate an entry of the address space identified
+ *        by ASID
+ *
+ * @param asid
+ * @param va
+ */
+static inline void must_inline
+__tlb_flush_asid(unsigned int asid, ptr_t va)
+{
+ sys_a1(tlbi_vae1, pack_va(asid, 0, va));
+}
+
+/**
+ * @brief Invalidate an entry of the global (kernel) address space
+ *
+ * @param va
+ */
+static inline void must_inline
+__tlb_flush_global(ptr_t va)
+{
+ __tlb_flush_asid(0, va);
+}
+
+/**
+ * @brief Invalidate the entire TLB
+ */
+static inline void must_inline
+__tlb_flush_all()
+{
+ // vmalle1: alle1 is an EL2 operation and undefined at EL1
+ sys_a0(tlbi_vmalle1);
+}
+
+/**
+ * @brief Invalidate an entire address space identified by ASID
+ *
+ * @param asid
+ */
+static inline void must_inline
+__tlb_flush_asid_all(unsigned int asid)
+{
+ sys_a1(tlbi_aside1, pack_va(asid, 0, 0));
+}
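+
+// note: a TLBI only completes after a dsb ish (and isb); barriers
+// are assumed to be issued by callers at a higher level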
+
+
+/**
+ * @brief Invalidate entries of a range across all address spaces
+ *
+ * @param addr
+ * @param npages
+ */
+static inline void
+tlb_flush_range(ptr_t addr, unsigned int npages)
+{
+#ifdef _MMU_USE_OA52
+ for (unsigned int i = 0; i < npages; i++)
+ {
+ __tlb_invalidate(addr + i * PAGE_SIZE);
+ }
+#else
+ // with SCALE=0 a ranged op covers (NUM + 1) * 2 pages; round up
+ sys_a1(tlbi_rvaae1, pack_rva(0, 0, addr, ((npages + 1) >> 1) - 1, 0));
+#endif
+}
+
+/**
+ * @brief Invalidate entries of a range within the address space
+ *        identified by ASID
+ *
+ * @param asid
+ * @param addr
+ * @param npages
+ */
+static inline void
+tlb_flush_asid_range(unsigned int asid, ptr_t addr, unsigned int npages)
+{
+#ifdef _MMU_USE_OA52
+ for (unsigned int i = 0; i < npages; i++)
+ {
+ __tlb_flush_asid(asid, addr + i * PAGE_SIZE);
+ }
+#else
+ // with SCALE=0 a ranged op covers (NUM + 1) * 2 pages; round up
+ sys_a1(tlbi_rvae1, pack_rva(asid, 0, addr, ((npages + 1) >> 1) - 1, 0));
+#endif
+}
+
+#include <asm-generic/tlb-shared.h>
+
+#endif /* __LUNAIX_ARCH_TLB_H */
--- /dev/null
+#ifndef __LUNAIX_BASE_DEFS_LD_INC
+#define __LUNAIX_BASE_DEFS_LD_INC
+
+#define __LD__
+#include <sys/mm/mempart.h>
+
+#define KEXEC_BASE KERNEL_IMG
+#define PAGE_GRAN 4K
+
+#define ENTRY_POINT start_
+
+#define LOAD_OFF 0x100000
+
+
+#endif /* __LUNAIX_BASE_DEFS_LD_INC */
--- /dev/null
+#ifndef __LUNAIX_BOOT_SECS_LD_INC
+#define __LUNAIX_BOOT_SECS_LD_INC
+
+#include "base_defs.ld.inc"
+
+.boot.text BLOCK(PAGE_GRAN) :
+{
+ *(.boot.text)
+} : boot_text
+
+.boot.data BLOCK(PAGE_GRAN) :
+{
+ *(.boot.data)
+ *(.boot.bss)
+} : boot_data
+
+#endif
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_TLB_SHARED_H
+#define __LUNAIX_TLB_SHARED_H
+
+#include <lunaix/types.h>
+#include <lunaix/mm/procvm.h>
+
+/**
+ * @brief Invalidate an entry of the kernel address space
+ *
+ * @param addr
+ */
+static inline void
+tlb_flush_kernel(ptr_t addr)
+{
+ __tlb_flush_global(addr);
+}
+
+/**
+ * @brief Invalidate entries of the kernel address space
+ *
+ * @param addr
+ * @param npages
+ */
+static inline void
+tlb_flush_kernel_ranged(ptr_t addr, unsigned int npages)
+{
+ for (unsigned int i = 0; i < npages; i++)
+ {
+ tlb_flush_kernel(addr + i * PAGE_SIZE);
+ }
+}
+
+/**
+ * @brief Invalidate an entry within a process memory space
+ *
+ * @param mm
+ * @param addr
+ */
+void
+tlb_flush_mm(struct proc_mm* mm, ptr_t addr);
+
+/**
+ * @brief Invalidate entries within a process memory space
+ *
+ * @param mm
+ * @param addr
+ * @param npages
+ */
+void
+tlb_flush_mm_range(struct proc_mm* mm, ptr_t addr, unsigned int npages);
+
+/**
+ * @brief Invalidate an entry within a vm region
+ *
+ * @param vmr
+ * @param va
+ */
+void
+tlb_flush_vmr(struct mm_region* vmr, ptr_t va);
+
+/**
+ * @brief Invalidate all entries within a vm region
+ *
+ * @param vmr
+ */
+void
+tlb_flush_vmr_all(struct mm_region* vmr);
+
+/**
+ * @brief Invalidate entries within a vm region
+ *
+ * @param vmr
+ * @param addr
+ * @param npages
+ */
+void
+tlb_flush_vmr_range(struct mm_region* vmr, ptr_t addr, unsigned int npages);
+
+#endif /* __LUNAIX_TLB_SHARED_H */
#include <asm/tlb.h>
#include <lunaix/process.h>
-void
+_default void
tlb_flush_mm(struct proc_mm* mm, ptr_t addr)
{
__tlb_flush_asid(procvm_asid(mm), addr);
}
-void
+_default void
tlb_flush_mm_range(struct proc_mm* mm, ptr_t addr, unsigned int npages)
{
tlb_flush_asid_range(procvm_asid(mm), addr, npages);
}
-void
+_default void
tlb_flush_vmr(struct mm_region* vmr, ptr_t va)
{
__tlb_flush_asid(procvm_asid(vmr->proc_vms), va);
}
-void
+_default void
tlb_flush_vmr_all(struct mm_region* vmr)
{
tlb_flush_asid_range(procvm_asid(vmr->proc_vms),
vmr->start, leaf_count(vmr->end - vmr->start));
}
-void
+_default void
tlb_flush_vmr_range(struct mm_region* vmr, ptr_t addr, unsigned int npages)
{
tlb_flush_asid_range(procvm_asid(vmr->proc_vms), addr, npages);
#define KERNEL_RDONLY ( KERNEL_PAGE | _PTE_NX )
#define KERNEL_ROEXEC ( KERNEL_PAGE | _PTE_X )
#define KERNEL_PGTAB ( KERNEL_PAGE | _PTE_W )
-#define KERNEL_DEFAULT KERNEL_PGTAB
#define USER_PAGE ( _PTE_P | _PTE_U )
#define USER_EXEC ( USER_PAGE | _PTE_X )
#define __LUNAIX_ARCH_TLB_H
#include <lunaix/compiler.h>
-#include <lunaix/mm/procvm.h>
-#include <lunaix/mm/physical.h>
+#include <asm/mm_defs.h>
/**
* @brief Invalidate an entry of all address space
}
}
-/**
- * @brief Invalidate an entry within a process memory space
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_mm(struct proc_mm* mm, ptr_t addr);
-
-/**
- * @brief Invalidate entries within a process memory space
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_mm_range(struct proc_mm* mm, ptr_t addr, unsigned int npages);
-
-/**
- * @brief Invalidate an entry within a vm region
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_vmr(struct mm_region* vmr, ptr_t va);
-
-/**
- * @brief Invalidate all entries within a vm region
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_vmr_all(struct mm_region* vmr);
-
-/**
- * @brief Invalidate entries within a vm region
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_vmr_range(struct mm_region* vmr, ptr_t addr, unsigned int npages);
+#include <asm-generic/tlb-shared.h>
#endif /* __LUNAIX_VMTLB_H */
"""
type(bool)
- default(not v(arch).startswith("x86"))
+ default(False)
+
+ if v(arch) == "aarch64":
+ set_value(True)
@ReadOnly
@Term("Maximum size of device tree blob (in KiB)")