include("x86/LConfig")
+include("aarch64/LConfig")
@Collection("Platform")
def architecture_support():
--- /dev/null
+
+@Group()
+def aarch64():
+    add_to_collection(architecture_support)
+
+    @Collection("MMU")
+    def mmu_feature():
+
+        @Term("Page Granularity")
+        def aa64_page_gran():
+            type(["4K", "16K", "64K"])
+
+            default("4K")
+
+        @Term("OA Size")
+        def aa64_oa_size():
+            type([52, 48])
+
+            default(48)
+
+    return v(arch) in ["aarch64"]
\ No newline at end of file
#include <lunaix/boot_generic.h>
-#include <sys/msrs.h>
+#include <asm/aa64_msrs.h>
#include "init.h"
#define __LUNAIX_AA64_INIT_H
#include <lunaix/types.h>
-#include <lunaix/generic/bootmem.h>
+#include <sys-generic/bootmem.h>
#define boot_text __attribute__((section(".boot.text")))
#define boot_data __attribute__((section(".boot.data")))
--- /dev/null
+#ifndef __LUNAIX_AA64_H
+#define __LUNAIX_AA64_H
+
+#include "aa64_mmu.h"
+#include "aa64_msrs.h"
+
+#endif /* __LUNAIX_AA64_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_MMU_H
+#define __LUNAIX_AA64_MMU_H
+
+#if defined(CONFIG_AA64_PAGE_GRAN_4K)
+#define _MMU_TG 0b01
+#elif defined(CONFIG_AA64_PAGE_GRAN_16K)
+#define _MMU_TG 0b10
+#elif defined(CONFIG_AA64_PAGE_GRAN_64K)
+#define _MMU_TG 0b11
+#endif
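+
+/*
+ * Note: _MMU_TG follows the granule encoding of the ranged TLBI
+ * operand (TG field: 0b01 = 4K, 0b10 = 16K, 0b11 = 64K), not the
+ * TCR_EL1.TG0 encoding; it is consumed by pack_rva() in asm/tlb.h.
+ */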
+
+#if CONFIG_AA64_OA_SIZE == 52
+#define _MMU_USE_OA52
+#endif
+
+#endif /* __LUNAIX_AA64_MMU_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_SYSINST_H
+#define __LUNAIX_AA64_SYSINST_H
+
+#include "aa64_msrs.h"
+
+#define tlbi_alle1 __sr_encode(1, 4, 8, 7, 4)
+#define tlbi_aside1 __sr_encode(1, 0, 8, 7, 2)
+#define tlbi_rvaae1 __sr_encode(1, 0, 8, 6, 3)
+#define tlbi_rvae1 __sr_encode(1, 0, 8, 6, 1)
+#define tlbi_vaae1 __sr_encode(1, 0, 8, 7, 3)
+#define tlbi_vae1 __sr_encode(1, 0, 8, 7, 1)
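+
+/*
+ * The tlbi_* values above are operands for the generic `sys`/`sysl`
+ * instructions; __sr_encode is assumed to pack (op0, op1, CRn, CRm,
+ * op2) in that order, as defined in aa64_msrs.h.
+ */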
+
+#define sys_a0(op) \
+ ({ asm volatile ("sys " stringify(op)); })
+
+#define sys_a1(op, xt) \
+ ({ asm volatile ("sys " stringify(op) ", %0" :: "r"(xt)); })
+
+#define sysl(op) \
+ ({ unsigned long _x; \
+ asm volatile ("sysl %0, " stringify(op) : "=r"(_x)); \
+ _x; \
+ })
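+
+/*
+ * Usage sketch: drop the whole EL1 TLB, then synchronize. A DSB is
+ * required for the invalidation to complete, and an ISB to flush any
+ * stale translations from the pipeline:
+ *
+ *   sys_a0(tlbi_alle1);
+ *   asm volatile ("dsb ish; isb");
+ */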
+
+#endif /* __LUNAIX_AA64_SYSINST_H */
--- /dev/null
+#ifndef __LUNAIX_AA64_ABI_H
+#define __LUNAIX_AA64_ABI_H
+
+#include <lunaix/types.h>
+
+#ifndef __ASM__
+#define align_stack(ptr) ((ptr) & stack_alignment)
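+
+/*
+ * AAPCS64 requires sp to be 16-byte aligned at any public interface;
+ * stack_alignment is assumed to be the corresponding mask (~0xfUL).
+ */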
+
+static inline void must_inline noret
+switch_context() {
+ // TODO
+ unreachable;
+}
+
+
+static inline ptr_t
+abi_get_retaddr()
+{
+ reg_t lr;
+ asm ("mov %0, lr" : "=r"(lr));
+
+ return lr;
+}
+
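+/*
+ * An AAPCS64 frame record is the pair { previous fp, saved lr },
+ * pointed to by fp (x29); the saved return address thus sits at fp[1].
+ */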
+static inline ptr_t
+abi_get_retaddrat(ptr_t fp)
+{
+ return ((ptr_t*)fp)[1];
+}
+
+#endif
+
+#endif /* __LUNAIX_AA64_ABI_H */
-#ifndef __LUNAIX_AA64_CPU_H
-#define __LUNAIX_AA64_CPU_H
+#ifndef __LUNAIX_ARCH_CPU_H
+#define __LUNAIX_ARCH_CPU_H
-#include <sys/msrs.h>
+#include "aa64.h"
void
cpu_trap_sched();
set_sysreg(ALLINT_EL1, 1 << 12);
}
-#endif /* __LUNAIX_AA64_CPU_H */
+static inline void
+cpu_wait()
+{
+ asm volatile ( "wfi" );
+}
+
+#endif /* __LUNAIX_ARCH_CPU_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_HART_H
+#define __LUNAIX_ARCH_HART_H
+
+#ifndef __ASM__
+#include <lunaix/types.h>
+
+struct hart_state;
+
+struct regcontext
+{
+ union {
+ reg_t x[32];
+ struct {
+ reg_t _x[29]; // x0 - x28 (named to avoid clashing with x[32])
+ reg_t fp; // x29
+ reg_t lr; // x30
+ reg_t sp;
+ };
+ };
+} compact;
+
+struct exec_param
+{
+ struct hart_state* parent_state;
+ reg_t vector;
+ reg_t syndrome;
+ reg_t elink;
+ reg_t sp;
+} compact;
+
+struct hart_state
+{
+ reg_t depth;
+ struct regcontext registers;
+ union
+ {
+ reg_t sp;
+ volatile struct exec_param* execp;
+ };
+} compact;
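+
+/*
+ * sp and execp alias: the assumption here is that, on trap entry, the
+ * stack pointer lands on top of the exec_param block pushed by the
+ * vector stub, so the saved sp doubles as a pointer to it.
+ */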
+
+static inline int
+hart_vector_stamp(struct hart_state* hstate) {
+ return hstate->execp->vector;
+}
+
+static inline unsigned int
+hart_ecause(struct hart_state* hstate) {
+ return hstate->execp->syndrome;
+}
+
+static inline struct hart_state*
+hart_parent_state(struct hart_state* hstate)
+{
+ return hstate->execp->parent_state;
+}
+
+static inline void
+hart_push_state(struct hart_state* p_hstate, struct hart_state* hstate)
+{
+ hstate->execp->parent_state = p_hstate;
+}
+
+static inline ptr_t
+hart_pc(struct hart_state* hstate)
+{
+ return hstate->execp->elink;
+}
+
+static inline ptr_t
+hart_sp(struct hart_state* hstate)
+{
+ return hstate->execp->sp;
+}
+
+static inline bool
+kernel_context(struct hart_state* hstate)
+{
+ // TODO
+ return false;
+}
+
+static inline ptr_t
+hart_stack_frame(struct hart_state* hstate)
+{
+ return hstate->registers.fp;
+}
+
+#endif
+
+#endif /* __LUNAIX_ARCH_HART_H */
--- /dev/null
+#ifndef __LUNAIX_MEMPART_H
+#define __LUNAIX_MEMPART_H
+
+
+#define END_POINT(name) (name + name##_SIZE - 1)
+
+#ifdef __LD__
+#define __ulong(val) val
+#else
+#define __ulong(val) val##UL
+#endif
+
+#define KSTACK_AREA __ulong(0x0000000100000000)
+#define KSTACK_AREA_SIZE __ulong(0x0000000040000000)
+#define KSTACK_AREA_END END_POINT(KSTACK_AREA)
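+
+/* e.g. KSTACK_AREA_END == 0x100000000 + 0x40000000 - 1 == 0x13fffffff */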
+
+#define USR_EXEC __ulong(0x0000008000000000)
+#define USR_EXEC_SIZE __ulong(0x0000002000000000)
+#define USR_EXEC_END END_POINT(USR_EXEC)
+
+#define USR_MMAP __ulong(0x0000010000000000)
+#define USR_MMAP_SIZE __ulong(0x0000008000000000)
+#define USR_MMAP_END END_POINT(USR_MMAP)
+
+#define USR_STACK __ulong(0x00007f8000000000)
+#define USR_STACK_SIZE __ulong(0x0000001fc0000000)
+#define USR_STACK_SIZE_THREAD __ulong(0x0000000000200000)
+#define USR_STACK_END END_POINT(USR_STACK)
+
+
+// the kernel's home
+
+#define KERNEL_RESIDENT __ulong(0xfffffd8000000000) // -2.5T
+#define VMAP KERNEL_RESIDENT // -2.5T
+#define VMAP_SIZE __ulong(0x0000010000000000)
+#define VMAP_END END_POINT(VMAP)
+
+#define VMS_MOUNT_1 __ulong(0xfffffe8000000000) // -1.5T
+#define VMS_MOUNT_1_SIZE __ulong(0x0000008000000000)
+#define VMS_MOUNT_1_END END_POINT(VMS_MOUNT_1)
+
+#define VMS_SELF_MOUNT __ulong(0xffffff0000000000) // -1T
+
+#define KMAP __ulong(0xffffff8000000000)
+#define PG_MOUNT_1 KMAP // -512G
+#define PG_MOUNT_1_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_1_END END_POINT(PG_MOUNT_1)
+
+#define PG_MOUNT_2 __ulong(0xffffff8000001000)
+#define PG_MOUNT_2_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_2_END END_POINT(PG_MOUNT_2)
+
+#define PG_MOUNT_3 __ulong(0xffffff8000002000)
+#define PG_MOUNT_3_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_3_END END_POINT(PG_MOUNT_3)
+
+#define PG_MOUNT_4 __ulong(0xffffff8000003000)
+#define PG_MOUNT_4_SIZE __ulong(0x0000000000001000)
+#define PG_MOUNT_4_END END_POINT(PG_MOUNT_4)
+
+#define PG_MOUNT_VAR __ulong(0xffffff8000004000)
+#define PG_MOUNT_VAR_SIZE __ulong(0x000000003fffc000)
+#define PG_MOUNT_VAR_END END_POINT(PG_MOUNT_VAR)
+
+#define PMAP __ulong(0xffffff8040000000)
+
+#define KERNEL_IMG __ulong(0xffffffff80000000) // -2G
+#define KERNEL_IMG_SIZE __ulong(0x0000000080000000)
+#define KERNEL_IMG_END END_POINT(KERNEL_IMG)
+
+#endif
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_MM_DEFS_H
+#define __LUNAIX_MM_DEFS_H
+
+#include "mempart.h"
+#include "pagetable.h"
+
+/*
+ Regardless of architecture, we need to draw these lines very carefully,
+ and must take the size of the VM space into account. In general, we aim
+ to reserve a "sufficiently large" region of memory for the kernel.
+
+ In terms of x86_32:
+ * PTEs #768~1022 of the PD (0x00000000c0000000, ~1GiB)
+
+ In light of upcoming x86_64 support (for Level 4&5 Paging):
+ * entry #510 of the PML4 (0x0000ff0000000000, ~512GiB)
+ * entry #510 of the PML5 (0x01fe000000000000, ~256TiB)
+
+
+ KERNEL_RESIDENT - high-mem region where the kernel resides
+ KSTACK_PAGES - number of pages backing each kernel stack
+ KEXEC_RSVD - number of pages reserved for the kernel image
+*/
+
+#define KSTACK_PAGES 4
+#define KEXEC_RSVD 32
+
+#define KSTACK_SIZE (KSTACK_PAGES * PAGE_SIZE)
+
+#define kernel_addr(addr) ((addr) >= KERNEL_RESIDENT || (addr) < USR_EXEC)
+
+#define to_kphysical(k_va) ((ptr_t)(k_va) - KERNEL_IMG)
+#define to_kvirtual(k_pa) ((ptr_t)(k_pa) + KERNEL_IMG)
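+
+/*
+ * Example: with the image linked at KERNEL_IMG, kernel VA
+ * 0xffffffff80100000 translates to physical 0x100000 via to_kphysical(),
+ * and to_kvirtual() is its inverse (assuming the kernel is loaded at
+ * physical offset LOAD_OFF, as the boot linkage here arranges).
+ */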
+
+#endif /* __LUNAIX_MM_DEFS_H */
--- /dev/null
+#include <asm-generic/muldiv64.h>
--- /dev/null
+#ifndef __LUNAIX_ARCH_PAGETABLE_H
+#define __LUNAIX_ARCH_PAGETABLE_H
+
+#include <lunaix/types.h>
+#include <lunaix/compiler.h>
+
+#include "aa64_mmu.h"
+
+/* ******** Page Table Manipulation ******** */
+
+#define _PTW_LEVEL 4
+
+
+// Note: VMS_BITS assumes the 48-bit VA configuration (TCR_EL1.TnSZ = 16)
+
+
+#define VMS_BITS 48
+
+#define PMS_BITS CONFIG_AA64_OA_SIZE
+
+#define VMS_SIZE ( 1UL << VMS_BITS)
+#define VMS_MASK ( VMS_SIZE - 1 )
+#define PMS_SIZE ( 1UL << PMS_BITS )
+#define PMS_MASK ( PMS_SIZE - 1 )
+
+#define __index(va) ( (va) & VMS_MASK )
+#define __vaddr(va) \
+ ( (__index(va) ^ ((VMS_MASK + 1) >> 1)) - ((VMS_MASK + 1) >> 1) )
+#define __paddr(pa) ( (pa) & PMS_MASK )
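+
+/*
+ * __vaddr() sign-extends the 48-bit index into a canonical 64-bit VA:
+ * XOR-ing in bit 47 and then subtracting it propagates that bit into
+ * bits 63:48 (e.g. index 0x800000000000 becomes 0xffff800000000000).
+ */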
+
+
+#if defined(CONFIG_AA64_PAGE_GRAN_4K)
+#define _PAGE_BASE_SHIFT 12
+#elif defined(CONFIG_AA64_PAGE_GRAN_16K)
+#define _PAGE_BASE_SHIFT 14
+#elif defined(CONFIG_AA64_PAGE_GRAN_64K)
+#define _PAGE_BASE_SHIFT 16
+#endif
+
+#define _PAGE_BASE_SIZE ( 1UL << _PAGE_BASE_SHIFT )
+#define _PAGE_BASE_MASK ( (_PAGE_BASE_SIZE - 1) & VMS_MASK )
+
+#define _PAGE_LEVEL_SHIFT 9
+#define _PAGE_LEVEL_SIZE ( 1UL << _PAGE_LEVEL_SHIFT )
+#define _PAGE_LEVEL_MASK ( _PAGE_LEVEL_SIZE - 1 )
+#define _PAGE_Ln_SIZE(n) \
+ ( 1UL << (_PAGE_BASE_SHIFT + _PAGE_LEVEL_SHIFT * (_PTW_LEVEL - (n) - 1)) )
+
+/* General size of a LnT huge page */
+
+#define L0T_SIZE _PAGE_Ln_SIZE(0)
+#define L1T_SIZE _PAGE_Ln_SIZE(1)
+#define L2T_SIZE _PAGE_Ln_SIZE(2)
+#define L3T_SIZE _PAGE_Ln_SIZE(3)
+#define LFT_SIZE _PAGE_Ln_SIZE(3)
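+
+/*
+ * With a 4K granule: L0T covers 512G, L1T 1G, L2T 2M, and L3T/LFT 4K
+ * per entry (shift = 12 + 9 * (3 - n)).
+ */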
+
+
+struct __pte {
+ unsigned long val;
+} align(8);
+
+// upper attributes
+
+#define _PTE_UXN (1UL << 54)
+#define _PTE_PXN (1UL << 53)
+#define _PTE_XN (_PTE_UXN | _PTE_PXN)
+#define _PTE_Contig (1UL << 52)
+#define _PTE_DBM (1UL << 51)
+
+#ifdef _MMU_USE_OA52
+#if CONFIG_AA64_PAGE_GRAN_64K
+#define __OA_HIGH_MASK ( 0b1111 << 12 )
+#define __OA_HEAD(pa) ((pa) & ((1UL << 48) - 1) & ~PAGE_MASK)
+#define __OA_TAIL(pa) ((((pa) >> 48) & 0b1111) << 12)
+#else
+#define __OA_HIGH_MASK ( 0b0011 << 8 )
+#define __OA_HEAD(pa) ((pa) & ((1UL << 50) - 1) & ~PAGE_MASK)
+#define __OA_TAIL(pa) ((((pa) >> 50) & 0b0011) << 8)
+#endif
+#else
+#define __OA_HIGH_MASK (0)
+#define __OA_HEAD(pa) (__paddr(pa) & ~PAGE_MASK)
+#define __OA_TAIL(pa) (0)
+#endif
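+
+/*
+ * Worked example (64K granule, 52-bit OA): for pa = 0x3deadbeef0000,
+ * __OA_HEAD() keeps bits 47:16 and __OA_TAIL() folds OA[51:48] (here
+ * 0x3) into descriptor bits 15:12, matching the VMSAv8-64 layout.
+ */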
+
+#define _PTE_OA(pa) (__OA_HEAD(pa) | __OA_TAIL(pa))
+
+// lower attributes
+
+#define _PTE_nG (1UL << 11)
+#define _PTE_AF (1UL << 10)
+
+// AP bits: R_RNGJG
+
+#define _PTE_AP(p, u) ((((p) & 1) << 1 | ((u) & 1)) << 6)
+#define _PTE_PRW _PTE_AP(0 , 0) // priv rw, unpriv none
+#define _PTE_PRWURW _PTE_AP(0 , 1) // priv rw, unpriv rw
+#define _PTE_U _PTE_AP(0 , 1) // generic unpriv flag
+#define _PTE_PRO _PTE_AP(1 , 0) // priv ro, unpriv none
+#define _PTE_PROURO _PTE_AP(1 , 1) // priv ro, unpriv ro
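+
+/*
+ * AP[2:1] occupies descriptor bits 7:6 -- AP[2] selects read-only,
+ * AP[1] grants unprivileged (EL0) access:
+ *
+ *   AP(0,0): EL1 rw, EL0 none    AP(0,1): EL1 rw, EL0 rw
+ *   AP(1,0): EL1 ro, EL0 none    AP(1,1): EL1 ro, EL0 ro
+ */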
+
+#define _PTE_BLKDESC (0b01)
+#define _PTE_TABDESC (0b11)
+#define _PTE_LFTDESC (0b11)
+#define _PTE_VALID (0b01)
+#define _PTE_DESC_MASK (0b11)
+#define _PTE_SET_DESC(pte_val, desc) \
+ ( ((pte_val) & ~_PTE_DESC_MASK) | ((desc) & _PTE_DESC_MASK) )
+#define _PTE_GET_DESC(pte_val) \
+ ( (pte_val) & _PTE_DESC_MASK )
+
+#define __MEMGUARD 0xf0f0f0f0f0f0f0f0UL
+
+typedef unsigned long pte_attr_t;
+typedef unsigned long pfn_t;
+
+/* General mask to get page offset of a LnT huge page */
+
+#define L0T_MASK ( L0T_SIZE - 1 )
+#define L1T_MASK ( L1T_SIZE - 1 )
+#define L2T_MASK ( L2T_SIZE - 1 )
+#define L3T_MASK ( L3T_SIZE - 1 )
+#define LFT_MASK ( LFT_SIZE - 1 )
+
+/* Masks to get index of a LnTE */
+
+#define L0T_INDEX_MASK ( VMS_MASK ^ L0T_MASK )
+#define L1T_INDEX_MASK ( L0T_MASK ^ L1T_MASK )
+#define L2T_INDEX_MASK ( L1T_MASK ^ L2T_MASK )
+#define L3T_INDEX_MASK ( L2T_MASK ^ L3T_MASK )
+#define LFT_INDEX_MASK ( L3T_MASK ^ LFT_MASK )
+
+#define PAGE_SHIFT _PAGE_BASE_SHIFT
+#define PAGE_SIZE _PAGE_BASE_SIZE
+#define PAGE_MASK _PAGE_BASE_MASK
+
+#define LEVEL_SHIFT _PAGE_LEVEL_SHIFT
+#define LEVEL_SIZE _PAGE_LEVEL_SIZE
+#define LEVEL_MASK _PAGE_LEVEL_MASK
+
+// max PTEs number
+#define MAX_PTEN _PAGE_LEVEL_SIZE
+
+// max translation level supported
+#define MAX_LEVEL _PTW_LEVEL
+
+typedef struct __pte pte_t;
+
+#define _PTE_PROT_MASK ( ~((1UL << 50) - 1) | (PAGE_MASK & ~__OA_HIGH_MASK) )
+#define _PTE_PPFN_MASK ( ~_PTE_PROT_MASK )
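+
+/*
+ * _PTE_PROT_MASK covers the upper attributes (bits 63:50) plus the
+ * low in-page bits, excluding the field reused for the high output
+ * address bits under 52-bit OA; its complement is the frame number.
+ */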
+
+#define _PAGE_BASIC ( _PTE_VALID )
+
+#define KERNEL_EXEC ( _PAGE_BASIC | _PTE_PRO | _PTE_UXN )
+#define KERNEL_DATA ( _PAGE_BASIC | _PTE_PRW | _PTE_XN )
+#define KERNEL_RDONLY ( _PAGE_BASIC | _PTE_PRO | _PTE_XN )
+#define KERNEL_ROEXEC KERNEL_EXEC
+#define KERNEL_PGTAB ( _PAGE_BASIC | _PTE_TABDESC )
+
+#define USER_EXEC ( _PAGE_BASIC | _PTE_PROURO | _PTE_PXN )
+#define USER_DATA ( _PAGE_BASIC | _PTE_PRWURW | _PTE_XN )
+#define USER_RDONLY ( _PAGE_BASIC | _PTE_PROURO )
+#define USER_ROEXEC USER_EXEC
+#define USER_PGTAB ( _PAGE_BASIC | _PTE_TABDESC )
+
+#define SELF_MAP ( KERNEL_DATA | _PTE_TABDESC )
+
+#define __mkpte_from(pte_val) ((pte_t){ .val = (pte_val) })
+
+#define null_pte ( __mkpte_from(0) )
+#define guard_pte ( __mkpte_from(__MEMGUARD) )
+#define pte_val(pte) ( pte.val )
+
+
+static inline bool
+pte_isguardian(pte_t pte)
+{
+ return pte.val == __MEMGUARD;
+}
+
+static inline pte_t
+mkpte_prot(pte_attr_t prot)
+{
+ pte_attr_t attrs = (prot & _PTE_PROT_MASK) | _PTE_LFTDESC;
+ return __mkpte_from(attrs);
+}
+
+static inline pte_t
+mkpte(ptr_t paddr, pte_attr_t prot)
+{
+ pte_attr_t attrs = (prot & _PTE_PROT_MASK) | _PTE_LFTDESC;
+ return __mkpte_from((paddr & ~_PAGE_BASE_MASK) | attrs);
+}
+
+static inline pte_t
+mkpte_root(ptr_t paddr, pte_attr_t prot)
+{
+ pte_attr_t attrs = (prot & _PTE_PROT_MASK) | _PTE_TABDESC;
+ return __mkpte_from((paddr & ~_PAGE_BASE_MASK) | attrs);
+}
+
+static inline pte_t
+mkpte_raw(unsigned long pte_val)
+{
+ return __mkpte_from(pte_val);
+}
+
+static inline pte_t
+pte_setpaddr(pte_t pte, ptr_t paddr)
+{
+ return __mkpte_from((pte.val & _PTE_PROT_MASK) | (paddr & ~_PTE_PROT_MASK));
+}
+
+static inline pte_t
+pte_setppfn(pte_t pte, pfn_t ppfn)
+{
+ return pte_setpaddr(pte, ppfn * PAGE_SIZE);
+}
+
+static inline ptr_t
+pte_paddr(pte_t pte)
+{
+ return __paddr(pte.val) & ~_PTE_PROT_MASK;
+}
+
+static inline pfn_t
+pte_ppfn(pte_t pte)
+{
+ return pte_paddr(pte) >> _PAGE_BASE_SHIFT;
+}
+
+static inline pte_t
+pte_setprot(pte_t pte, ptr_t prot)
+{
+ return __mkpte_from((pte.val & ~_PTE_PROT_MASK) | (prot & _PTE_PROT_MASK));
+}
+
+static inline pte_attr_t
+pte_prot(pte_t pte)
+{
+ return (pte.val & _PTE_PROT_MASK);
+}
+
+static inline bool
+pte_isnull(pte_t pte)
+{
+ return !pte.val;
+}
+
+static inline pte_t
+pte_mkhuge(pte_t pte)
+{
+ return __mkpte_from(_PTE_SET_DESC(pte.val, _PTE_BLKDESC));
+}
+
+static inline pte_t
+pte_mkvolatile(pte_t pte)
+{
+ return __mkpte_from(pte.val);
+}
+
+static inline pte_t
+pte_mkroot(pte_t pte)
+{
+ return __mkpte_from(_PTE_SET_DESC(pte.val, _PTE_TABDESC));
+}
+
+static inline bool
+pte_huge(pte_t pte)
+{
+ return _PTE_GET_DESC(pte.val) == _PTE_BLKDESC;
+}
+
+static inline pte_t
+pte_mkloaded(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_VALID);
+}
+
+static inline pte_t
+pte_mkunloaded(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_VALID);
+}
+
+static inline bool
+pte_isloaded(pte_t pte)
+{
+ return !!(pte.val & _PTE_VALID);
+}
+
+static inline pte_t
+pte_mkwprotect(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_PRO);
+}
+
+static inline pte_t
+pte_mkwritable(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_PRO);
+}
+
+static inline bool
+pte_iswprotect(pte_t pte)
+{
+ return !!(pte.val & _PTE_PRO);
+}
+
+static inline pte_t
+pte_mkuser(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_U);
+}
+
+static inline pte_t
+pte_mkkernel(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_U);
+}
+
+static inline bool
+pte_allow_user(pte_t pte)
+{
+ return !!(pte.val & _PTE_U);
+}
+
+static inline pte_t
+pte_mkexec(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_PXN);
+}
+
+static inline pte_t
+pte_mknexec(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_PXN);
+}
+
+static inline pte_t
+pte_mkuexec(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_UXN);
+}
+
+static inline pte_t
+pte_mknuexec(pte_t pte)
+{
+ return __mkpte_from(pte.val | _PTE_UXN);
+}
+
+static inline bool
+pte_isexec(pte_t pte)
+{
+ return !(pte.val & _PTE_PXN);
+}
+
+static inline bool
+pte_isuexec(pte_t pte)
+{
+ return !(pte.val & _PTE_UXN);
+}
+
+static inline pte_t
+pte_mkuntouch(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_AF);
+}
+
+static inline bool
+pte_istouched(pte_t pte)
+{
+ return !!(pte.val & _PTE_AF);
+}
+
+static inline pte_t
+pte_mkclean(pte_t pte)
+{
+ return __mkpte_from(pte.val & ~_PTE_DBM);
+}
+
+static inline bool
+pte_dirty(pte_t pte)
+{
+ return !!(pte.val & _PTE_DBM);
+}
+
+static inline void
+set_pte(pte_t* ptep, pte_t pte)
+{
+ ptep->val = pte.val;
+}
+
+static inline pte_t
+pte_at(pte_t* ptep) {
+ return *ptep;
+}
+
+pte_t
+translate_vmr_prot(unsigned int vmr_prot, pte_t pte);
+
+#endif /* __LUNAIX_ARCH_PAGETABLE_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_PHYSICAL_H
+#define __LUNAIX_ARCH_PHYSICAL_H
+
+#include <lunaix/ds/llist.h>
+#include "mm_defs.h"
+
+#define PPLIST_STARTVM PMAP
+
+struct ppage_arch
+{
+
+};
+
+#endif /* __LUNAIX_ARCH_PHYSICAL_H */
--- /dev/null
+#ifndef __LUNAIX_ARCH_TLB_H
+#define __LUNAIX_ARCH_TLB_H
+
+#include <lunaix/types.h>
+
+#include <asm/aa64_mmu.h>
+#include <asm/aa64_sysinst.h>
+
+#define pack_va(asid, ttl, va) \
+ ((((unsigned long)(asid) & 0xffff) << 48) | \
+ (((unsigned long)(ttl) & 0b1111) << 44) | \
+ (pfn(va) & ((1UL << 44) - 1)))
+
+#define pack_rva(asid, ttl, base, n, scale) \
+ ((((unsigned long)(asid) & 0xffff) << 48) | \
+ (((unsigned long)_MMU_TG & 0b11) << 46) | \
+ (((unsigned long)(scale) & 0b11) << 44) | \
+ (((unsigned long)(n) & 0x1f) << 39) | \
+ (((unsigned long)(ttl) & 0b11) << 37) | \
+ (pfn(base) & ((1UL << 37) - 1)))
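+
+/*
+ * Operand layout per the Arm ARM: VA-based ops take { ASID[63:48],
+ * TTL[47:44], VA[55:12] in [43:0] }; ranged ops take { ASID[63:48],
+ * TG[47:46], SCALE[45:44], NUM[43:39], TTL[38:37], BaseADDR[36:0] }.
+ * With SCALE = 0 a range spans (NUM + 1) * 2 granules, so the ranged
+ * flushes below may over-invalidate; they also assume FEAT_TLBIRANGE.
+ */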
+
+/**
+ * @brief Invalidate an entry across all address spaces
+ *
+ * @param va
+ */
+static inline void must_inline
+__tlb_invalidate(ptr_t va)
+{
+ sys_a1(tlbi_vaae1, pack_va(0, 0, va));
+}
+
+/**
+ * @brief Invalidate an entry of an address space identified
+ * by ASID
+ *
+ * @param asid
+ * @param va
+ */
+static inline void must_inline
+__tlb_flush_asid(unsigned int asid, ptr_t va)
+{
+ sys_a1(tlbi_vae1, pack_va(asid, 0, va));
+}
+
+/**
+ * @brief Invalidate an entry of global address space
+ *
+ * @param va
+ */
+static inline void must_inline
+__tlb_flush_global(ptr_t va)
+{
+ __tlb_flush_asid(0, va);
+}
+
+/**
+ * @brief Invalidate the entire TLB
+ */
+static inline void must_inline
+__tlb_flush_all()
+{
+ sys_a0(tlbi_alle1);
+}
+
+/**
+ * @brief Invalidate all entries of an address space identified
+ * by ASID
+ *
+ * @param asid
+ */
+static inline void must_inline
+__tlb_flush_asid_all(unsigned int asid)
+{
+ sys_a1(tlbi_aside1, pack_va(asid, 0, 0));
+}
+
+
+/**
+ * @brief Invalidate entries of all address spaces
+ *
+ * @param addr
+ * @param npages
+ */
+static inline void
+tlb_flush_range(ptr_t addr, unsigned int npages)
+{
+#ifdef _MMU_USE_OA52
+ for (unsigned int i = 0; i < npages; i++)
+ {
+ __tlb_invalidate(addr + i * PAGE_SIZE);
+ }
+#else
+ sys_a1(tlbi_rvaae1, pack_rva(0, 0, addr, npages, 0));
+#endif
+}
+
+/**
+ * @brief Invalidate entries of an address space identified
+ * by ASID
+ *
+ * @param asid
+ * @param addr
+ * @param npages
+ */
+static inline void
+tlb_flush_asid_range(unsigned int asid, ptr_t addr, unsigned int npages)
+{
+#ifdef _MMU_USE_OA52
+ for (unsigned int i = 0; i < npages; i++)
+ {
+ __tlb_flush_asid(asid, addr + i * PAGE_SIZE);
+ }
+#else
+ sys_a1(tlbi_rvae1, pack_rva(asid, 0, addr, npages, 0));
+#endif
+}
+
+#include <asm-generic/tlb-shared.h>
+
+#endif /* __LUNAIX_ARCH_TLB_H */
--- /dev/null
+#ifndef __LUNAIX_BASE_DEFS_LD_INC
+#define __LUNAIX_BASE_DEFS_LD_INC
+
+#define __LD__
+#include <sys/mm/mempart.h>
+
+#define KEXEC_BASE KERNEL_IMG
+#define PAGE_GRAN 4K
+
+#define ENTRY_POINT start_
+
+#define LOAD_OFF 0x100000
+
+
+#endif /* __LUNAIX_BASE_DEFS_LD_INC */
--- /dev/null
+#ifndef __LUNAIX_BOOT_SECS_LD_INC
+#define __LUNAIX_BOOT_SECS_LD_INC
+
+#include "base_defs.ld.inc"
+
+.boot.text BLOCK(PAGE_GRAN) :
+{
+ *(.boot.text)
+} : boot_text
+
+.boot.data BLOCK(PAGE_GRAN) :
+{
+ *(.boot.data)
+ *(.boot.bss)
+} : boot_data
+
+#endif
\ No newline at end of file
--- /dev/null
+#ifndef __LUNAIX_TLB_SHARED_H
+#define __LUNAIX_TLB_SHARED_H
+
+#include <lunaix/types.h>
+#include <lunaix/mm/procvm.h>
+
+/**
+ * @brief Invalidate an entry of the kernel address space
+ *
+ * @param addr
+ */
+static inline void
+tlb_flush_kernel(ptr_t addr)
+{
+ __tlb_flush_global(addr);
+}
+
+/**
+ * @brief Invalidate entries of the kernel address space
+ *
+ * @param addr
+ * @param npages
+ */
+static inline void
+tlb_flush_kernel_ranged(ptr_t addr, unsigned int npages)
+{
+ for (unsigned int i = 0; i < npages; i++)
+ {
+ tlb_flush_kernel(addr + i * PAGE_SIZE);
+ }
+}
+
+/**
+ * @brief Invalidate an entry within a process memory space
+ *
+ * @param mm
+ * @param addr
+ */
+void
+tlb_flush_mm(struct proc_mm* mm, ptr_t addr);
+
+/**
+ * @brief Invalidate entries within a process memory space
+ *
+ * @param mm
+ * @param addr
+ * @param npages
+ */
+void
+tlb_flush_mm_range(struct proc_mm* mm, ptr_t addr, unsigned int npages);
+
+/**
+ * @brief Invalidate an entry within a vm region
+ *
+ * @param vmr
+ * @param va
+ */
+void
+tlb_flush_vmr(struct mm_region* vmr, ptr_t va);
+
+/**
+ * @brief Invalidate all entries within a vm region
+ *
+ * @param vmr
+ */
+void
+tlb_flush_vmr_all(struct mm_region* vmr);
+
+/**
+ * @brief Invalidate entries within a vm region
+ *
+ * @param vmr
+ * @param addr
+ * @param npages
+ */
+void
+tlb_flush_vmr_range(struct mm_region* vmr, ptr_t addr, unsigned int npages);
+
+#endif /* __LUNAIX_TLB_SHARED_H */
#include <asm/tlb.h>
#include <lunaix/process.h>
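+// _default is assumed to bind these as weak, arch-overridable default
+// implementations; each generic flush lowers onto the ASID-tagged
+// TLBI helpers declared in asm/tlb.h.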
-void
+_default void
tlb_flush_mm(struct proc_mm* mm, ptr_t addr)
{
__tlb_flush_asid(procvm_asid(mm), addr);
}
-void
+_default void
tlb_flush_mm_range(struct proc_mm* mm, ptr_t addr, unsigned int npages)
{
tlb_flush_asid_range(procvm_asid(mm), addr, npages);
}
-void
+_default void
tlb_flush_vmr(struct mm_region* vmr, ptr_t va)
{
__tlb_flush_asid(procvm_asid(vmr->proc_vms), va);
}
-void
+_default void
tlb_flush_vmr_all(struct mm_region* vmr)
{
tlb_flush_asid_range(procvm_asid(vmr->proc_vms),
vmr->start, leaf_count(vmr->end - vmr->start));
}
-void
+_default void
tlb_flush_vmr_range(struct mm_region* vmr, ptr_t addr, unsigned int npages)
{
tlb_flush_asid_range(procvm_asid(vmr->proc_vms), addr, npages);
#define KERNEL_RDONLY ( KERNEL_PAGE | _PTE_NX )
#define KERNEL_ROEXEC ( KERNEL_PAGE | _PTE_X )
#define KERNEL_PGTAB ( KERNEL_PAGE | _PTE_W )
-#define KERNEL_DEFAULT KERNEL_PGTAB
#define USER_PAGE ( _PTE_P | _PTE_U )
#define USER_EXEC ( USER_PAGE | _PTE_X )
#define __LUNAIX_ARCH_TLB_H
#include <lunaix/compiler.h>
-#include <lunaix/mm/procvm.h>
-#include <lunaix/mm/physical.h>
+#include <asm/mm_defs.h>
/**
* @brief Invalidate an entry of all address space
}
}
-/**
- * @brief Invalidate an entry within a process memory space
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_mm(struct proc_mm* mm, ptr_t addr);
-
-/**
- * @brief Invalidate entries within a process memory space
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_mm_range(struct proc_mm* mm, ptr_t addr, unsigned int npages);
-
-/**
- * @brief Invalidate an entry within a vm region
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_vmr(struct mm_region* vmr, ptr_t va);
-
-/**
- * @brief Invalidate all entries within a vm region
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_vmr_all(struct mm_region* vmr);
-
-/**
- * @brief Invalidate entries within a vm region
- *
- * @param asid
- * @param addr
- * @param npages
- */
-void
-tlb_flush_vmr_range(struct mm_region* vmr, ptr_t addr, unsigned int npages);
+#include <asm-generic/tlb-shared.h>
#endif /* __LUNAIX_VMTLB_H */
"""
type(bool)
- default(not v(arch).startswith("x86"))
+ default(False)
+
+ if v(arch) == "aarch64":
+     set_value(True)
@ReadOnly
@Term("Maximum size of device tree blob (in KiB)")