#ifndef __LUNAIX_PAGE_H
#define __LUNAIX_PAGE_H
-#include <lunaix/common.h>
-#include <stdint.h>
-#define PG_SIZE_BITS 12
-#define PG_SIZE (1 << PG_SIZE_BITS)
-#define PG_INDEX_BITS 10
+#include <lunaix/mm/pmm.h>
+#include <lunaix/mm/vmm.h>
+#include <lunaix/mm/vmtlb.h>
-#define PG_MAX_ENTRIES 1024U
-#define PG_LAST_TABLE PG_MAX_ENTRIES - 1
-#define PG_FIRST_TABLE 0
+#include <klibc/string.h>
-#define PTE_NULL 0
+/**
+ * @brief A leaflet represents a bunch of 4K ppages
+ * as a single multi-order page; a big page can thus
+ * be seen as an unfolded version of these small 4K
+ * ppages, hence the name.
+ * It is introduced to solve an issue discovered
+ * during refactoring: it was jolly unclear whether
+ * a ppage is a head, a tail, or, even worse, one in
+ * the middle, when passed around between functions.
+ * The concept is surprisingly similar to Linux's
+ * struct folio (I swear to the Almighty Princess of
+ * the Sun, Celestia, that I didn't quite understand
+ * what a folio is until I had written the
+ * conceptually same thing).
+ *
+ */
+struct leaflet
+{
+ struct ppage lead_page;
+};
+
+static inline struct leaflet*
+get_leaflet(struct ppage* page)
+{
+ return (struct leaflet*)leading_page(page);
+}
+
+static inline struct ppage*
+get_ppage(struct leaflet* leaflet)
+{
+ return (struct ppage*)leaflet;
+}
+
+static inline struct leaflet*
+alloc_leaflet(int order)
+{
+ return (struct leaflet*)pmm_alloc_napot_type(POOL_UNIFIED, order, 0);
+}
+
+static inline struct leaflet*
+alloc_leaflet_pinned(int order)
+{
+ return (struct leaflet*)pmm_alloc_napot_type(POOL_UNIFIED, order, PP_FGLOCKED);
+}
+
+static inline void
+leaflet_borrow(struct leaflet* leaflet)
+{
+ struct ppage* const page = get_ppage(leaflet);
+ assert(page->refs);
+ if (reserved_page(page)) {
+ return;
+ }
+
+ page->refs++;
+}
+
+static inline void
+leaflet_return(struct leaflet* leaflet)
+{
+ struct ppage* const page = get_ppage(leaflet);
+ assert(page->refs);
+ pmm_free_one(page, 0);
+}
+
+static inline unsigned int
+leaflet_refcount(struct leaflet* leaflet)
+{
+ return get_ppage(leaflet)->refs;
+}
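+
+/*
+ * Illustrative lifecycle sketch (not part of the API), assuming
+ * the allocator hands back a leaflet holding one initial
+ * reference; a hypothetical second owner pairs leaflet_borrow()
+ * with its own leaflet_return():
+ *
+ *   struct leaflet* leaflet = alloc_leaflet(0);  // one 4K page
+ *   if (leaflet) {
+ *       leaflet_borrow(leaflet);   // second owner takes a ref
+ *       assert(leaflet_refcount(leaflet) == 2);
+ *
+ *       leaflet_return(leaflet);   // second owner drops its ref
+ *       leaflet_return(leaflet);   // last ref: back to the pmm
+ *   }
+ */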
+
+static inline int
+leaflet_order(struct leaflet* leaflet)
+{
+ return ppage_order(get_ppage(leaflet));
+}
+
+static inline int
+leaflet_size(struct leaflet* leaflet)
+{
+ return PAGE_SIZE << leaflet_order(leaflet);
+}
+
+static inline int
+leaflet_nfold(struct leaflet* leaflet)
+{
+ return 1 << leaflet_order(leaflet);
+}
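+
+/*
+ * For instance, an order-2 leaflet folds 2^2 = 4 contiguous 4K
+ * ppages: leaflet_nfold() yields 4 and leaflet_size() yields
+ * 4 * PAGE_SIZE = 16KiB.
+ */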
+
+static inline struct leaflet*
+ppfn_leaflet(pfn_t ppfn)
+{
+ return get_leaflet(ppage(ppfn));
+}
+
+static inline struct leaflet*
+pte_leaflet(pte_t pte)
+{
+ struct ppage* page = ppage(pfn(pte_paddr(pte)));
+ return get_leaflet(page);
+}
-#define P2V(paddr) ((uintptr_t)(paddr) + KERNEL_MM_BASE)
-#define V2P(vaddr) ((uintptr_t)(vaddr)-KERNEL_MM_BASE)
+static inline struct leaflet*
+pte_leaflet_aligned(pte_t pte)
+{
+ struct ppage* page = ppage(pfn(pte_paddr(pte)));
+ struct leaflet* leaflet = get_leaflet(page);
-#define PG_ALIGN(addr) ((uintptr_t)(addr)&0xFFFFF000UL)
+ // the pte must point at the leading page of the leaflet
+ assert((ptr_t)leaflet == (ptr_t)page);
+ return leaflet;
+}
-#define L1_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr)&0xFFC00000UL) >> 22)
-#define L2_INDEX(vaddr) (uint32_t)(((uintptr_t)(vaddr)&0x003FF000UL) >> 12)
-#define PG_OFFSET(vaddr) (uint32_t)((uintptr_t)(vaddr)&0x00000FFFUL)
+static inline pfn_t
+leaflet_ppfn(struct leaflet* leaflet)
+{
+ return ppfn(get_ppage(leaflet));
+}
-#define GET_PT_ADDR(pde) PG_ALIGN(pde)
-#define GET_PG_ADDR(pte) PG_ALIGN(pte)
+static inline ptr_t
+leaflet_addr(struct leaflet* leaflet)
+{
+ return page_addr(ppfn(get_ppage(leaflet)));
+}
+
+static inline void
+unpin_leaflet(struct leaflet* leaflet)
+{
+ change_page_type(get_ppage(leaflet), 0);
+}
+
+static inline void
+pin_leaflet(struct leaflet* leaflet)
+{
+ change_page_type(get_ppage(leaflet), PP_FGLOCKED);
+}
+
+/**
+ * @brief Map a leaflet at the given pte slot
+ *
+ * @param ptep pointer to the pte to map the leaflet at
+ * @param pte template pte carrying the protection bits
+ * @param leaflet the leaflet to be mapped
+ * @return the number of pages folded into that leaflet
+ */
+static inline size_t
+ptep_map_leaflet(pte_t* ptep, pte_t pte, struct leaflet* leaflet)
+{
+ // We do not support huge leaflets yet
+ assert(leaflet_order(leaflet) < LEVEL_SHIFT);
-#define PG_DIRTY(pte) ((pte & (1 << 6)) >> 6)
-#define PG_ACCESSED(pte) ((pte & (1 << 5)) >> 5)
+ pte = pte_setppfn(pte, leaflet_ppfn(leaflet));
+ pte = pte_mkloaded(pte);
-#define IS_CACHED(entry) ((entry & 0x1))
+ int n = leaflet_nfold(leaflet);
+ vmm_set_ptes_contig(ptep, pte, LFT_SIZE, n);
-#define PG_PRESENT (0x1)
-#define PG_WRITE (0x1 << 1)
-#define PG_ALLOW_USER (0x1 << 2)
-#define PG_WRITE_THROUGH (1 << 3)
-#define PG_DISABLE_CACHE (1 << 4)
-#define PG_PDE_4MB (1 << 7)
+ return n;
+}
-#define NEW_L1_ENTRY(flags, pt_addr) \
- (PG_ALIGN(pt_addr) | (((flags) | PG_WRITE_THROUGH) & 0xfff))
-#define NEW_L2_ENTRY(flags, pg_addr) (PG_ALIGN(pg_addr) | ((flags)&0xfff))
+/**
+ * @brief Unmap a leaflet from the given pte slot
+ *
+ * @param ptep pointer to the pte the leaflet is mapped at
+ * @param leaflet the leaflet to be unmapped
+ * @return the number of pages folded into that leaflet
+ */
+static inline size_t
+ptep_unmap_leaflet(pte_t* ptep, struct leaflet* leaflet)
+{
+ // We do not support huge leaflets yet
+ assert(leaflet_order(leaflet) < LEVEL_SHIFT);
-#define V_ADDR(pd, pt, offset) ((pd) << 22 | (pt) << 12 | (offset))
-#define P_ADDR(ppn, offset) ((ppn << 12) | (offset))
+ int n = leaflet_nfold(leaflet);
+ vmm_unset_ptes(ptep, n);
-#define PG_ENTRY_FLAGS(entry) ((entry)&0xFFFU)
-#define PG_ENTRY_ADDR(entry) ((entry) & ~0xFFFU)
+ return n;
+}
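+
+/*
+ * Sketch of the intended map/unmap pairing (va and n are
+ * hypothetical locals): these helpers only touch the ptes, so
+ * flushing the TLB for the affected range is up to the caller.
+ *
+ *   pte_t* ptep = mkptep_va(VMS_SELF, va);
+ *   size_t n;
+ *
+ *   n = ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);
+ *   tlb_flush_kernel_ranged(va, n);
+ *   ...
+ *   ptep_unmap_leaflet(ptep, leaflet);
+ *   tlb_flush_kernel_ranged(va, n);
+ */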
-#define HAS_FLAGS(entry, flags) ((PG_ENTRY_FLAGS(entry) & (flags)) == flags)
-#define CONTAINS_FLAGS(entry, flags) (PG_ENTRY_FLAGS(entry) & (flags))
+static inline ptr_t
+leaflet_mount(struct leaflet* leaflet)
+{
+ pte_t* ptep = mkptep_va(VMS_SELF, PG_MOUNT_VAR);
+ ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);
-#define PG_PREM_R PG_PRESENT
-#define PG_PREM_RW PG_PRESENT | PG_WRITE
-#define PG_PREM_UR PG_PRESENT | PG_ALLOW_USER
-#define PG_PREM_URW PG_PRESENT | PG_WRITE | PG_ALLOW_USER
+ tlb_flush_kernel_ranged(PG_MOUNT_VAR, leaflet_nfold(leaflet));
-// Used for the recursive mapping of the PD. Since the PD may be operated on frequently, TLB caching is disabled here.
-#define T_SELF_REF_PERM PG_PREM_RW | PG_DISABLE_CACHE | PG_WRITE_THROUGH
+ return PG_MOUNT_VAR;
+}
-// Virtual base address of the page directory, for accessing each PDE
-#define L1_BASE_VADDR 0xFFFFF000U
+static inline void
+leaflet_unmount(struct leaflet* leaflet)
+{
+ pte_t* ptep = mkptep_va(VMS_SELF, PG_MOUNT_VAR);
+ vmm_unset_ptes(ptep, leaflet_nfold(leaflet));
-// Virtual base address of the page tables, for accessing each PTE
-#define L2_BASE_VADDR 0xFFC00000U
+ tlb_flush_kernel_ranged(PG_MOUNT_VAR, leaflet_nfold(leaflet));
+}
-// Gets the virtual address of a specific page table
-#define L2_VADDR(pd_offset) (L2_BASE_VADDR | (pd_offset << 12))
+static inline void
+leaflet_fill(struct leaflet* leaflet, unsigned int val)
+{
+ ptr_t mnt;
+
+ mnt = leaflet_mount(leaflet);
+ memset((void*)mnt, val, leaflet_size(leaflet));
+ leaflet_unmount(leaflet);
+}
-typedef unsigned long ptd_t;
-typedef unsigned long pt_t;
-typedef unsigned int pt_attr;
-typedef uint32_t x86_pte_t;
+static inline void
+leaflet_wipe(struct leaflet* leaflet)
+{
+ leaflet_fill(leaflet, 0);
+}
/**
- * @brief Virtual mapping attributes
+ * @brief Duplicate the leaflet
*
+ * @return Duplication of given leaflet
+ *
+ */
+struct leaflet*
+dup_leaflet(struct leaflet* leaflet);
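+
+/*
+ * Sketch of one plausible use, breaking a shared mapping in a
+ * copy-on-write fashion (the fault-handling context is
+ * hypothetical and protection-bit tweaks are elided):
+ *
+ *   struct leaflet* shared = pte_leaflet(pte);
+ *   struct leaflet* copy = dup_leaflet(shared);
+ *
+ *   ptep_map_leaflet(ptep, pte, copy);
+ *   leaflet_return(shared);   // drop the reference to the original
+ */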
+
+
+/**
+ * @brief Maps a number of contiguous ptes into kernel
+ * address space
+ *
+ * @param pte the pte to be mapped
+ * @param lvl_size size of the page pointed to by the given pte
+ * @param n number of ptes
+ * @return the virtual address the ptes are mapped at
+ */
+ptr_t
+vmap_ptes_at(pte_t pte, size_t lvl_size, int n);
+
+/**
+ * @brief Maps a number of contiguous ptes into kernel
+ * address space (leaf page size)
+ *
+ * @param pte the pte to be mapped
+ * @param n number of ptes
+ * @return the virtual address the ptes are mapped at
+ */
+static inline ptr_t
+vmap_leaf_ptes(pte_t pte, int n)
+{
+ return vmap_ptes_at(pte, LFT_SIZE, n);
+}
+
+/**
+ * @brief Maps a leaflet into kernel address space
+ * (leaf page size)
+ *
+ * @param leaflet the leaflet to be mapped
+ * @param prot protection to be applied
+ * @return the virtual address the leaflet is mapped at
+ */
+static inline ptr_t
+vmap(struct leaflet* leaflet, pte_attr_t prot)
+{
+ pte_t _pte = mkpte(page_addr(leaflet_ppfn(leaflet)), prot);
+ return vmap_ptes_at(_pte, LFT_SIZE, leaflet_nfold(leaflet));
+}
+
+void
+vunmap(ptr_t ptr, struct leaflet* leaflet);
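+
+/*
+ * Usage sketch, assuming vunmap() only tears down the mapping
+ * and leaves the leaflet's reference with the caller:
+ *
+ *   ptr_t va = vmap(leaflet, KERNEL_DATA);
+ *   ...
+ *   vunmap(va, leaflet);
+ */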
+
+static inline ptr_t
+vmap_range(pfn_t start, size_t npages, pte_attr_t prot)
+{
+ pte_t _pte = mkpte(page_addr(start), prot);
+ return vmap_ptes_at(_pte, LFT_SIZE, npages);
+}
+
+static inline void
+vunmap_range(ptr_t va, size_t npages)
+{
+ pte_t* ptep = mkptep_va(VMS_SELF, va);
+ vmm_set_ptes_contig(ptep, null_pte, LFT_SIZE, npages);
+}
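+
+/*
+ * Usage sketch (the physical address is hypothetical): open a
+ * kernel window over a physically contiguous range, e.g. a
+ * device register page, then tear it down when done.
+ *
+ *   ptr_t va = vmap_range(pfn(0xfee00000), 1, KERNEL_DATA);
+ *   ...
+ *   vunmap_range(va, 1);
+ */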
+
+
+/**
+ * @brief Allocate a page of the given order and map it
+ * into kernel address space at the given pte slot.
+ *
+ * @param ptep pointer to the pte to install the page at
+ * @param pte template pte carrying the protection bits
+ * @param order order of the page to be allocated
+ * @return the pte that was actually installed
*/
-typedef struct
-{
- // virtual page address
- uintptr_t va;
- // physical page number (0 if no mapping exists)
- uint32_t pn;
- // physical page address (0 if no mapping exists)
- uintptr_t pa;
- // mapping flags
- uint16_t flags;
- // PTE address
- x86_pte_t* pte;
-} v_mapping;
-
-typedef struct
-{
- x86_pte_t entry[PG_MAX_ENTRIES];
-} __attribute__((packed)) x86_page_table;
-
-extern void __pg_mount_point;
-
-/* Four page mount points and two page directory mount points: for temporarily creating & editing page tables */
-#define PG_MOUNT_RANGE(l1_index) (701 <= l1_index && l1_index <= 703)
-#define PD_MOUNT_1 (PROC_START + MEM_4MB)
-#define PG_MOUNT_BASE (PD_MOUNT_1 + MEM_4MB)
-#define PG_MOUNT_1 (PG_MOUNT_BASE)
-#define PG_MOUNT_2 (PG_MOUNT_BASE + 0x1000)
-#define PG_MOUNT_3 (PG_MOUNT_BASE + 0x2000)
-#define PG_MOUNT_4 (PG_MOUNT_BASE + 0x3000)
-
-#define PD_REFERENCED L2_BASE_VADDR
-
-#define CURPROC_PTE(vpn) \
- (&((x86_page_table*)(PD_MOUNT_1 | (((vpn)&0xffc00) << 2))) \
- ->entry[(vpn)&0x3ff])
-#define PTE_MOUNTED(mnt, vpn) \
- (((x86_page_table*)((mnt) | (((vpn)&0xffc00) << 2)))->entry[(vpn)&0x3ff])
+pte_t
+alloc_kpage_at(pte_t* ptep, pte_t pte, int order);
+
+static inline void*
+vmalloc_page(int order)
+{
+ struct leaflet* leaf = alloc_leaflet(order);
+ if (!leaf) {
+ return NULL;
+ }
+
+ return (void*)vmap(leaf, KERNEL_DATA);
+}
+
+static inline void
+vmfree(void* ptr)
+{
+ pte_t* ptep = mkptep_va(VMS_SELF, (ptr_t)ptr);
+ struct leaflet* leaf = pte_leaflet(*ptep);
+
+ vunmap((ptr_t)ptr, leaf);
+ leaflet_return(leaf);
+}
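+
+/*
+ * Usage sketch: vmalloc_page() is alloc_leaflet() plus vmap(),
+ * so the returned pointer is a kernel virtual address backed by
+ * a 2^order-page leaflet, and vmfree() undoes both steps.
+ *
+ *   void* buf = vmalloc_page(1);   // two folded 4K pages, 8KiB
+ *   if (buf) {
+ *       memset(buf, 0, 2 * PAGE_SIZE);
+ *       vmfree(buf);
+ *   }
+ */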
#endif /* __LUNAIX_PAGE_H */