#ifndef __LUNAIX_PAGE_H
#define __LUNAIX_PAGE_H

#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/mm/vmtlb.h>

#include <klibc/string.h>
/**
 * @brief A leaflet represents a bunch of 4K ppages
 * as a single multi-ordered page; as such, a big
 * page can be seen as an unfolded version of these
 * small 4K ppages, hence the name.
 * It is introduced to solve an issue discovered
 * during refactoring: it is jolly unclear whether
 * a ppage is a head, a tail, or, even worse, a
 * middle one, when passed around between functions.
 * This concept is surprisingly similar to Linux's
 * struct folio (I swear to the Almighty Princess
 * of the Sun, Celestia, that I didn't quite
 * understand what folio is until I'd written the
 * conceptually same thing).
 */
struct leaflet
{
    struct ppage lead_page;
};
static inline struct leaflet*
get_leaflet(struct ppage* page)
{
    return (struct leaflet*)leading_page(page);
}

static inline struct ppage*
get_ppage(struct leaflet* leaflet)
{
    return (struct ppage*)leaflet;
}
static inline struct leaflet*
alloc_leaflet(int order)
{
    return (struct leaflet*)pmm_alloc_napot_type(POOL_UNIFIED, order, 0);
}

static inline struct leaflet*
alloc_leaflet_pinned(int order)
{
    return (struct leaflet*)pmm_alloc_napot_type(POOL_UNIFIED, order, PP_FGLOCKED);
}
static inline struct leaflet*
leaflet_borrow(struct leaflet* leaflet)
{
    struct ppage* const page = get_ppage(leaflet);

    // reserved pages are statically owned, no refcounting needed
    if (reserved_page(page)) {
        return leaflet;
    }

    pmm_ref_page(page);
    return leaflet;
}

static inline void
leaflet_return(struct leaflet* leaflet)
{
    struct ppage* const page = get_ppage(leaflet);

    pmm_free_one(page, 0);
}
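/*
 * Illustrative sketch (not part of the API): sharing a leaflet
 * between two owners, assuming a fresh allocation starts with a
 * single reference. The name `shared` is hypothetical.
 *
 *   struct leaflet* shared = alloc_leaflet(0);   // one reference
 *   leaflet_borrow(shared);                      // two references
 *
 *   leaflet_return(shared);   // drops one reference
 *   leaflet_return(shared);   // last reference gone, leaflet freed
 */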
static inline unsigned int
leaflet_refcount(struct leaflet* leaflet)
{
    return get_ppage(leaflet)->refs;
}

static inline int
leaflet_order(struct leaflet* leaflet)
{
    return ppage_order(get_ppage(leaflet));
}

static inline size_t
leaflet_size(struct leaflet* leaflet)
{
    return PAGE_SIZE << leaflet_order(leaflet);
}

static inline int
leaflet_nfold(struct leaflet* leaflet)
{
    return 1 << leaflet_order(leaflet);
}
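/*
 * Worked example: for an order-2 leaflet on 4K pages,
 *   leaflet_nfold == 1 << 2 == 4 ppages folded, and
 *   leaflet_size  == PAGE_SIZE << 2 == 16 KiB.
 */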
static inline struct leaflet*
ppfn_leaflet(pfn_t ppfn)
{
    return get_leaflet(ppage(ppfn));
}

static inline struct leaflet*
pte_leaflet(pte_t pte)
{
    struct ppage* ppfn = ppage(pfn(pte_paddr(pte)));
    return get_leaflet(ppfn);
}

static inline struct leaflet*
pte_leaflet_aligned(pte_t pte)
{
    struct ppage* ppfn = ppage(pfn(pte_paddr(pte)));
    struct leaflet* _l = get_leaflet(ppfn);

    // the pte must point to the head ppage of the leaflet
    assert((ptr_t)_l == (ptr_t)ppfn);
    return _l;
}
static inline pfn_t
leaflet_ppfn(struct leaflet* leaflet)
{
    return ppfn(get_ppage(leaflet));
}

static inline ptr_t
leaflet_addr(struct leaflet* leaflet)
{
    return page_addr(ppfn(get_ppage(leaflet)));
}

static inline void
unpin_leaflet(struct leaflet* leaflet)
{
    change_page_type(get_ppage(leaflet), 0);
}

static inline void
pin_leaflet(struct leaflet* leaflet)
{
    change_page_type(get_ppage(leaflet), PP_FGLOCKED);
}
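/*
 * Illustrative sketch: a regular allocation can be pinned after
 * the fact, which has the same effect as allocating it with
 * alloc_leaflet_pinned (both end up with the PP_FGLOCKED type):
 *
 *   struct leaflet* leaflet = alloc_leaflet(0);
 *   pin_leaflet(leaflet);     // now PP_FGLOCKED, as if pinned at alloc
 *   ...
 *   unpin_leaflet(leaflet);   // back to the default page type
 */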
/**
 * @brief Map a leaflet at the given page table entry
 *
 * @param ptep head pte where the leaflet is mapped
 * @param pte pte template carrying the protection bits
 * @param leaflet the leaflet to be mapped
 * @return number of pages folded into that leaflet
 */
static inline int
ptep_map_leaflet(pte_t* ptep, pte_t pte, struct leaflet* leaflet)
{
    // We do not support huge leaflets yet
    assert(leaflet_order(leaflet) < LEVEL_SHIFT);

    pte = pte_setppfn(pte, leaflet_ppfn(leaflet));
    pte = pte_mkloaded(pte);

    int n = leaflet_nfold(leaflet);
    vmm_set_ptes_contig(ptep, pte, LFT_SIZE, n);

    return n;
}
/**
 * @brief Unmap a leaflet
 *
 * @param ptep head pte where the leaflet is mapped
 * @param leaflet the leaflet to be unmapped
 * @return number of pages folded into that leaflet
 */
static inline int
ptep_unmap_leaflet(pte_t* ptep, struct leaflet* leaflet)
{
    // We do not support huge leaflets yet
    assert(leaflet_order(leaflet) < LEVEL_SHIFT);

    int n = leaflet_nfold(leaflet);
    vmm_unset_ptes(ptep, n);

    return n;
}
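/*
 * Illustrative sketch, mirroring what leaflet_mount does for
 * PG_MOUNT_VAR: map a leaflet at a hypothetical kernel virtual
 * address `va`, flush the stale translations, then unmap it.
 * Note that `ptep` is the head of leaflet_nfold(leaflet)
 * contiguous ptes.
 *
 *   pte_t* ptep = mkptep_va(VMS_SELF, va);
 *   int n = ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);
 *   tlb_flush_kernel_ranged(va, n);
 *
 *   ptep_unmap_leaflet(ptep, leaflet);
 *   tlb_flush_kernel_ranged(va, n);
 */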
static inline ptr_t
leaflet_mount(struct leaflet* leaflet)
{
    pte_t* ptep = mkptep_va(VMS_SELF, PG_MOUNT_VAR);
    ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);

    tlb_flush_kernel_ranged(PG_MOUNT_VAR, leaflet_nfold(leaflet));

    return PG_MOUNT_VAR;
}

static inline void
leaflet_unmount(struct leaflet* leaflet)
{
    pte_t* ptep = mkptep_va(VMS_SELF, PG_MOUNT_VAR);
    vmm_unset_ptes(ptep, leaflet_nfold(leaflet));

    tlb_flush_kernel_ranged(PG_MOUNT_VAR, leaflet_nfold(leaflet));
}
static inline void
leaflet_fill(struct leaflet* leaflet, unsigned int val)
{
    ptr_t mnt;

    mnt = leaflet_mount(leaflet);
    memset((void*)mnt, val, leaflet_size(leaflet));
    leaflet_unmount(leaflet);
}

static inline void
leaflet_wipe(struct leaflet* leaflet)
{
    leaflet_fill(leaflet, 0);
}
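/*
 * Illustrative sketch: a common pattern is to wipe a fresh
 * leaflet before handing it out, so no stale data can leak
 * (`order` here is hypothetical):
 *
 *   struct leaflet* leaflet = alloc_leaflet(order);
 *   leaflet_wipe(leaflet);    // mounts, zeroes, unmounts
 */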
/**
 * @brief Duplicate the leaflet
 *
 * @return duplication of the given leaflet
 */
struct leaflet*
dup_leaflet(struct leaflet* leaflet);
/**
 * @brief Maps a number of contiguous ptes into the kernel
 * address space
 *
 * @param pte the pte to be mapped
 * @param lvl_size size of the page pointed to by the given pte
 * @param n number of ptes
 */
ptr_t
vmap_ptes_at(pte_t pte, size_t lvl_size, int n);
/**
 * @brief Maps a number of contiguous ptes into the kernel
 * address space (leaf page size)
 *
 * @param pte the pte to be mapped
 * @param n number of ptes
 */
static inline ptr_t
vmap_leaf_ptes(pte_t pte, int n)
{
    return vmap_ptes_at(pte, LFT_SIZE, n);
}
/**
 * @brief Maps a leaflet into the kernel address space
 * (leaf page size)
 *
 * @param leaflet the leaflet to be mapped
 * @param prot default protection to be applied
 */
static inline ptr_t
vmap(struct leaflet* leaflet, pte_attr_t prot)
{
    pte_t _pte = mkpte(page_addr(leaflet_ppfn(leaflet)), prot);
    return vmap_ptes_at(_pte, LFT_SIZE, leaflet_nfold(leaflet));
}

void
vunmap(ptr_t ptr, struct leaflet* leaflet);
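/*
 * Illustrative sketch: give a leaflet a longer-lived kernel
 * mapping with vmap, then release it with vunmap. KERNEL_DATA
 * stands in as an assumed example protection.
 *
 *   ptr_t va = vmap(leaflet, KERNEL_DATA);
 *   memset((void*)va, 0, leaflet_size(leaflet));
 *   vunmap(va, leaflet);
 */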
static inline ptr_t
vmap_range(pfn_t start, size_t npages, pte_attr_t prot)
{
    pte_t _pte = mkpte(page_addr(start), prot);
    return vmap_ptes_at(_pte, LFT_SIZE, npages);
}

static inline void
vunmap_range(pfn_t start, size_t npages)
{
    // start is a page number; mkptep_va expects an address,
    // hence the page_addr conversion (mirroring vmap_range)
    pte_t* ptep = mkptep_va(VMS_SELF, page_addr(start));
    vmm_set_ptes_contig(ptep, null_pte, LFT_SIZE, npages);
}
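/*
 * Illustrative sketch: map npages of physically contiguous
 * memory starting at `paddr` (e.g. a device window), then tear
 * the mapping down. `paddr` and `prot` are hypothetical.
 *
 *   ptr_t va = vmap_range(pfn(paddr), npages, prot);
 *   ...
 *   vunmap_range(pfn(va), npages);
 */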
/**
 * @brief Allocate a page in kernel space.
 *
 * @param ptep head pte where the page is mapped
 * @param pte pte template carrying the protection bits
 * @param order order of the page to be allocated
 */
ptr_t
alloc_kpage_at(pte_t* ptep, pte_t pte, int order);

#endif /* __LUNAIX_PAGE_H */