#ifndef __LUNAIX_PAGE_H
#define __LUNAIX_PAGE_H

#include <lunaix/mm/pmm.h>
#include <lunaix/mm/vmm.h>
#include <lunaix/mm/vmtlb.h>

#include <klibc/string.h>

/**
 * @brief A leaflet represents a bunch of 4K ppages
 * as a single multi-order page; a big page can thus
 * be seen as an unfolded version of these small 4K
 * ppages, hence the name.
 * It was introduced to solve an issue discovered
 * during refactoring: it is jolly unclear whether a
 * ppage is a head, a tail, or, even worse, one in
 * the middle, when passed around between functions.
 * This concept is surprisingly similar to Linux's
 * struct folio (I swear to the Almighty Princess of
 * the Sun, Celestia, that I didn't quite understand
 * what a folio is until I had written the
 * conceptually same thing here).
 */
struct leaflet
{
    struct ppage lead_page;
};

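/*
 * Illustrative usage (a sketch, not part of the API): an order-n leaflet
 * folds 2^n contiguous 4K ppages into one handle, so callers no longer need
 * to care which ppage is the head.
 *
 *     struct leaflet* l = alloc_leaflet(2);    // 2^2 = 4 ppages
 *     unsigned int n    = leaflet_nfold(l);    // 4
 *     size_t sz         = leaflet_size(l);     // PAGE_SIZE << 2 = 16KiB
 *     leaflet_return(l);                       // hand it back to the pmm
 */
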
static inline struct leaflet*
get_leaflet(struct ppage* page)
{
    return (struct leaflet*)leading_page(page);
}

static inline struct ppage*
get_ppage(struct leaflet* leaflet)
{
    return (struct ppage*)leaflet;
}

static inline struct leaflet*
alloc_leaflet(int order)
{
    return (struct leaflet*)pmm_alloc_napot_type(POOL_UNIFIED, order, 0);
}

static inline struct leaflet*
alloc_leaflet_pinned(int order)
{
    return (struct leaflet*)pmm_alloc_napot_type(POOL_UNIFIED, order, PP_FGLOCKED);
}

static inline void
leaflet_borrow(struct leaflet* leaflet)
{
    struct ppage* const page = get_ppage(leaflet);

    if (reserved_page(page)) {
        return;
    }

    // take an extra reference on the lead page
    page->refs++;
}

static inline void
leaflet_return(struct leaflet* leaflet)
{
    struct ppage* const page = get_ppage(leaflet);

    pmm_free_one(page, 0);
}

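/*
 * Reference-count sketch (illustrative; assumes a freshly allocated leaflet
 * starts with one reference and that pmm_free_one drops one):
 *
 *     struct leaflet* l = alloc_leaflet(0);
 *     leaflet_borrow(l);      // a second holder takes a reference
 *     leaflet_return(l);      // first holder done, leaflet still alive
 *     leaflet_return(l);      // last reference dropped, ppages return to the pmm
 */
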
static inline unsigned int
leaflet_refcount(struct leaflet* leaflet)
{
    return get_ppage(leaflet)->refs;
}

static inline unsigned int
leaflet_order(struct leaflet* leaflet)
{
    return ppage_order(get_ppage(leaflet));
}

static inline size_t
leaflet_size(struct leaflet* leaflet)
{
    return PAGE_SIZE << leaflet_order(leaflet);
}

static inline unsigned int
leaflet_nfold(struct leaflet* leaflet)
{
    return 1 << leaflet_order(leaflet);
}

static inline struct leaflet*
ppfn_leaflet(pfn_t ppfn)
{
    return get_leaflet(ppage(ppfn));
}

static inline struct leaflet*
pte_leaflet(pte_t pte)
{
    struct ppage* ppfn = ppage(pfn(pte_paddr(pte)));
    return get_leaflet(ppfn);
}

static inline struct leaflet*
pte_leaflet_aligned(pte_t pte)
{
    struct ppage* ppfn = ppage(pfn(pte_paddr(pte)));
    struct leaflet* _l = get_leaflet(ppfn);

    assert((ptr_t)_l == (ptr_t)ppfn);
    return _l;
}

static inline pfn_t
leaflet_ppfn(struct leaflet* leaflet)
{
    return ppfn(get_ppage(leaflet));
}

static inline ptr_t
leaflet_addr(struct leaflet* leaflet)
{
    return page_addr(ppfn(get_ppage(leaflet)));
}

static inline void
unpin_leaflet(struct leaflet* leaflet)
{
    change_page_type(get_ppage(leaflet), 0);
}

static inline void
pin_leaflet(struct leaflet* leaflet)
{
    change_page_type(get_ppage(leaflet), PP_FGLOCKED);
}

static inline int
to_napot_order(int nr_pages)
{
    int order;

    order = ilog2(nr_pages);
    return is_pot(nr_pages) ? order : order + 1;
}

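/*
 * Worked example (illustrative): to_napot_order rounds a page count up to
 * the smallest power-of-two order that covers it, so the request always
 * fits in a single NAPOT allocation.
 *
 *     to_napot_order(4);   // ilog2(4) = 2, 4 is a power of two -> order 2
 *     to_napot_order(5);   // ilog2(5) = 2, 5 is not            -> order 3 (8 pages)
 */
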
/**
 * @brief Map a leaflet to the contiguous pte slots
 * starting at the given ptep
 *
 * @return pages folded into that leaflet
 */
static inline int
ptep_map_leaflet(pte_t* ptep, pte_t pte, struct leaflet* leaflet)
{
    // We do not support huge leaflet yet
    assert(leaflet_order(leaflet) < LEVEL_SHIFT);

    pte = pte_setppfn(pte, leaflet_ppfn(leaflet));
    pte = pte_mkloaded(pte);

    int n = leaflet_nfold(leaflet);
    vmm_set_ptes_contig(ptep, pte, LFT_SIZE, n);

    return n;
}

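/*
 * Usage sketch (`va` and `leaflet` are hypothetical): installing a leaflet
 * into the current address space and flushing the stale translations,
 * mirroring what leaflet_mount() below does at PG_MOUNT_VAR.
 *
 *     pte_t* ptep = mkptep_va(VMS_SELF, va);
 *     int n = ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);
 *     tlb_flush_kernel_ranged(va, n);
 */
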
/**
 * @brief Unmap a leaflet previously mapped at the given ptep
 *
 * @return pages folded into that leaflet
 */
static inline int
ptep_unmap_leaflet(pte_t* ptep, struct leaflet* leaflet)
{
    // We do not support huge leaflet yet
    assert(leaflet_order(leaflet) < LEVEL_SHIFT);

    int n = leaflet_nfold(leaflet);
    vmm_unset_ptes(ptep, n);

    return n;
}

static inline ptr_t
leaflet_mount(struct leaflet* leaflet)
{
    pte_t* ptep = mkptep_va(VMS_SELF, PG_MOUNT_VAR);
    ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);

    tlb_flush_kernel_ranged(PG_MOUNT_VAR, leaflet_nfold(leaflet));

    return PG_MOUNT_VAR;
}

static inline void
leaflet_unmount(struct leaflet* leaflet)
{
    pte_t* ptep = mkptep_va(VMS_SELF, PG_MOUNT_VAR);
    vmm_unset_ptes(ptep, leaflet_nfold(leaflet));

    tlb_flush_kernel_ranged(PG_MOUNT_VAR, leaflet_nfold(leaflet));
}

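/*
 * Typical transient-mount pattern (a sketch; `buf` and `len` are
 * hypothetical): mount the leaflet at the PG_MOUNT_VAR window, touch its
 * contents through the returned kernel address, then unmount.
 * leaflet_fill() below is the in-tree instance of this pattern.
 *
 *     ptr_t mnt = leaflet_mount(leaflet);
 *     memcpy((void*)mnt, buf, len);        // len <= leaflet_size(leaflet)
 *     leaflet_unmount(leaflet);
 */
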
static inline void
leaflet_fill(struct leaflet* leaflet, unsigned int val)
{
    ptr_t mnt;

    mnt = leaflet_mount(leaflet);
    memset((void*)mnt, val, leaflet_size(leaflet));
    leaflet_unmount(leaflet);
}

static inline void
leaflet_wipe(struct leaflet* leaflet)
{
    leaflet_fill(leaflet, 0);
}

/**
 * @brief Duplicate the leaflet
 *
 * @return Duplication of the given leaflet
 */
struct leaflet*
dup_leaflet(struct leaflet* leaflet);

/**
 * @brief Maps a number of contiguous ptes in kernel
 * address space
 *
 * @param pte the pte to be mapped
 * @param lvl_size size of the page pointed to by the given pte
 * @param n number of ptes
 *
 * @return kernel virtual address of the mapped range
 */
ptr_t
vmap_ptes_at(pte_t pte, size_t lvl_size, int n);

/**
 * @brief Maps a number of contiguous ptes in kernel
 * address space (leaf page size)
 *
 * @param pte the pte to be mapped
 * @param n number of ptes
 */
static inline ptr_t
vmap_leaf_ptes(pte_t pte, int n)
{
    return vmap_ptes_at(pte, LFT_SIZE, n);
}

/**
 * @brief Maps the contiguous physical range backing a leaflet
 * into kernel address space (leaf page size)
 *
 * @param leaflet the leaflet to be mapped
 * @param prot default protection to be applied
 */
static inline ptr_t
vmap(struct leaflet* leaflet, pte_attr_t prot)
{
    pte_t _pte = mkpte(page_addr(leaflet_ppfn(leaflet)), prot);
    return vmap_ptes_at(_pte, LFT_SIZE, leaflet_nfold(leaflet));
}

void
vunmap(ptr_t ptr, struct leaflet* leaflet);

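/*
 * Usage sketch: making a leaflet's pages visible through a kernel virtual
 * mapping, then tearing the mapping down again (the leaflet itself stays
 * allocated until leaflet_return()).
 *
 *     ptr_t va = vmap(leaflet, KERNEL_DATA);
 *     // ... access (void*)va for up to leaflet_size(leaflet) bytes ...
 *     vunmap(va, leaflet);
 */
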
static inline ptr_t
vmap_range(pfn_t start, size_t npages, pte_attr_t prot)
{
    pte_t _pte = mkpte(page_addr(start), prot);
    return vmap_ptes_at(_pte, LFT_SIZE, npages);
}

static inline void
vunmap_range(pfn_t start, size_t npages)
{
    pte_t* ptep = mkptep_va(VMS_SELF, start);
    vmm_set_ptes_contig(ptep, null_pte, LFT_SIZE, npages);
}

/**
 * @brief Allocate a page in kernel space.
 *
 * @param ptep the pte slot at which the allocated page is mapped
 * @param pte pte template to be used for the mapping
 * @param order order of the allocation
 */
ptr_t
alloc_kpage_at(pte_t* ptep, pte_t pte, int order);

static inline void*
vmalloc_page(int order)
{
    struct leaflet* leaf = alloc_leaflet(order);
    if (!leaf) {
        return NULL;
    }

    return (void*)vmap(leaf, KERNEL_DATA);
}

static inline void
vfree_page(void* ptr)
{
    // return the backing leaflet to the pmm
    struct leaflet* leaf = ppfn_leaflet(pfn((ptr_t)ptr));
    leaflet_return(leaf);
}

#endif /* __LUNAIX_PAGE_H */