+ assert((ptr_t)_l == (ptr_t)ppfn);
+ return _l;
+}
+
+static inline pfn_t
+leaflet_ppfn(struct leaflet* leaflet)
+{
+ return ppfn(get_ppage(leaflet));
+}
+
+static inline ptr_t
+leaflet_addr(struct leaflet* leaflet)
+{
+ return page_addr(ppfn(get_ppage(leaflet)));
+}
+
+static inline void
+unpin_leaflet(struct leaflet* leaflet)
+{
+ change_page_type(get_ppage(leaflet), 0);
+}
+
+static inline void
+pin_leaflet(struct leaflet* leaflet)
+{
+ change_page_type(get_ppage(leaflet), PP_FGLOCKED);
+}
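+
+/*
+ * Illustrative pin/unpin usage (a sketch, assuming PP_FGLOCKED marks the
+ * backing page as pinned against reclaim; `do_io_on` is hypothetical):
+ *
+ *   pin_leaflet(leaflet);      // hold the backing page in place
+ *   do_io_on(leaflet);         // safe: pages won't be reclaimed mid-flight
+ *   unpin_leaflet(leaflet);    // release the pin
+ */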
+
+/**
+ * @brief Map a leaflet at the given pte slot
+ *
+ * @param ptep pointer to the first pte slot to be populated
+ * @param pte pte template carrying the protection bits
+ * @param leaflet the leaflet to be mapped
+ * @return number of pages folded into that leaflet
+ */
+static inline size_t
+ptep_map_leaflet(pte_t* ptep, pte_t pte, struct leaflet* leaflet)
+{
+ // We do not support huge leaflets yet
+ assert(leaflet_order(leaflet) < LEVEL_SHIFT);
+
+ pte = pte_setppfn(pte, leaflet_ppfn(leaflet));
+ pte = pte_mkloaded(pte);
+
+ int n = leaflet_nfold(leaflet);
+ vmm_set_ptes_contig(ptep, pte, LFT_SIZE, n);
+
+ return n;
+}
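+
+/*
+ * Usage sketch (illustrative; `va` is a hypothetical virtual address
+ * already resolved by the caller):
+ *
+ *   pte_t* ptep = mkptep_va(VMS_SELF, va);
+ *   size_t n = ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);
+ *   // n leaf ptes starting at ptep now point into the leaflet
+ */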
+
+/**
+ * @brief Unmap a leaflet previously mapped at the given pte slot
+ *
+ * @param ptep pointer to the first pte slot to be cleared
+ * @param leaflet the leaflet to be unmapped
+ * @return number of pages folded into that leaflet
+ */
+static inline size_t
+ptep_unmap_leaflet(pte_t* ptep, struct leaflet* leaflet)
+{
+ // We do not support huge leaflets yet
+ assert(leaflet_order(leaflet) < LEVEL_SHIFT);
+
+ int n = leaflet_nfold(leaflet);
+ vmm_unset_ptes(ptep, n);
+
+ return n;
+}
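+
+/*
+ * Counterpart sketch (illustrative): the unmap itself does not flush the
+ * TLB, so for a kernel-space mapping the caller flushes afterwards, as
+ * leaflet_unmount() does below; `va` is hypothetical:
+ *
+ *   size_t n = ptep_unmap_leaflet(ptep, leaflet);
+ *   tlb_flush_kernel_ranged(va, n);
+ */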
+
+static inline ptr_t
+leaflet_mount(struct leaflet* leaflet)
+{
+ pte_t* ptep = mkptep_va(VMS_SELF, PG_MOUNT_VAR);
+ ptep_map_leaflet(ptep, mkpte_prot(KERNEL_DATA), leaflet);
+
+ tlb_flush_kernel_ranged(PG_MOUNT_VAR, leaflet_nfold(leaflet));
+
+ return PG_MOUNT_VAR;
+}
+
+static inline void
+leaflet_unmount(struct leaflet* leaflet)
+{
+ pte_t* ptep = mkptep_va(VMS_SELF, PG_MOUNT_VAR);
+ vmm_unset_ptes(ptep, leaflet_nfold(leaflet));
+
+ tlb_flush_kernel_ranged(PG_MOUNT_VAR, leaflet_nfold(leaflet));
+}
+
+static inline void
+leaflet_fill(struct leaflet* leaflet, unsigned int val)
+{
+ ptr_t mnt;
+
+ mnt = leaflet_mount(leaflet);
+ memset((void*)mnt, val, leaflet_size(leaflet));
+ leaflet_unmount(leaflet);
+}
+
+static inline void
+leaflet_wipe(struct leaflet* leaflet)
+{
+ leaflet_fill(leaflet, 0);
+}
+
+/**
+ * @brief Duplicate the given leaflet
+ *
+ * @param leaflet the leaflet to be duplicated
+ * @return a duplicate of the given leaflet
+ */
+struct leaflet*
+dup_leaflet(struct leaflet* leaflet);
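+
+/*
+ * Illustrative use, e.g. breaking a shared mapping (a sketch; `ptep` and
+ * `pte` come from a hypothetical fault-handling context):
+ *
+ *   struct leaflet* copy = dup_leaflet(leaflet);
+ *   ptep_map_leaflet(ptep, pte, copy);   // remap to the private copy
+ */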
+
+
+/**
+ * @brief Maps a number of contiguous ptes in kernel
+ * address space
+ *
+ * @param pte the pte to be mapped
+ * @param lvl_size size of the page pointed to by the given pte
+ * @param n number of ptes
+ * @return virtual address of the mapped range
+ */
+ptr_t
+vmap_ptes_at(pte_t pte, size_t lvl_size, int n);
+
+/**
+ * @brief Maps a number of contiguous ptes in kernel
+ * address space (leaf page size)
+ *
+ * @param pte the pte to be mapped
+ * @param n number of ptes
+ * @return virtual address of the mapped range
+ */
+static inline ptr_t
+vmap_leaf_ptes(pte_t pte, int n)
+{
+ return vmap_ptes_at(pte, LFT_SIZE, n);
+}
+
+/**
+ * @brief Maps a leaflet into kernel address space
+ * (leaf page size)
+ *
+ * @param leaflet the leaflet to be mapped
+ * @param prot protection to be applied
+ * @return virtual address the leaflet is mapped at
+ */
+static inline ptr_t
+vmap(struct leaflet* leaflet, pte_attr_t prot)
+{
+ pte_t _pte = mkpte(page_addr(leaflet_ppfn(leaflet)), prot);
+ return vmap_ptes_at(_pte, LFT_SIZE, leaflet_nfold(leaflet));
+}
+
+void
+vunmap(ptr_t ptr, struct leaflet* leaflet);
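+
+/*
+ * Usage sketch for the vmap/vunmap pair (illustrative; KERNEL_DATA is
+ * the same protection tag used by leaflet_mount() above):
+ *
+ *   ptr_t va = vmap(leaflet, KERNEL_DATA);
+ *   memset((void*)va, 0, leaflet_size(leaflet));
+ *   vunmap(va, leaflet);
+ */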
+
+static inline ptr_t
+vmap_range(pfn_t start, size_t npages, pte_attr_t prot)
+{
+ pte_t _pte = mkpte(page_addr(start), prot);
+ return vmap_ptes_at(_pte, LFT_SIZE, npages);
+}
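+
+/*
+ * Example (a sketch; `start_pfn` names a hypothetical physically
+ * contiguous range of 4 pages):
+ *
+ *   ptr_t base = vmap_range(start_pfn, 4, KERNEL_DATA);
+ */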
+
+
+/**
+ * @brief Allocate pages of the given order and map them
+ * in kernel space at the given pte slot.
+ *
+ * @param ptep pointer to the pte slot to be populated
+ * @param pte pte template carrying the protection bits
+ * @param order order of the allocation
+ * @return the pte that has been installed
+ */
+pte_t
+alloc_kpage_at(pte_t* ptep, pte_t pte, int order);
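+
+/*
+ * Usage sketch (illustrative; `va` is a hypothetical kernel virtual
+ * address chosen by the caller):
+ *
+ *   pte_t* ptep = mkptep_va(VMS_SELF, va);
+ *   pte_t pte = alloc_kpage_at(ptep, mkpte_prot(KERNEL_DATA), 0);
+ */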