/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
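
/*
 * For illustration, spelled out: both flag sets request zeroed pages, and
 * the user variant additionally sets __GFP_ACCOUNT so user page table
 * pages are charged to the allocating memory cgroup:
 *
 *	GFP_PGTABLE_KERNEL == GFP_KERNEL | __GFP_ZERO
 *	GFP_PGTABLE_USER   == GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT
 */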

/**
 * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
}
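
/*
 * For illustration only: an architecture that needs more than the plain
 * allocation is expected to define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL and
 * provide its own pte_alloc_one_kernel(), typically wrapping the helper
 * above. A minimal sketch (arch_prepare_kernel_pte() is a hypothetical
 * arch hook, not a real function):
 *
 *	static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 *	{
 *		pte_t *pte = __pte_alloc_one_kernel(mm);
 *
 *		if (pte)
 *			arch_prepare_kernel_pte(pte);
 *		return pte;
 *	}
 */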

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel(mm);
}
#endif

/**
 * pte_free_kernel - free PTE-level kernel page table page
 * @mm: the mm_struct of the current context
 * @pte: pointer to the memory containing the page table
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

/**
 * __pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 * @gfp: GFP flags to use for the allocation
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation or must have custom GFP flags.
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
	struct page *pte;

	pte = alloc_page(gfp);
	if (!pte)
		return NULL;
	if (!pgtable_pte_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}

	return pte;
}
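
/*
 * For illustration only: "custom GFP flags" means an architecture can
 * define __HAVE_ARCH_PTE_ALLOC_ONE and pass its own flags through
 * __pte_alloc_one(). A minimal sketch that additionally allows highmem
 * pages (whether __GFP_HIGHMEM is appropriate depends on the
 * architecture):
 *
 *	static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 *	{
 *		return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
 *	}
 */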

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
#endif

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table page
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` representing the page table
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	pgtable_pte_page_dtor(pte_page);
	__free_page(pte_page);
}
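
/*
 * For illustration only: callers are expected to pair the two helpers
 * above. A simplified sketch of the usual pattern when populating a PMD
 * entry from a helper that receives @mm and @pmd (locking and accounting
 * omitted; see __pte_alloc() in mm/memory.c for the real thing):
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *
 *	if (!new)
 *		return -ENOMEM;
 *	if (pmd_none(*pmd))
 *		pmd_populate(mm, pmd, new);
 *	else
 *		pte_free(mm, new);	// lost the race, drop our page
 *	return 0;
 */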

#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate a page for PMD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pmd_page_ctor().
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}
#endif
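
/*
 * For illustration only: the "user vs kernel context" distinction above
 * is keyed off the mm being operated on, so the same helper serves both
 * cases:
 *
 *	pmd_alloc_one(&init_mm, addr);		// kernel mapping: GFP_PGTABLE_KERNEL
 *	pmd_alloc_one(vma->vm_mm, addr);	// user mapping: GFP_PGTABLE_USER,
 *						// charged via __GFP_ACCOUNT
 */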

#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate a page for PUD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page using %GFP_PGTABLE_USER for user context and
 * %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	return (pud_t *)get_zeroed_page(gfp);
}
#endif

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif

#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */