/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_PGALLOC_H
#define _ASM_M32R_PGALLOC_H

#include <linux/mm.h>

#include <asm/io.h>

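/*
 * Install a freshly allocated pte table into a pmd entry.  The kernel
 * variant is handed the table's virtual address (hence __pa()), while
 * the user variant is handed the struct page (hence page_to_phys());
 * both OR in the _PAGE_TABLE protection bits.
 */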
#define pmd_populate_kernel(mm, pmd, pte)	\
	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))

static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
	pgtable_t pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

/*
 * Allocate and free page tables.
 */
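/*
 * The pgd occupies a single zeroed page; freeing it simply hands the
 * page back to the page allocator.
 */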
static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);

	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

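/*
 * Kernel page tables: pte_alloc_one_kernel() returns the virtual
 * address of a zeroed page (or NULL on failure); no struct page
 * constructor is run for kernel mappings.
 */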
static __inline__ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);

	return pte;
}

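/*
 * User page tables are handed around as struct page (pgtable_t).
 * pgtable_page_ctor() sets up the split page-table lock and
 * page-table accounting; if it fails, the page is released again.
 */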
static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);

	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

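/*
 * Page-table pages are not batched through the mmu_gather here;
 * __pte_free_tlb() releases them immediately via pte_free().
 */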
#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, (pte))

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 * (In the PAE case we free the pmds as part of the pgd.)
 */
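/*
 * pmd_alloc_one() must never be reached with this two-level layout;
 * BUG() fires before the dummy (pmd_t *)2 value could ever be used.
 */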
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb, x, addr)	do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_M32R_PGALLOC_H */