/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE

#include <asm/page.h>
#include <asm-generic/hugetlb.h>
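
/* Slab cache used to allocate the page tables that hugepd entries point to. */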
extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}
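
/* Extract the MMU page-size index encoded in the hugepd value. */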
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}
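
/* Convert the encoded page-size index into a page shift in bits. */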
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
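
/* Only the radix MMU needs an explicit flush here; otherwise this is a no-op. */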
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}
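
/*
 * On 8xx the page shift is derived from the _PMD_PAGE_MASK bits; on the
 * other non-book3s64 platforms it is stored directly in the low
 * HUGEPD_SHIFT_MASK bits of the hugepd value.
 */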
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're all
	 * identical. So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
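
/* Look up the hugepage PTE mapping @addr; the page shift is returned via @shift. */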
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);
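
/* Maintain D-cache/I-cache coherency for a hugepage. */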
void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif
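
/* Preload a TLB entry for a hugepage mapping (Book3E). */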
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);

#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif
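
/* Free the page tables backing an unmapped hugepage range, bounded by floor/ceiling. */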
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage-size
 * aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
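
/* Installing a hugepage PTE uses the regular set_pte_at(). */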
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
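
/*
 * Atomically clear the PTE and return the old value. The 64-bit
 * pte_update() takes the mm and address; the 32-bit variant only needs
 * the PTE pointer.
 */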
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
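
/* Clear the PTE, then flush the TLB entry for the hugepage. */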
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry. Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */