/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
/*
 * For radix we want generic code to handle hugetlb. But then if we want
 * both hash and radix to be enabled together we need to work around the
 * limitations.
 */
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				 unsigned long len, unsigned long pgoff,
				 unsigned long flags);
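
/*
 * Illustrative dispatch (a sketch of the caller in
 * arch/powerpc/mm/hugetlbpage.c, trimmed for brevity; not part of this
 * header): the powerpc hugetlb_get_unmapped_area() hands off to the
 * radix variant only when the radix MMU is active:
 *
 *	if (radix_enabled())
 *		return radix__hugetlb_get_unmapped_area(file, addr, len,
 *							pgoff, flags);
 */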
/*
 * Map an hstate's huge page shift to the corresponding MMU page-size
 * index (MMU_PAGE_*).
 */
static inline int hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift;

	shift = huge_page_shift(hstate);
	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;
	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
		return MMU_PAGE_16M;
	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
		return MMU_PAGE_16G;
	else {
		WARN(1, "Wrong huge page shift\n");
		return mmu_virtual_psize;
	}
}
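
/*
 * Illustrative caller (a sketch mirroring radix__flush_hugetlb_page() in
 * arch/powerpc/mm/hugetlbpage-radix.c; not part of this header): the radix
 * flush path resolves the psize once, then flushes by page-size index:
 *
 *	int psize = hstate_get_psize(hstate_file(vma->vm_file));
 *
 *	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
 */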
/*
 * On POWER9 DD1, a huge PTE must carry R_PAGE_LARGE so that the TLB code
 * can tell it maps a 2M page; on all other CPUs the PTE is returned
 * unchanged.
 */
#define arch_make_huge_pte arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	unsigned long page_shift;

	if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
		return entry;

	page_shift = huge_page_shift(hstate_vma(vma));
	/*
	 * We don't support 1G hugetlb pages yet.
	 */
	VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
	if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return __pte(pte_val(entry) | R_PAGE_LARGE);
	else
		return entry;
}
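
/*
 * Illustrative use (a sketch of the generic hugetlb fault path in
 * mm/hugetlb.c of this era, trimmed; not part of this header): the
 * override above is picked up through the arch_make_huge_pte #define
 * when the huge PTE is built:
 *
 *	entry = arch_make_huge_pte(entry, vma, page, writable);
 */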
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
	return true;
}
#endif

#endif /* _ASM_POWERPC_BOOK3S_64_HUGETLB_H */