#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;
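
/*
 * A hugepd ("hugepage directory") entry replaces an ordinary
 * page-table pointer at some level of the tree and points instead at a
 * table of huge PTEs (allocated, presumably, from hugepte_cache
 * above).  The helpers below decode the pointer and page-size
 * information packed into hugepd_t.
 */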
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64bit book3s
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
}
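
/*
 * The MMU page size index appears to be kept in bits 2 and up of the
 * HUGEPD_SHIFT_MASK field (hence the >> 2 below), so hugepd_shift()
 * has to go through mmu_psize_to_shift() rather than read a shift
 * value directly.
 */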
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

#endif /* CONFIG_PPC_BOOK3S_64 */
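
/*
 * Find the huge PTE for @addr within the table that a hugepd points
 * to.  @pdshift is the address range shift covered by the table entry
 * that held the hugepd, so the low pdshift bits of @addr select the
 * huge PTE within that range.
 */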
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're all
	 * identical.  So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}
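
/*
 * Look up the huge PTE mapping @addr and report the page-size shift it
 * is mapped with; the implementation lives in
 * arch/powerpc/mm/hugetlbpage.c.
 */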
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);
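
/*
 * With slices or subpage protection configured, deciding whether a
 * range may only contain hugepages takes real work; otherwise no range
 * is hugepage-only.
 */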
#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
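
/*
 * For example, with a 16M huge page size the checks above reject any
 * @addr or @len that is not 16M aligned.
 */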

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
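
/*
 * The huge_pte_*()/huge_ptep_*() helpers below mostly wrap their
 * ordinary PTE counterparts; at this level a huge PTE is installed and
 * queried like a normal one.
 */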

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
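
/*
 * pte_update() has different signatures on 64-bit and 32-bit; in the
 * 64-bit call below, the trailing 1 flags the update as acting on a
 * huge page.
 */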
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
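
/*
 * Clearing a huge PTE in a live VMA must also flush it from the TLB;
 * the old PTE value returned by the get-and-clear is deliberately
 * ignored.
 */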
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
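
/*
 * No powerpc-specific per-hugepage setup or teardown is needed, so the
 * remaining arch hooks are no-ops.
 */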
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */