/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

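/*
 * Hash PTE invalidations are batched: while lazy MMU mode is active
 * (see below), each invalidated entry adds a (vpn, real pte) pair to
 * the per-CPU ppc64_tlb_batch, and the whole batch is flushed in one
 * go by __flush_tlb_pending().
 */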
struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vpn[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode() do {} while (0)

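/*
 * Typical use, as an illustrative sketch: callers in generic mm code
 * bracket a run of PTE updates with the lazy MMU hooks, e.g.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		ptep_get_and_clear(mm, addr, ptep);
 *	arch_leave_lazy_mmu_mode();
 *
 * so that, on hash, each cleared entry only queues a (vpn, pte) pair in
 * the per-CPU batch and the hash-table invalidations are issued together
 * when the batch is flushed on leave (or when it fills up).
 */
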
extern void hash__tlbiel_all(unsigned int action);

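/*
 * flush_hash_page() invalidates the hash page table entry backing one
 * virtual page, identified by its virtual page number (vpn);
 * flush_hash_range() flushes the first 'number' entries of the current
 * CPU's ppc64_tlb_batch (see __flush_tlb_pending()).
 */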
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, unsigned long flags);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize,
				unsigned long flags);

static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void hash__local_flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}

static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}

static inline void hash__flush_tlb_kernel_range(unsigned long start,
						unsigned long end)
{
}

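/*
 * Called from the mmu_gather code (tlb_flush()); flushes anything still
 * pending in this CPU's ppc64_tlb_batch at the end of a TLB gather.
 */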
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr);

#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */