ARM: amba: Make driver_override output consistent with other buses
[linux/fpc-iii.git] / arch / powerpc / include / asm / book3s / 64 / tlbflush-hash.h
blob64d02a704bcb596f146e95ebdff1343d459e4c20
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
3 #define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
5 /*
6 * TLB flushing for 64-bit hash-MMU CPUs
7 */
9 #include <linux/percpu.h>
10 #include <asm/page.h>
12 #define PPC64_TLB_BATCH_NR 192
14 struct ppc64_tlb_batch {
15 int active;
16 unsigned long index;
17 struct mm_struct *mm;
18 real_pte_t pte[PPC64_TLB_BATCH_NR];
19 unsigned long vpn[PPC64_TLB_BATCH_NR];
20 unsigned int psize;
21 int ssize;
23 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
25 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
27 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
29 static inline void arch_enter_lazy_mmu_mode(void)
31 struct ppc64_tlb_batch *batch;
33 if (radix_enabled())
34 return;
35 batch = this_cpu_ptr(&ppc64_tlb_batch);
36 batch->active = 1;
39 static inline void arch_leave_lazy_mmu_mode(void)
41 struct ppc64_tlb_batch *batch;
43 if (radix_enabled())
44 return;
45 batch = this_cpu_ptr(&ppc64_tlb_batch);
47 if (batch->index)
48 __flush_tlb_pending(batch);
49 batch->active = 0;
52 #define arch_flush_lazy_mmu_mode() do {} while (0)
54 extern void hash__tlbiel_all(unsigned int action);
56 extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
57 int ssize, unsigned long flags);
58 extern void flush_hash_range(unsigned long number, int local);
59 extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
60 pmd_t *pmdp, unsigned int psize, int ssize,
61 unsigned long flags);
/* No-op: hash flushes are driven by the PTE batch, not by mm teardown. */
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}
/* No-op: see hash__local_flush_tlb_mm(). */
static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}
static inline void hash__local_flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}
static inline void hash__flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}
/* No-op: per-page hash invalidation happens via the PTE batch. */
static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
}
/* No-op: see hash__local_flush_tlb_page(). */
static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}
/* No-op: range flushes are handled through the hash PTE batch. */
static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}
/* No-op: kernel-range flushes are handled through the hash PTE batch. */
static inline void hash__flush_tlb_kernel_range(unsigned long start,
						unsigned long end)
{
}
113 struct mmu_gather;
114 extern void hash__tlb_flush(struct mmu_gather *tlb);
115 /* Private function for use by PCI IO mapping code */
116 extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
117 unsigned long end);
118 extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
119 unsigned long addr);
120 #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */