/*
 *  linux/arch/arm/kernel/smp_tlb.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/preempt.h>
#include <linux/smp.h>

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/**********************************************************************/

/*
 *	TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};
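
/*
 * Argument block passed to the ipi_flush_*() handlers below; each
 * handler runs on a remote CPU in IPI context and performs the local
 * variant of the requested TLB operation.
 */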

static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_bp_all(void *ignored)
{
        local_flush_bp_all();
}
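
/*
 * ARM erratum 798181: on affected Cortex-A15 parts (r0p0..r3p2), a
 * broadcast TLBI/DSB sequence issued on one CPU may fail to shoot down
 * stale TLB entries in use on the other CPUs. The workaround below
 * performs a dummy local flush and then IPIs the other CPUs so that
 * each executes a dmb(), making the TLB maintenance visible before
 * their subsequent memory accesses. erratum_a15_798181() detects an
 * affected part by its MIDR (Cortex-A15 part number, revision <= r3p2).
 */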
#ifdef CONFIG_ARM_ERRATA_798181
static int erratum_a15_798181(void)
{
        unsigned int midr = read_cpuid_id();

        /* Cortex-A15 r0p0..r3p2 affected */
        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
                return 0;
        return 1;
}
#else
static int erratum_a15_798181(void)
{
        return 0;
}
#endif
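
/*
 * 798181 IPI handler: the dmb() on the receiving CPU makes the
 * broadcast TLB maintenance visible before any subsequent memory
 * accesses on this CPU.
 */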
static void ipi_flush_tlb_a15_erratum(void *arg)
{
        dmb();
}
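
/*
 * Global-mapping side of the workaround: issue the dummy local flush
 * (dummy_flush_tlb_a15_erratum() is a helper from <asm/tlbflush.h>)
 * and then cross-call every other online CPU so that each runs the
 * dmb() handler above.
 */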
static void broadcast_tlb_a15_erratum(void)
{
        if (!erratum_a15_798181())
                return;

        dummy_flush_tlb_a15_erratum();
        smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
}
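
/*
 * mm-scoped side of the workaround: only CPUs that may be running the
 * ASID being invalidated need the dmb(), so build a mask of those CPUs
 * instead of interrupting everyone.
 */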
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
        int cpu, this_cpu;
        cpumask_t mask = { CPU_BITS_NONE };

        if (!erratum_a15_798181())
                return;

        dummy_flush_tlb_a15_erratum();
        this_cpu = get_cpu();
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                /*
                 * We only need to send an IPI if the other CPUs are running
                 * the same ASID as the one being invalidated. There is no
                 * need for locking around the active_asids check since the
                 * switch_mm() function has at least one dmb() (as required by
                 * this workaround) in case a context switch happens on
                 * another CPU after the condition below.
                 */
                if (atomic64_read(&mm->context.id) ==
                    atomic64_read(&per_cpu(active_asids, cpu)))
                        cpumask_set_cpu(cpu, &mask);
        }
        smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
        put_cpu();
}
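
/*
 * The flush_*() entry points below share one pattern: when TLB
 * maintenance operations are not broadcast in hardware
 * (tlb_ops_need_broadcast()), the flush is cross-called to the other
 * CPUs via IPI; otherwise the local operation suffices. In both cases
 * the erratum 798181 workaround is applied afterwards.
 */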
void flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
        broadcast_tlb_a15_erratum();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
                local_flush_tlb_mm(mm);
        broadcast_tlb_mm_a15_erratum(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
                                        &ta, 1);
        } else
                local_flush_tlb_page(vma, uaddr);
        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
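
/*
 * flush_tlb_kernel_page() and flush_tlb_kernel_range() operate on
 * global kernel mappings, so the IPI goes to every CPU via
 * on_each_cpu() rather than being restricted to mm_cpumask() as in
 * the user-address paths above.
 */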
void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
        broadcast_tlb_a15_erratum();
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
                                        &ta, 1);
        } else
                local_flush_tlb_range(vma, start, end);
        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
        broadcast_tlb_a15_erratum();
}

void flush_bp_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_bp_all, NULL, 1);
        else
                local_flush_bp_all();
}