/*
 *  arch/arm/include/asm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifdef CONFIG_MMU

#include <asm/glue.h>
#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE	(1 << 19)
#define TLB_V7_UIS_FULL	(1 << 20)
#define TLB_V7_UIS_ASID	(1 << 21)

#define TLB_BARRIER	(1 << 28)
#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)
/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *	  fa    - Faraday (v4 with write buffer with UTLB)
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *	  v7wbi - identical to v6wbi
 */
#undef _TLB
#undef MULTI_TLB
#ifdef CONFIG_SMP_ON_UP
#define MULTI_TLB 1
#endif
#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif
#define fa_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags	fa_tlb_flags
# define fa_always_flags	fa_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB fa
# endif
#else
# define fa_possible_flags	0
# define fa_always_flags	(-1UL)
#endif
#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif
#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags	fr_tlb_flags
# define fr_always_flags	fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define fr_possible_flags	0
# define fr_always_flags	(-1UL)
#endif
#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif
#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif
#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
				 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)

#ifdef CONFIG_CPU_TLB_V7

# ifdef CONFIG_SMP_ON_UP
#  define v7wbi_possible_flags	(v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
#  define v7wbi_always_flags	(v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
# elif defined(CONFIG_SMP)
#  define v7wbi_possible_flags	v7wbi_tlb_flags_smp
#  define v7wbi_always_flags	v7wbi_tlb_flags_smp
# else
#  define v7wbi_possible_flags	v7wbi_tlb_flags_up
#  define v7wbi_always_flags	v7wbi_tlb_flags_up
# endif
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif
#ifndef _TLB
#error Unknown TLB model
#endif
#ifndef __ASSEMBLY__

#include <linux/sched.h>

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags		cpu_tlb.tlb_flags
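
/*
 * Illustration (not part of the original header): __glue() is a token
 * pasting macro, so with a single TLB model configured (say v6wbi) a call
 * such as
 *
 *	__cpu_flush_kern_tlb_range(start, end);
 *
 * resolves at compile time to v6wbi_flush_kern_tlb_range(start, end) with
 * no indirection.  Only when MULTI_TLB is defined does the same call go
 * through the cpu_tlb function pointer table, filled in by the processor
 * setup code for the CPU type detected at boot.
 */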
/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(mm,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- mm	- mm_struct describing address space
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vaddr,vma)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vaddr	- virtual address (may not be aligned)
 *		- vma	- vma_struct describing address range
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */
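
/*
 * Usage sketch (illustrative, not from the original header): after
 * modifying a user page table entry, the stale TLB entry for that page
 * must be invalidated before the new translation can be relied upon:
 *
 *	set_pte_at(vma->vm_mm, uaddr, ptep, pteval);
 *	flush_tlb_page(vma, uaddr);
 *
 * Likewise, code that changes kernel mappings (e.g. vmalloc) is expected
 * to call flush_tlb_kernel_range(start, end) over the affected region.
 */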
/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as the compiler constraints allow for the
 * multi-CPU case.  We could do better for the multi-CPU case if the
 * compiler implemented the "%?" method, but this has been discontinued
 * due to too many people getting it wrong.
 */
#define possible_tlb_flags	(v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 fa_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)

#define always_tlb_flags	(v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 fa_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)
#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))

#define __tlb_op(f, insnarg, arg)					\
	do {								\
		if (always_tlb_flags & (f))				\
			asm("mcr " insnarg				\
			    : : "r" (arg) : "cc");			\
		else if (possible_tlb_flags & (f))			\
			asm("tst %1, %2\n\t"				\
			    "mcrne " insnarg				\
			    : : "r" (arg), "r" (__tlb_flag), "Ir" (f)	\
			    : "cc");					\
	} while (0)

#define tlb_op(f, regs, arg)	__tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg)	__tlb_op(f, "p15, 1, %0, " regs, arg)
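
/*
 * Illustration (not part of the original header) of the optimisation
 * described above: because always_tlb_flags and possible_tlb_flags are
 * compile-time constants, a line such as
 *
 *	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
 *
 * compiles to a single unconditional "mcr p15, 0, %0, c8, c6, 0" on a
 * v6wbi-only kernel (the flag is in always_tlb_flags), and to nothing at
 * all on a v4wt-only kernel (the flag is not in possible_tlb_flags).
 * Only a multi-TLB kernel pays for the runtime "tst; mcrne" test against
 * __tlb_flag.
 */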
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);

	if (tlb_flag(TLB_BARRIER)) {
		dsb();
		isb();
	}
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
		if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
			tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
		}
		put_cpu();
	}

	tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
	tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
	tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
#ifdef CONFIG_ARM_ERRATA_720789
	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
#else
	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
#endif

	if (tlb_flag(TLB_BARRIER))
		dsb();
}
static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb();

	if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
#endif

	if (tlb_flag(TLB_BARRIER))
		dsb();
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb();

	tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);

	if (tlb_flag(TLB_BARRIER)) {
		dsb();
		isb();
	}
}
/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(void *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd);
	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);

	if (tlb_flag(TLB_WB))
		dsb();
}
static inline void clean_pmd_entry(void *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd);
	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
}
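
/*
 * Usage sketch (illustrative, not from the original header): page table
 * setup code typically stores a section/PMD entry and then flushes it so
 * that the hardware table walk sees the update even on CPUs whose walks
 * do not look in the D-cache:
 *
 *	*pmdp = __pmd(phys | prot);
 *	flush_pmd_entry(pmdp);
 *
 * clean_pmd_entry() is the counterpart for entry removal, where draining
 * the write buffer is not required.
 */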
#undef tlb_op
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
#ifndef CONFIG_SMP
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif
/*
 * If PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.  On ARMv6 and later, the cache coherency is handled via
 * the set_pte_at() function.
 */
#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep);
#else
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
}
#endif

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_TLBFLUSH_H */