/*
 *  arch/arm/include/asm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifndef CONFIG_MMU

#define tlb_flush(tlb)	((void) tlb)

#else /* CONFIG_MMU */

#include <asm/glue.h>
#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

#define TLB_BTB		(1 << 28)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE	(1 << 19)
#define TLB_V7_UIS_FULL	(1 << 20)
#define TLB_V7_UIS_ASID	(1 << 21)

#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)
/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *	  fa    - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB))
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *	  v7wbi - identical to v6wbi
 */
#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)

#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags	v3_tlb_flags
# define v3_always_flags	v3_tlb_flags
#else
# define v3_possible_flags	0
# define v3_always_flags	(-1UL)
#endif
#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif
#define fa_tlb_flags	(TLB_WB | TLB_BTB | TLB_DCLEAN | \
			 TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags	fa_tlb_flags
# define fa_always_flags	fa_tlb_flags
#else
# define fa_possible_flags	0
# define fa_always_flags	(-1UL)
#endif
#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif
#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags	fr_tlb_flags
# define fr_always_flags	fr_tlb_flags
#else
# define fr_possible_flags	0
# define fr_always_flags	(-1UL)
#endif
#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif
#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif
#ifdef CONFIG_SMP
#define v7wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
			 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#else
#define v7wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
			 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
#endif

#ifdef CONFIG_CPU_TLB_V7
# define v7wbi_possible_flags	v7wbi_tlb_flags
# define v7wbi_always_flags	v7wbi_tlb_flags
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif
#ifndef _TLB
#error Unknown TLB model
#endif
#include <linux/sched.h>

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};
/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags		cpu_tlb.tlb_flags
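
/*
 * Illustrative sketch (not part of the original header, never compiled):
 * how the selection above resolves in practice.  Assuming a kernel built
 * with only CONFIG_CPU_TLB_V7, _TLB expands to v7wbi and MULTI_TLB is
 * left undefined, so the call below compiles to a direct call to
 * v7wbi_flush_kern_tlb_range (implemented in arch/arm/mm/tlb-v7.S); a
 * multi-TLB kernel would instead call indirectly through
 * cpu_tlb.flush_kern_range.  The helper name is made up for the example.
 */
#if 0
static inline void example_flush_kernel_mapping(unsigned long start,
						unsigned long end)
{
	__cpu_flush_kern_tlb_range(start, end);
}
#endif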
/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(mm,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- mm	- mm_struct describing address space
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vaddr,vma)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vaddr	- virtual address (may not be aligned)
 *		- vma	- vma_struct describing address range
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */
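
/*
 * Illustrative sketch (not part of the original header, never compiled):
 * a typical caller of the primitives documented above.  The helper and
 * its arguments are hypothetical; it simply shows the usual "update the
 * page table, then invalidate the stale TLB entry" ordering for a
 * kernel mapping.
 */
#if 0
static void example_remap_kernel_page(pte_t *ptep, unsigned long kaddr,
				      pte_t newpte)
{
	set_pte_ext(ptep, newpte, 0);		/* install the new mapping */
	local_flush_tlb_kernel_page(kaddr);	/* drop the old TLB entry  */
}
#endif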
/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as the compiler constraints allow for the
 * multi-CPU case.  We could do better for the multi-CPU case if the
 * compiler implemented the "%?" method, but this has been discontinued
 * due to too many people getting it wrong.
 */
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 fa_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)

#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 fa_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)

#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
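
/*
 * Illustrative sketch (not part of the original header, never compiled):
 * how tlb_flag() folds to a constant on a single-TLB build.  Assuming
 * only CONFIG_CPU_TLB_V7 is selected, possible_tlb_flags and
 * always_tlb_flags both equal v7wbi_tlb_flags, so the first test below
 * reduces to "if (0)" and its MCR disappears, while the second reduces
 * to "if (1)" and is emitted unconditionally; only a multi-TLB kernel
 * ever reads __cpu_tlb_flags at run time.
 */
#if 0
static inline void example_tlb_flag_folding(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_V4_D_PAGE))	/* not a v7wbi flag: folds away */
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V7_UIS_FULL))	/* always set here: kept        */
		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
}
#endif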
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V7_UIS_FULL))
		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V7_UIS_ASID))
		asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
}
static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V7_UIS_PAGE))
		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V7_UIS_PAGE))
		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr p15, 1, %0, c15, c9, 1	@ L2 flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_WB))
		dsb();
}

static inline void clean_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr p15, 1, %0, c15, c9, 1	@ L2 flush_pmd"
			: : "r" (pmd) : "cc");
}
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
#ifndef CONFIG_SMP
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif
/*
 * If PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);

#endif /* CONFIG_MMU */

#endif