/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
			    : : )

#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"		       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
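/*
 * For example, __tlbi(op, arg) expands to __TLBI_N(op, arg, 1, 0), so n
 * binds to 1 and __TLBI_1 is selected, while __tlbi(op) expands to
 * __TLBI_N(op, 1, 0), so n binds to 0 and __TLBI_0 is selected. Both forms
 * are used by the flush helpers below:
 *
 *	__tlbi(vmalle1is);		no-argument form
 *	__tlbi(vale1is, addr);		VA/ASID operand from __TLBI_VADDR()
 */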
#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)
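/*
 * With kpti enabled, user and kernel mappings run under separate ASIDs
 * (the user one is marked by USER_ASID_FLAG), so the invalidation must be
 * repeated for the user ASID as well; otherwise __tlbi_user() is a no-op.
 */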
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
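/*
 * The operand thus carries VA[55:12] in bits [43:0] and the ASID in bits
 * [63:48]. A worked example (illustrative values, assuming 4KiB pages):
 *
 *	__TLBI_VADDR(0x0000ffff8000b000, 0x42)
 *		(addr) >> 12		== 0x0000000ffff8000b
 *		& GENMASK_ULL(43, 0)	== 0x0000000ffff8000b
 *		| (0x42UL << 48)	== 0x0042000ffff8000b
 */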
/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(mm,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- mm	- mm_struct describing address space
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vaddr,vma)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vaddr	- virtual address (may not be aligned)
 *		- vma	- vma_struct describing address range
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */
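/*
 * A hypothetical caller (for illustration only, not part of this header):
 * core mm code updates a PTE and then invalidates the stale entry, e.g.
 *
 *	set_pte_at(vma->vm_mm, vaddr, ptep, pte);
 *	flush_tlb_page(vma, vaddr);
 */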
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}
static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
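/*
 * For scale (assuming 4KiB pages, i.e. PAGE_SHIFT == 12): MAX_TLB_RANGE is
 * 1024 pages, or 4MiB. Ranges beyond this fall back to a full flush via
 * flush_tlb_mm()/flush_tlb_all() below rather than per-page invalidation.
 */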
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}
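/*
 * Two details worth noting about the loop above: the TLBI operand encodes
 * VA >> 12, so advancing one page means advancing 1 << (PAGE_SHIFT - 12)
 * operand units; and "last_level" selects VALE1IS, which only invalidates
 * last-level (leaf) entries, whereas VAE1IS also drops cached intermediate
 * (walk cache) entries for the address.
 */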
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
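/*
 * Kernel mappings are global rather than owned by a single address space,
 * hence the ASID passed to __TLBI_VADDR() is 0 and the invalidation uses
 * VAAE1IS, which matches the VA under all ASIDs.
 */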
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));

	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);
}

#endif