arch/arm64/include/asm/tlbflush.h
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vma_struct describing the address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vma_struct describing the address range
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- kernel virtual memory address
 */
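/*
 * Illustrative usage (not part of the original header; set_pte_at() and the
 * surrounding names are assumed from generic kernel code): when a single
 * user page's mapping changes, the per-page, per-ASID primitive is the
 * cheapest of those documented above:
 *
 *	set_pte_at(vma->vm_mm, uaddr, ptep, new_pte);
 *	flush_tlb_page(vma, uaddr);
 *
 * whereas tearing down a whole address space would use flush_tlb_mm().
 */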
static inline void flush_tlb_all(void)
{
	dsb(ishst);			/* make prior page table updates visible */
	asm("tlbi vmalle1is");		/* invalidate all EL1 entries, inner shareable */
	dsb(ish);			/* wait for the invalidation to complete */
	isb();				/* resynchronize the instruction stream */
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	/* The ASID occupies bits [63:48] of the TLBI operand. */
	unsigned long asid = (unsigned long)ASID(mm) << 48;

	dsb(ishst);
	asm("tlbi aside1is, %0" : : "r" (asid));
	dsb(ish);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	/* Operand: VA >> 12 in the low bits, ASID in bits [63:48]. */
	unsigned long addr = uaddr >> 12 |
		((unsigned long)ASID(vma->vm_mm) << 48);

	dsb(ishst);
	asm("tlbi vae1is, %0" : : "r" (addr));
	dsb(ish);
}
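/*
 * Worked example (illustrative): for uaddr == 0x400000 and ASID 5, the
 * operand is (0x400000 >> 12) | (5UL << 48) == 0x0005000000000400.
 */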
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
	unsigned long addr;

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	/* The operand encodes VA >> 12, so step in page-sized units. */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vae1is, %0" : : "r"(addr));
	dsb(ish);
}
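/*
 * Illustrative arithmetic: with 4KB pages (PAGE_SHIFT == 12) the loop
 * advances by 1 per iteration, so a 64KB range issues 16 TLBI operations;
 * with 64KB pages (PAGE_SHIFT == 16) it advances by 16 and the same range
 * completes in a single iteration.
 */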
static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	/* VAAE1IS invalidates the address for all ASIDs. */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		asm("tlbi vaae1is, %0" : : "r"(addr));
	dsb(ish);
	isb();
}
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
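/*
 * Illustrative arithmetic: MAX_TLB_RANGE is 1024 pages, i.e. 4MB with 4KB
 * pages or 64MB with 64KB pages. Larger ranges fall back to a full ASID
 * or full TLB flush below.
 */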
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if ((end - start) <= MAX_TLB_RANGE)
		__flush_tlb_range(vma, start, end);
	else
		flush_tlb_mm(vma->vm_mm);
}
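/*
 * Minimal sketch (hypothetical caller, not part of this header): after the
 * caller has rewritten the PTEs covering [start, end) within a vma, the
 * stale translations must be invalidated before relying on the new ones.
 */
static inline void demo_flush_after_remap(struct vm_area_struct *vma,
					  unsigned long start,
					  unsigned long end)
{
	/* ... PTEs for [start, end) updated by the caller ... */
	flush_tlb_range(vma, start, end);
}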
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if ((end - start) <= MAX_TLB_RANGE)
		__flush_tlb_kernel_range(start, end);
	else
		flush_tlb_all();
}
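/*
 * Worked example (illustrative): with 4KB pages, unmapping a 2MB vmalloc
 * area stays under MAX_TLB_RANGE (4MB) and takes the per-page path; a
 * 16MB area would instead trigger a full flush_tlb_all().
 */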
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);

	dsb(ishst);
	asm("tlbi vae1is, %0" : : "r" (addr));
	dsb(ish);
}
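/*
 * Minimal sketch (hypothetical caller, not part of this header): when an
 * intermediate table page (e.g. a pmd-level table) is unlinked and freed,
 * cached walk entries that pointed at it must be invalidated.
 */
static inline void demo_flush_after_pmd_free(struct mm_struct *mm,
					     unsigned long uaddr)
{
	/* ... pmd entry cleared and the table page freed by the caller ... */
	__flush_tlb_pgtable(mm, uaddr);
}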
/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * set_pte() does not have a DSB for user mappings, so make sure that
	 * the page table write is visible.
	 */
	dsb(ishst);
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif

#endif