// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

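/*
 * With an aliasing VIPT D-cache, a physical page may be cached at several
 * virtual addresses that differ in cache colour.  The helpers below map
 * the page at a dedicated kernel alias (FLUSH_ALIAS_START) chosen to have
 * the same colour as the user address, so that cache maintenance on the
 * alias hits the same lines as the user mapping would.
 */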
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

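/*
 * For whole-mm and address-range flushes on an aliasing VIPT cache there
 * is no single alias to target, so the functions below fall back to
 * "mcr p15, 0, %0, c7, c14, 0" (clean and invalidate the entire D-cache)
 * followed by "mcr p15, 0, %0, c7, c10, 4" (drain the write buffer).
 */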
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_pages(vma, user_addr, pfn, nr);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

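/*
 * FLAG_PA_IS_EXEC: the target mapping is executable, so the I-cache must
 * be made coherent with the freshly written data.
 * FLAG_PA_CORE_IN_MM: the calling CPU is running the target mm, which is
 * what makes flushing by virtual address meaningful on a VIVT cache.
 */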
#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

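/*
 * Make data written at kaddr (the kernel alias of a user page) visible at
 * the user address uaddr, for each cache model in turn: VIVT, aliasing
 * VIPT, and non-aliasing VIPT with a possibly aliasing I-cache.  When the
 * hardware does not broadcast cache operations, the I-cache invalidation
 * is propagated to the other cores via smp_call_function().
 */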
static
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

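/*
 * Write back the kernel mapping of the folio so that RAM is coherent with
 * the kernel's view.  Highmem folios may lack a permanent kernel mapping:
 * kmap_local_folio() creates a temporary one, whereas kmap_high_get()
 * only returns an already existing mapping - if it returns NULL, no
 * kernel cache lines can refer to that page and nothing needs flushing.
 */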
void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!folio_test_highmem(folio)) {
		__cpuc_flush_dcache_area(folio_address(folio),
					 folio_size(folio));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_local_folio(folio,
								i * PAGE_SIZE);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_local(addr);
			}
		} else {
			for (i = 0; i < folio_nr_pages(folio); i++) {
				void *addr = kmap_high_get(folio_page(folio, i));
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(folio_page(folio, i));
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}

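/*
 * Walk every VMA in the current MM that maps this folio (via the i_mmap
 * interval tree of the mapping) and flush the user alias of each mapped
 * page.  A large folio can straddle the start or end of a VMA, so the
 * pfn/nr range is first trimmed to the part the VMA actually maps.
 */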
static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	pgoff_t pgoff, pgoff_end;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = folio->index;
	pgoff_end = pgoff + folio_nr_pages(folio) - 1;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
		unsigned long start, offset, pfn;
		unsigned int nr;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (vma->vm_mm != mm)
			continue;
		if (!(vma->vm_flags & VM_MAYSHARE))
			continue;

		start = vma->vm_start;
		pfn = folio_pfn(folio);
		nr = folio_nr_pages(folio);
		offset = pgoff - vma->vm_pgoff;
		if (offset > -nr) {
			/* VMA starts inside the folio: skip the leading pages */
			pfn -= offset;
			nr += offset;
		} else {
			/* folio starts inside the VMA */
			start += offset * PAGE_SIZE;
		}
		if (start + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - start) / PAGE_SIZE;

		flush_cache_pages(vma, start, pfn, nr);
	}
	flush_dcache_mmap_unlock(mapping);
}

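/*
 * Called on ARMv6+ when a new user-space translation is installed, to
 * bring the I-cache and D-cache back into coherency for that page.  The
 * D-cache side is deferred through PG_dcache_clean: the flush happens
 * only on the first mapping after the kernel dirtied the page.
 */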
#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct folio *folio;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (folio_test_reserved(folio))
		return;

	if (cache_is_vipt_aliasing())
		mapping = folio_flush_mapping(folio);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(folio_pfn(folio)))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &folio->flags))
			clear_bit(PG_dcache_clean, &folio->flags);
		return;
	}

	mapping = folio_flush_mapping(folio);

	if (!cache_ops_need_broadcast() &&
	    mapping && !folio_mapped(folio))
		clear_bit(PG_dcache_clean, &folio->flags);
	else {
		__flush_dcache_folio(mapping, folio);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, folio);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}