/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"
#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	/* Drain the outer cache's buffers before the SoC-level barrier. */
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif
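/*
 * On aliasing VIPT caches a physical page can be cached at several
 * virtual "colours".  The helpers below install a temporary kernel
 * mapping at the same colour as the user address, so that cache
 * maintenance performed on the alias hits the user mapping's lines.
 */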
#ifdef CONFIG_CPU_CACHE_VIPT

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"	/* clean+invalidate D-cache range */
	"	mcr	p15, 0, %2, c7, c10, 4"	/* drain write buffer */
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}
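/* Flush the I-cache for a user range via a congruent kernel alias. */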
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}
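/*
 * flush_cache_mm() and friends below implement the flush_cache_* API
 * for VIPT caches; VIVT parts are handled by the vivt_* helpers.
 */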
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"	/* clean+invalidate whole D-cache */
		"	mcr	p15, 0, %0, c7, c10, 4"		/* drain write buffer */
		    :
		    : "r" (0)
		    : "cc");
	}
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"	/* clean+invalidate whole D-cache */
		"	mcr	p15, 0, %0, c7, c10, 4"		/* drain write buffer */
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif
#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2
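/*
 * FLAG_PA_IS_EXEC: the target mapping is executable, so the I-cache
 * must be made coherent as well.
 * FLAG_PA_CORE_IN_MM: the calling CPU has the target mm active, so
 * VIVT maintenance performed here can reach the user alias.
 */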
/* Runs on remote CPUs via smp_call_function() to broadcast the flush. */
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
static
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;

		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;

	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}
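/*
 * Flush caches for a page into which uprobes has written instructions
 * for out-of-line (XOL) single-stepping; always treated as executable
 * and resident in the current mm.
 */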
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;

		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
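/*
 * Called from set_pte_at() on ARMv6+ when a new user mapping is
 * established, so I/D coherency work can be performed lazily here.
 */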
#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *	    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcasted.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapped(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
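/*
 * For illustration (a sketch, not code from this file): a typical
 * caller that dirties a page cache page through its kernel mapping
 * would do something like
 *
 *	void *kaddr = kmap_atomic(page);
 *	memcpy(kaddr + offset, data, len);
 *	kunmap_atomic(kaddr);
 *	flush_dcache_page(page);
 *
 * so that any dirty kernel-alias lines are either flushed now or
 * marked for a deferred flush via PG_dcache_clean.
 */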
/*
 * Ensure cache coherency for the kernel mapping of this page. We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
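/*
 * For illustration (a sketch, not code from this file), the sequence
 * described above looks roughly like this in a caller:
 *
 *	struct page *page;
 *
 *	get_user_pages(..., &page, ...);	// calls flush_anon_page()
 *	memcpy(buf, kmap(page) + offset, len);
 *	kunmap(page);
 */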
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);

	VM_BUG_ON(address & ~PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);

	/* dummy IPI to serialise against fast_gup */
	kick_all_cpus_sync();
}
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */