/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards, falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range
#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}
/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

#define flush_cache_all()		__cpuc_flush_kern_all()
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE,
					vma->vm_flags);
	}
}
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(s,e)	__cpuc_coherent_user_range(s,e)
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
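
/*
 * Illustrative sketch (not part of this header): a typical caller is code
 * that writes instructions into memory and then needs them to be fetchable,
 * e.g. a JIT or loader.  The names "buf", "insns" and "len" below are
 * hypothetical.
 *
 *	memcpy(buf, insns, len);			// write new instructions
 *	flush_icache_range((unsigned long)buf,		// make the D-cache data
 *			   (unsigned long)buf + len);	// visible to the I-cache
 */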
/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
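
/*
 * Illustrative sketch (not part of this header), following the usage
 * described in Documentation/cachetlb.txt: when the kernel accesses pages
 * through a vmap()'d alias around I/O, the alias is flushed before the
 * device reads the data and invalidated before the CPU reads data the
 * device wrote.  "vaddr" and "size" below are hypothetical.
 *
 *	flush_kernel_vmap_range(vaddr, size);		// kernel wrote via the alias
 *	... perform the I/O ...
 *	invalidate_kernel_vmap_range(vaddr, size);	// before reading DMA'd data
 */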
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)
#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)
/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb(ishst);
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}
/*
 * Memory synchronization helpers for mixed cached vs non cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
 * operation is needed to avoid discarding possible concurrent writes to the
 * accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located to
 * a separate cache line.
 */
/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
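
/*
 * Illustrative sketch (not part of this header): a state variable shared
 * between cached and non-cached observers can be padded out to its own
 * writeback granule so that cache maintenance on it cannot disturb its
 * neighbours, e.g.:
 *
 *	struct shared_state {
 *		int flag;
 *	} __attribute__((__aligned__(__CACHE_WRITEBACK_GRANULE)));
 *
 * "shared_state" and "flag" are hypothetical names.
 */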
/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}
/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our cache
		 * are cleaned out safely before the outer cache is cleaned:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from outer ... */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
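
/*
 * Illustrative sketch (not part of this header), assuming a hypothetical
 * "flag" variable shared with an observer running with its cache disabled
 * (e.g. a CPU early in boot or late in power down):
 *
 *	flag = 1;
 *	sync_cache_w(&flag);	// push the cached write out to main memory
 *	...
 *	sync_cache_r(&flag);	// pick up a value written while uncached
 *	if (flag == 2)
 *		...;
 */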
/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and so without any intervening memory access in between those steps,
 * not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - No ldrex/strex (and similar) instructions must be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 *   fp is preserved to the stack explicitly prior disabling the cache
 *   since adding it to the clobber list is incompatible with having
 *   CONFIG_FRAME_POINTER=y.  ip is saved as well if ever r12-clobbering
 *   trampolines are inserted by the linker and to keep sp 64-bit aligned.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"stmfd	sp!, {fp, ip} \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory" )
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);

#endif