/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include "proc-macros.S"
/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
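
/*
 * Note: the geometry above gives a data cache of
 * CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE = 8 * 64 * 32
 * = 16384 bytes, so CACHE_DLIMIT corresponds to the whole D cache;
 * ranges at least that large are assumed (not benchmarked) to be
 * cheaper to handle by flushing the entire cache.
 */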
/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(v4wt_flush_icache_all)
/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
SYM_FUNC_ALIAS(v4wt_flush_user_cache_all, v4wt_flush_kern_cache_all)
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr
SYM_FUNC_END(v4wt_flush_kern_cache_all)
/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ large enough to flush all?
	bhs	__flush_whole_cache
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr
SYM_FUNC_END(v4wt_flush_user_cache_range)
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v4wt_coherent_user_range
#endif
SYM_FUNC_END(v4wt_coherent_kern_range)
/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0				@ return success
	ret	lr
SYM_FUNC_END(v4wt_coherent_user_range)
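
/*
 * Note: the zero return value above reports success; on this
 * write-through cache the D side never holds dirty data, so
 * invalidating the I cache lines in the range is sufficient to
 * make the two caches coherent.
 */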
/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1			@ convert size to end address
	b	v4wt_dma_inv_range
SYM_FUNC_END(v4wt_flush_kern_dcache_area)
/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr
/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_dma_flush_range)
	b	v4wt_dma_inv_range		@ write-through: invalidate only
SYM_FUNC_END(v4wt_dma_flush_range)
/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v4wt_dma_unmap_area)
	add	r1, r1, r0			@ convert size to end address
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	ret	lr
SYM_FUNC_END(v4wt_dma_unmap_area)
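
/*
 * Note: with a write-through D cache main memory is always up to
 * date, so dma_map_area below has nothing to clean before DMA to
 * the device and can be a no-op; dma_unmap_area above only needs
 * to invalidate when the device may have written to memory (any
 * direction other than DMA_TO_DEVICE).
 */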
/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v4wt_dma_map_area)
	ret	lr
SYM_FUNC_END(v4wt_dma_map_area)