/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64
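
/*
 * The cache maintenance loops below operate by index, not by address.
 * As a rough sketch (inferred from how the loops build their operand,
 * not quoted from the ARM940T TRM), the value passed to the
 * "mcr p15, 0, rX, c7, c{6,10,14}, 2" index operations encodes:
 *
 *	bits [31:26]	entry within a segment (0..CACHE_DENTRIES-1)
 *	bits  [5:4]	segment (0..CACHE_DSEGMENTS-1)
 *
 * so in C-like pseudocode a full sweep is roughly:
 *
 *	for (seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
 *		for (idx = CACHE_DENTRIES - 1; idx >= 0; idx--)
 *			clean_flush_dcache_index((idx << 26) | (seg << 4));
 *
 * which touches CACHE_DSEGMENTS * CACHE_DENTRIES = 256 lines of
 * CACHE_DLINESIZE bytes, i.e. the whole 4KB D cache described above.
 */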

	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
SYM_TYPED_FUNC_START(cpu_arm940_proc_init)
	ret	lr
SYM_FUNC_END(cpu_arm940_proc_init)

SYM_TYPED_FUNC_START(cpu_arm940_switch_mm)
	ret	lr
SYM_FUNC_END(cpu_arm940_switch_mm)

/*
 * cpu_arm940_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_arm940_proc_fin)

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
	.pushsection	.idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ clear D-cache (C) and MPU (M) enable bits
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
SYM_FUNC_END(cpu_arm940_reset)
	.popsection

/*
 * cpu_arm940_do_idle()
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
SYM_FUNC_END(cpu_arm940_do_idle)

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(arm940_flush_icache_all)

/*
 * flush_user_cache_all()
 */
SYM_FUNC_ALIAS(arm940_flush_user_cache_all, arm940_flush_kern_cache_all)

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	b	arm940_flush_user_cache_range
SYM_FUNC_END(arm940_flush_kern_cache_all)

/*
 * flush_user_cache_range(start, end, flags)
 *
 * There is no efficient way to flush cache entries for just the
 * specified address range, so the whole D cache is cleaned and
 * invalidated instead.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 */
SYM_TYPED_FUNC_START(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm940_flush_user_cache_range)

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(arm940_coherent_kern_range)
	b	arm940_flush_kern_dcache_area
SYM_FUNC_END(arm940_coherent_kern_range)

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(arm940_coherent_user_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	arm940_flush_kern_dcache_area
#endif
SYM_FUNC_END(arm940_coherent_user_range)
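
/*
 * Note on the CONFIG_CFI_CLANG branch above (explaining the existing
 * "Fallthrough if !CFI" comment, based on generic kCFI behaviour rather
 * than anything stated in this file): with kCFI each typed function is
 * preceded by a type hash, so arm940_coherent_user_range cannot simply
 * fall through into the next function and needs the explicit branch.
 * Without CFI it falls straight through into
 * arm940_flush_kern_dcache_area below, which performs the same work.
 */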

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
SYM_TYPED_FUNC_START(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm940_flush_kern_dcache_area)

/*
 * dma_inv_range(start, end)
 *
 * There is no efficient way to invalidate a specified virtual
 * address range, so the whole D cache is invalidated instead.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * There is no efficient way to clean a specified virtual
 * address range, so the whole D cache is cleaned instead.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_clean_range:
SYM_TYPED_FUNC_START(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(cpu_arm940_dcache_clean_area)

/*
 * dma_flush_range(start, end)
 *
 * There is no efficient way to clean and invalidate a specified
 * virtual address range, so the whole D cache is done instead.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm940_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(arm940_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
SYM_FUNC_END(arm940_dma_map_area)
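
/*
 * A rough C sketch of the dispatch above, assuming the usual
 * enum dma_data_direction values (DMA_BIDIRECTIONAL = 0,
 * DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
 *
 *	end = start + size;			// add r1, r1, r0
 *	if (dir == DMA_TO_DEVICE)		// beq
 *		arm940_dma_clean_range(start, end);
 *	else if (dir > DMA_TO_DEVICE)		// bcs, i.e. DMA_FROM_DEVICE
 *		arm940_dma_inv_range(start, end);
 *	else					// DMA_BIDIRECTIONAL
 *		arm940_dma_flush_range(start, end);
 */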

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(arm940_dma_unmap_area)
	ret	lr
SYM_FUNC_END(arm940_dma_unmap_area)

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data areas 3-7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction areas 3-7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1
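
/*
 * A rough sketch of the protection region register value built here and
 * by pr_val below (assumed from the ARM940T layout implied by the 0x3F
 * value above; see the TRM and proc-macros.S for the authoritative
 * definition):
 *
 *	bits [31:12]	region base address (must be size-aligned)
 *	bits  [5:1]	region size N, where size = 2^(N + 1) bytes
 *			(N = 11 -> 4KB, ..., N = 31 -> 4GB)
 *	bit   [0]	region enable
 *
 * e.g. 0x0000003F = base 0, N = 31 (4GB), enabled.
 */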

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c1, 0		@ set area 1, RAM
	mcr	p15, 0, r3, c6, c1, 1

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
	pr_val	r3, r0, r7, #1			@ use the size just loaded into r7
	mcr	p15, 0, r3, c6, c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r3, c6, c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ regions 1 and 2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache

	ret	lr

	.size	__arm940_setup, . - __arm940_setup
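
/*
 * Like the other proc-*.S setup routines, __arm940_setup is expected to
 * return the desired CP15 control register value in r0; the nommu boot
 * code writes it back, which is what finally turns on the I-cache,
 * D-cache and MPU bits OR-ed in above. (Assumed from the shared
 * proc-fns convention rather than stated in this file.)
 */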

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	.section ".proc.info.init", "a"

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	initfn	__arm940_setup, __arm940_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info
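
/*
 * The __arm940_proc_info record above follows the generic
 * struct proc_info_list layout (<asm/procinfo.h>): at boot the CPU ID
 * read from CP15 is matched against the value/mask pair, the initfn
 * (__arm940_setup) is called, and arm940_processor_functions and
 * arm940_cache_fns are hooked up. This is a summary of the common
 * convention, not something specific to this file.
 */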