/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>
/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_ICACHE_PAGES	32

static void __flush_cache_one(unsigned long addr, unsigned long phys,
			      unsigned long exec_offset);

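/*
 * The sh4_flush_*() routines below are installed behind the local_flush_*
 * hooks in sh4_cache_init(). They all take a void * because the generic
 * arch/sh cache flushing code passes their arguments packed in a
 * struct flusher_data (vma, addr1, addr2).
 */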
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out loader routines,
 * signal handler code and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow away the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(NULL);
		return;
	}

	/*
	 * Selectively flush d-cache then invalidate the i-cache.
	 * This is inefficient, so only use this for small ranges.
	 */
	start &= ~(L1_CACHE_BYTES - 1);
	end += L1_CACHE_BYTES - 1;
	end &= ~(L1_CACHE_BYTES - 1);

	local_irq_save(flags);
	jump_to_uncached();

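	/*
	 * For each cache line in the range: write the line back from the
	 * D-cache, then clear the valid bit of every I-cache entry (one per
	 * way and per alias colour) that could hold that address, via the
	 * memory-mapped IC address array.
	 */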
	for (v = start; v < end; v += L1_CACHE_BYTES) {
		unsigned long icacheaddr;
		int j, n;

		__ocbwb(v);

		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
				cpu_data->icache.entry_mask);

		/* Clear i-cache line valid-bit */
		n = boot_cpu_data.icache.n_aliases;
		for (i = 0; i < cpu_data->icache.ways; i++) {
			for (j = 0; j < n; j++)
				__raw_writel(0, icacheaddr + (j * PAGE_SIZE));
			icacheaddr += cpu_data->icache.way_incr;
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}

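/*
 * Flush one page's worth of cache lines of the colour implied by @start
 * through the memory-mapped cache address array, running from an uncached
 * (P2) address when the CPU requires it. See __flush_cache_one() below for
 * the details of the array writes.
 */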
static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be uncached to operate on the I-cache.
	 * Some types of SH-4 require PC to be uncached to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = cached_to_uncached;

	local_irq_save(flags);
	__flush_cache_one(start, phys, exec_offset);
	local_irq_restore(flags);
}
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_folio(void *arg)
{
	struct folio *folio = arg;
#ifndef CONFIG_SMP
	struct address_space *mapping = folio_flush_mapping(folio);

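	/*
	 * A pagecache folio that is not currently mapped into any user
	 * address space does not need to be flushed right away: clearing
	 * PG_dcache_clean marks it as needing a flush, which is then done
	 * lazily when the folio is actually mapped.
	 */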
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &folio->flags);
	else
#endif
	{
		unsigned long pfn = folio_pfn(folio);
		unsigned long addr = (unsigned long)folio_address(folio);
		unsigned int i, nr = folio_nr_pages(folio);

		for (i = 0; i < nr; i++) {
			flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
						(addr & shm_align_mask),
					pfn * PAGE_SIZE);
			addr += PAGE_SIZE;
			pfn++;
		}
	}

	wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

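	/*
	 * Setting the ICI (I-cache invalidate) bit in CCR must be done while
	 * executing from an uncached (P2) address, hence the
	 * jump_to_uncached()/back_to_cached() pair around the CCR update.
	 */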
	/* Flush I-cache */
	ccr = __raw_readl(SH_CCR);
	ccr |= CCR_CACHE_ICI;
	__raw_writel(ccr, SH_CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_cached();
	local_irq_restore(flags);
}
static void flush_dcache_all(void)
{
	unsigned long addr, end_addr, entry_offset;

	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(current_cpu_data.dcache.sets <<
		 current_cpu_data.dcache.entry_shift) *
			current_cpu_data.dcache.ways;

	entry_offset = 1 << current_cpu_data.dcache.entry_shift;

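	/*
	 * Writing 0 to every OC address array entry clears its valid and
	 * dirty bits, invalidating the line; on SH-4 an address-array write
	 * that clears the U bit of a dirty entry writes the line back first,
	 * so this flushes and invalidates the whole D-cache. The loop body
	 * is unrolled eight entries per iteration.
	 */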
	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
	}
}
static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.
 *
 * Caller takes mm->mmap_lock.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	flush_dcache_all();
}
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	struct page *page;
	unsigned long address, pfn, phys;
	int map_coherent = 0;
	pmd_t *pmd;
	pte_t *pte;
	void *vaddr;

	vma = data->vma;
	address = data->addr1 & PAGE_MASK;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;
	page = pfn_to_page(pfn);

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	pmd = pmd_off(vma->vm_mm, address);
	pte = pte_offset_kernel(pmd, address);

	/* If the page isn't present, there is nothing to do here. */
	if (!(pte_val(*pte) & _PAGE_PRESENT))
		return;

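	/*
	 * If the page belongs to the mm that is currently live on this CPU,
	 * it can be flushed directly through its user-space (U0) address.
	 * Otherwise map it into the kernel first, with kmap_coherent() when
	 * a congruent (same colour) alias is wanted, or plain kmap_atomic()
	 * when it is not, and flush through that mapping instead.
	 */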
	if (vma->vm_mm == current->active_mm)
		vaddr = NULL;
	else {
		struct folio *folio = page_folio(page);
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (current_cpu_data.dcache.n_aliases &&
			test_bit(PG_dcache_clean, folio_flags(folio, 0)) &&
			page_mapped(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, address);
		else
			vaddr = kmap_atomic(page);

		address = (unsigned long)vaddr;
	}

	flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
			(address & shm_align_mask), phys);

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent(vaddr);
		else
			kunmap_atomic(vaddr);
	}
}
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

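	/*
	 * With multiple alias colours there is no cheap way to pick out only
	 * the lines belonging to this range, so write back and invalidate
	 * the whole D-cache instead.
	 */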
	flush_dcache_all();

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}
/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region, else 0.
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			      unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * If "=r" is used for (temp_pc), the compiler is free to give it
	 * the same register as exec_offset, trashing exec_offset before it
	 * has been added on - hence "=&r" (early clobber).
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
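	/*
	 * Outer loop: one pass per cache way. Inner loop: walk one page's
	 * worth of the array for that way, touching two 32-byte lines (at
	 * a and a+32) per iteration and advancing by 64 bytes.
	 */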
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a + 32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		__raw_readl(CCN_PVR),
		__raw_readl(CCN_CVR),
		__raw_readl(CCN_PRR));

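	/*
	 * Register the SH-4 implementations behind the generic local_flush_*
	 * entry points used by the arch/sh cache flushing core.
	 */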
	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_folio	= sh4_flush_dcache_folio;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}