/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>
/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_ICACHE_PAGES	32
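/*
 * Example (assuming a 4 KiB PAGE_SIZE): the ranged path is capped at
 * 32 * 4 KiB = 128 KiB. Anything at or beyond that falls through to
 * local_flush_cache_all(), since walking that many lines one by one
 * would likely cost more than a full flush.
 */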
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format,
 * signal handler code and kprobes code
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow away the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(NULL);
		return;
	}

	/*
	 * Selectively flush d-cache then invalidate the i-cache.
	 * This is inefficient, so only use this for small ranges.
	 */
	start &= ~(L1_CACHE_BYTES - 1);
	end += L1_CACHE_BYTES - 1;
	end &= ~(L1_CACHE_BYTES - 1);

	local_irq_save(flags);
	jump_to_uncached();

	for (v = start; v < end; v += L1_CACHE_BYTES) {
		unsigned long icacheaddr;
		int j, n;

		/* Write this d-cache line back to memory. */
		__ocbwb(v);

		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
				cpu_data->icache.entry_mask);

		/* Clear i-cache line valid-bit */
		n = boot_cpu_data.icache.n_aliases;
		for (i = 0; i < cpu_data->icache.ways; i++) {
			for (j = 0; j < n; j++)
				__raw_writel(0, icacheaddr + (j * PAGE_SIZE));
			icacheaddr += cpu_data->icache.way_incr;
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}
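/*
 * Note on the loop above: (v & entry_mask) picks the I-cache set for the
 * cached address, way_incr steps the same set across every way, and the
 * j loop repeats the write for each page-sized alias. Writing 0 simply
 * clears the valid bit; no write-back is needed, as the I-cache is
 * read-only.
 */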
static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be uncached to operate on the
	 * I-cache. Some types of SH-4 require PC to be uncached to operate
	 * on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = cached_to_uncached;

	local_irq_save(flags);
	__flush_cache_one(start, phys, exec_offset);
	local_irq_restore(flags);
}
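/*
 * cached_to_uncached is the distance from the cached P1 window to the
 * uncached P2 window (0x20000000 on a typical 29-bit address map, per the
 * __flush_cache_one() description below), so adding it to the PC makes
 * the flush loop itself execute uncached while it pokes the cache arrays.
 */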
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	unsigned long addr = (unsigned long)page_address(page);
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
#endif
		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
				(addr & shm_align_mask), page_to_phys(page));

	wmb();
}
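/*
 * Example (assuming a 16 KiB per-way D-cache and 4 KiB pages): there are
 * 16K / 4K = 4 possible cache colours, and (addr & shm_align_mask) picks
 * the slice of the OC address array with the same colour as 'addr', so
 * only the sets that can actually hold this page's lines are written.
 */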
/* TODO: Selective icache invalidation through IC address array.. */
static void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
	ccr = __raw_readl(CCR);
	ccr |= CCR_CACHE_ICI;
	__raw_writel(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}
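/*
 * Setting CCR_CACHE_ICI invalidates the whole I-cache in one shot. The
 * jump_to_uncached()/back_to_cached() pair is required because CCR may
 * only be modified while executing from the uncached P2 region.
 */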
static void flush_dcache_all(void)
{
	unsigned long addr, end_addr, entry_offset;

	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(current_cpu_data.dcache.sets <<
		 current_cpu_data.dcache.entry_shift) *
			current_cpu_data.dcache.ways;

	entry_offset = 1 << current_cpu_data.dcache.entry_shift;

	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
	}
}
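/*
 * Example (assuming a 32 KiB, 2-way D-cache with 32-byte lines): each way
 * holds 512 sets, so the 8x-unrolled loop above issues 512 * 2 = 1024
 * writes; writing 0 to an OC address array entry clears its valid and
 * dirty bits, writing the line back first if it was dirty.
 */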
static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	flush_dcache_all();
}
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	struct page *page;
	unsigned long address, pfn, phys;
	int map_coherent = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	void *vaddr;

	vma = data->vma;
	address = data->addr1 & PAGE_MASK;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;
	page = pfn_to_page(pfn);

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	pgd = pgd_offset(vma->vm_mm, address);
	pud = pud_offset(pgd, address);
	pmd = pmd_offset(pud, address);
	pte = pte_offset_kernel(pmd, address);

	/* If the page isn't present, there is nothing to do here. */
	if (!(pte_val(*pte) & _PAGE_PRESENT))
		return;

	if ((vma->vm_mm == current->active_mm))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (current_cpu_data.dcache.n_aliases &&
			test_bit(PG_dcache_clean, &page->flags) &&
			page_mapcount(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, address);
		else
			vaddr = kmap_atomic(page);

		address = (unsigned long)vaddr;
	}

	flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
			(address & shm_align_mask), phys);

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent(vaddr);
		else
			kunmap_atomic(vaddr);
	}
}
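/*
 * When flushing on behalf of an mm other than the current one, the page
 * is remapped with kmap_coherent() (same colour as the user mapping) or
 * kmap_atomic(), and 'address' is redirected to that kernel mapping so
 * the OC address array write above lands on the sets where the stale
 * data actually lives.
 */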
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	flush_dcache_all();

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}
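/*
 * Example: n_aliases is the number of distinct page colours per way, with
 * 0 meaning a way holds exactly one page. With 4 KiB pages, a 4 KiB way
 * maps every page onto the same sets, so no aliases can form; a 16 KiB
 * way gives four colours and forces the full flush.
 */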
/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
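/*
 * Example (assuming a 4 KiB PAGE_SIZE): the inner loop advances 64 bytes
 * per iteration but performs two array writes (a and a+32), i.e. one per
 * 32-byte cache line, so each way takes 4096 / 32 = 128 writes, repeated
 * way_count times.
 */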
extern void __weak sh4__flush_region_init(void);
/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		__raw_readl(CCN_PVR),
		__raw_readl(CCN_CVR),
		__raw_readl(CCN_PRR));

	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}