/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
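
/*
 * Worked example (added for illustration, not in the original source):
 * a 16 KiB one-way D-cache with 512 sets and a 32-byte line
 * (entry_shift = 5), paired with 4 KiB pages, gives
 *
 *	alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000
 *	n_aliases  = (0x3000 >> PAGE_SHIFT) + 1     = 4
 *
 * i.e. four page "colours" that can alias one another in the cache.
 */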
static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);
	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}
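
/*
 * For illustration only (hypothetical values, not from the original
 * source): with the 16 KiB one-way D-cache of the compute_alias()
 * example above, the D-cache lines would print roughly
 *
 *	D-cache : n_ways=1 n_sets=512 way_incr=16384
 *	D-cache : entry_mask=0x00003fe0 alias_mask=0x00003000 n_aliases=4
 */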
/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init p3_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	switch (boot_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();
}
/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
/*
 * Invalidate the D-cache lines for the region without writing them back.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
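
/*
 * Added summary (not from the original source): ocbwb writes dirty
 * lines back and leaves them valid, ocbp writes back and invalidates,
 * and ocbi invalidates without writing back.  A hypothetical DMA
 * mapping layer would pick between the three primitives above by
 * transfer direction:
 *
 *	__flush_wback_region(buf, len);       CPU wrote, device will read
 *	__flush_invalidate_region(buf, len);  device wrote, CPU will read
 *	__flush_purge_region(buf, len);       bidirectional transfers
 */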
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out loader,
 * signal handler code and kprobes code.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	int icacheaddr;
	unsigned long flags, v;
	int i;

	/* If there are too many pages then just blow the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		flush_cache_all();
	} else {
		/* selectively flush d-cache then invalidate the i-cache */
		/* this is inefficient, so only use for small ranges */
		start &= ~(L1_CACHE_BYTES-1);
		end += L1_CACHE_BYTES-1;
		end &= ~(L1_CACHE_BYTES-1);

		local_irq_save(flags);
		jump_to_uncached();

		for (v = start; v < end; v+=L1_CACHE_BYTES) {
			asm volatile("ocbwb	%0"
				     : /* no output */
				     : "m" (__m(v)));

			icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
					v & cpu_data->icache.entry_mask);

			for (i = 0; i < cpu_data->icache.ways;
				i++, icacheaddr += cpu_data->icache.way_incr)
					/* Clear i-cache line valid-bit */
					ctrl_outl(0, icacheaddr);
		}

		back_to_cached();
		local_irq_restore(flags);
	}
}
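
/*
 * Usage sketch (illustrative, not part of the original file): any code
 * that writes instructions through the D-cache must call the above
 * before executing them, e.g.
 *
 *	memcpy(code, insn_buf, len);
 *	flush_icache_range((unsigned long)code, (unsigned long)code + len);
 */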
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
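
/*
 * Added note: SH_CACHE_ASSOC sets the 'A' bit in the address-array
 * address, selecting an associative write.  The line is then only
 * written back/invalidated if its tag matches the physical address
 * passed down (see the __flush_cache_4096() kerneldoc below), so
 * unrelated data sharing the same cache slot is left alone.
 */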
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
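
/*
 * Added note: addr steps through CACHE_OC_ADDRESS_ARRAY in 4 KiB
 * increments, one slot per page colour, so every line that could hold
 * an alias of this physical page is covered no matter which virtual
 * address mapped it.
 */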
/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}
void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when there is an alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/**
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}
/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region.
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
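
/*
 * Added explanatory note: each asm group in the functions below pairs
 * movca.l with ocbi.  movca.l allocates a line for an address inside
 * empty_zero_page without fetching it from memory, which evicts (and
 * writes back, if dirty) whatever line previously occupied that slot;
 * the following ocbi then discards the newly allocated line so the
 * dummy data never reaches memory.  SR.BL (bit 28) is set around each
 * group so no interrupt can slip between the paired instructions.
 */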
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}