/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;
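
/*
 * Derive the alias parameters for one cache: alias_mask picks out the
 * virtual index bits above PAGE_SHIFT (the page "colour"), and n_aliases
 * is the number of distinct colours, or 0 when a way fits within a single
 * page and aliasing cannot occur.
 */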
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
56 printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
57 boot_cpu_data
.icache
.ways
,
58 boot_cpu_data
.icache
.sets
,
59 boot_cpu_data
.icache
.way_incr
);
60 printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
61 boot_cpu_data
.icache
.entry_mask
,
62 boot_cpu_data
.icache
.alias_mask
,
63 boot_cpu_data
.icache
.n_aliases
);
64 printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
65 boot_cpu_data
.dcache
.ways
,
66 boot_cpu_data
.dcache
.sets
,
67 boot_cpu_data
.dcache
.way_incr
);
68 printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
69 boot_cpu_data
.dcache
.entry_mask
,
70 boot_cpu_data
.dcache
.alias_mask
,
71 boot_cpu_data
.dcache
.n_aliases
);
	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}
/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */
void __init p3_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	switch (boot_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();
}
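
/*
 * The three region primitives below differ only in the operand cache
 * instruction applied to each line: ocbwb writes a dirty line back
 * without invalidating it, ocbp writes back and invalidates (purge),
 * and ocbi invalidates without any write-back.
 */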
/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
/*
 * Invalidate the D-cache lines for the region; no write-back, please.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_icache_all();
}
/*
 * Write back the D-cache and purge the I-cache for the signal trampoline,
 * which happens to be the same behaviour as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY |
			(v & boot_cpu_data.icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < boot_cpu_data.icache.ways;
	     i++, index += boot_cpu_data.icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}
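
/*
 * Wrapper for __flush_cache_4096: ORing SH_CACHE_ASSOC into the address
 * array slot requests an associative write, and exec_offset is set to
 * 0x20000000 (the P1 to P2 offset) whenever the flush code itself must
 * run from P2, i.e. on parts with the P2 flush bug or when touching the
 * I-cache side of the array.
 */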
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
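
/*
 * PG_mapped is the sh-specific page flag set once a page has been mapped
 * into user space; pages that were never mapped cannot hold stale user
 * aliases, so they are skipped here.
 */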
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop over all the alias colours of the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_P1();
	local_irq_restore(flags);
}
void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}
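
/*
 * Walk the page tables for [start, end) and build a bitmap 'd' with one
 * bit per D-cache colour: for every present PTE whose virtual and
 * physical colours differ, mark both colours as needing a flush.  The
 * walk bails out early once every colour is marked, and only the marked
 * colours are then flushed through the segment flusher.
 */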
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here. So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that. We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If the cache is only 4k-per-way, there are never any 'aliases'.
	 * Since the cache is physically tagged, the data can just be left
	 * in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when there is an alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If the cache is only 4k-per-way, there are never any 'aliases'.
	 * Since the cache is physically tagged, the data can just be left
	 * in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see
		 * if this matters.
		 */
		flush_icache_all();
	}
}
/**
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}
/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * If I write "=r" for the (temp_pc), it puts this in r6, hence
	 * trashing exec_offset before it's been added on - why?  Hence
	 * "=&r" as a 'workaround'.
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
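
/*
 * The segment flushers below rely on the movca.l/ocbi pairing: movca.l
 * allocates a line in the target set without fetching from memory, which
 * evicts (and writes back, if dirty) whatever line was resident there,
 * and the ocbi then invalidates the freshly allocated line so the dummy
 * r0 store never reaches memory.  SR.BL (bit 28) is set around each
 * group so no interrupt can land between a movca.l and its ocbi.
 */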
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}