/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, 1);
#endif
	func(info);
	preempt_enable();
}
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
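
/*
 * Index-based cache ops act only on the issuing core's cache and are
 * not ordered by the coherence protocol, so on CMP systems a line may
 * still be live (even dirty) in a sibling core's cache.  Hit ops,
 * being address-based, stay coherent, hence the distinction above.
 */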
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}
static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
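
/*
 * bcops is the indirection through which platform code hooks up an
 * external board-level cache; until a board registers real handlers,
 * every bc_*() call above degenerates to cache_noop.
 */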
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
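
/*
 * The uncached CKSEG1 load drains the R4600 V2.x on-chip buffer before
 * a Hit cache op is issued, and the nops give V1.x parts the couple of
 * cycles they need; both are workarounds for silicon errata, not
 * architectural requirements.
 */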
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache64_page(addr);
}
static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}
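
/*
 * Each *_setup() routine binds its function pointer to an unrolled
 * blaster generated in <asm/r4kcache.h> for the probed line size, so
 * the hot flush paths pay one indirect call instead of re-checking the
 * cache geometry.  With 4kB pages, blast_dcache32_page() for instance
 * covers a page with 4096/32 = 128 Hit_Writeback_Inv_D operations.
 */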
static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}
void (*r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
}
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk. blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk. blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
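
/*
 * The even/odd two-pass structure keeps this routine from invalidating
 * the icache lines it is itself executing from: CACHE32_UNROLL32_ALIGN*
 * pins the loop bodies to 1kB chunks, so the first pass (running in an
 * even chunk) only shoots down odd 1kB chunks, then control moves into
 * an odd chunk and the even ones are invalidated.
 */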
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk. blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk. blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static void (*r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}
static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
void (*r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}
static void (*r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}
static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
static void (*r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif

	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}
static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
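
/*
 * An mm that was never assigned an ASID on any CPU has never had user
 * addresses translated through it, so nothing it maps can be sitting
 * in a virtually indexed or tagged cache; callers use this predicate
 * to skip flushes entirely.
 */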
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}
static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}
static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}
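
/*
 * The cross-call is needed only when the dcache can alias or when the
 * range is executable and the icache does not fill straight from the
 * dcache; in all other configurations no stale line can be observed
 * and flush_cache_range() may legitimately do nothing.
 */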
static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sanely ...
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};
static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If we own no valid ASID yet, we cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;
	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);
	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}
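
	/*
	 * kmap_coherent() maps the page at a kernel virtual address that
	 * is congruent (same cache color) with its user mapping, so on
	 * an aliasing dcache the flushes below hit exactly the lines
	 * userspace dirtied; a plain kmap_atomic() mapping could land on
	 * a different alias.
	 */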
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};
static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}
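
/*
 * The size checks are a cost trade-off: once a range spans the whole
 * cache it is cheaper to blast every index than to issue a hit op per
 * line.  The protected_* variants wrap the cache ops in exception
 * fixups so a fault on an unmapped user address is survived rather
 * than oopsing.
 */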
static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
	instruction_hazard();
}
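
/*
 * instruction_hazard() (from <asm/hazards.h>) issues the instruction
 * hazard barrier (a jr.hb style sequence on MIPS32/64 R2 cores) so
 * nothing prefetched before the invalidation can still execute.
 */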
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		preempt_enable();
		__sync();
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_wback_inv(addr, size);
	__sync();
}
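
/*
 * Division of labour between the two DMA helpers: _wback_inv above
 * writes dirty lines back and discards them (required before a device
 * accesses the buffer), while _inv below only discards, which is
 * enough before the CPU reads data a device has just DMA'd in and
 * avoids pushing stale CPU bytes over it.
 */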
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors, will throw an address error for cache
			 * hit ops with insufficient alignment.  Solved by
			 * aligning the address to cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		preempt_enable();
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}
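
/*
 * Signal trampolines are a few instructions written to the user stack
 * with cached stores, so making them visible to instruction fetch only
 * takes writing back one dcache line (plus an S-cache line when the
 * icache cannot snoop remote stores) and invalidating one icache line,
 * hence the line-wise path above instead of a full blast.
 */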
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
struct flush_kernel_vmap_range_args {
	unsigned long	vaddr;
	int		size;
};
static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}
static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
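
/*
 * The loop walks one way's worth of indexes (4kB) while the 0x1000
 * offsets select the four ways of the RM7000's 16kB icache.  Each line
 * first gets a zero (invalid) tag via %1 = Index_Store_Tag_I, is then
 * refilled (%2 = Fill) and finally invalidated again, leaving the whole
 * icache in a known-clean state after the erratum.
 */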
static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
	unsigned int imp = c->processor_id & PRID_IMP_MASK;
	unsigned int rev = c->processor_id & PRID_REV_MASK;

	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.
	 */
	switch (imp) {
	case PRID_IMP_74K:
		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
			c->dcache.flags |= MIPS_CACHE_VTAG;
		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		break;
	case PRID_IMP_1074K:
		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
			c->dcache.flags |= MIPS_CACHE_VTAG;
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		}
		break;
	}
}
static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;
	switch (current_cpu_type()) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
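
	/*
	 * Decode example for the classic R4x00-style config register:
	 * IC (bits 11:9) = 2 yields icache_size = 1 << (12 + 2) = 16kB,
	 * and with two ways the way-select bit is __ffs(16384/2) = 13.
	 */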
	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;
	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");
		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)	/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;
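
		/*
		 * Example Config1 decode: IL = 4, IS = 2, IA = 3 give
		 * linesz = 2 << 4 = 32 bytes, sets = 32 << 3 = 256 (the
		 * "+ 1 & 7" folds the architected IS encoding, where 7
		 * means 32 sets, into the shift) and ways = 1 + 3 = 4:
		 * a 256 * 4 * 32 = 32kB icache with waybit 13.
		 */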
		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");
	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
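
	/*
	 * Worked example: a 32kB 4-way dcache with 32-byte lines gets
	 * waysize = 32768/4 = 8kB and sets = 32768/(32*4) = 256; waysize
	 * bounds the per-way index range the blast_* loops sweep.
	 */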
	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed so would normally suffer from
	 * aliases, but magic in the hardware deals with that for us so
	 * we don't need to take care ourselves.
	 */
	switch (current_cpu_type()) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if (current_cpu_type() == CPU_74K)
			alias_74k_erratum(c);
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
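
	/*
	 * This is the generic aliasing criterion for a VIPT cache: it
	 * can alias only when index bits reach above the page offset,
	 * i.e. when waysize > PAGE_SIZE.  With 4kB pages an 8kB way
	 * leaves one such bit, so two mappings of the same page may
	 * select different sets unless their cache colors match.
	 */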
	switch (current_cpu_type()) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}
#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif
	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);
	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);
	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}
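
/*
 * The probe works by tag wrap-around: every power-of-two offset in a
 * 4MB window is touched so its S-cache tag becomes valid, the tag at
 * offset 0 is then zeroed, and Index_Load_Tag_SD walks the offsets
 * upward.  The first offset whose tag reads back as zero indexed the
 * same line as offset 0, i.e. it equals the S-cache size.
 */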
#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;
	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;
	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * Au1100 errata actually keeps silence about this bit, so we set it
	 * just in case for those revisions that require it to be set according
	 * to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}
/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")
static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}
static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);
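
/*
 * Example: booting with "cca=3" forces CCA 3 into the default page
 * cachability set up below.  Encodings 2 (uncached) and 3 (cacheable)
 * are architecturally defined; the remaining values are core-specific.
 */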
static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);
	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only co_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}
static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}
void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE-1;
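
	/*
	 * shm_align_mask makes mmap() hand out shared mappings at
	 * virtual addresses that are congruent modulo the dcache way
	 * span (sets * linesz), so every mapping of a page shares one
	 * cache color and aliases are avoided by construction.
	 */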
	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif
	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide
	 * whether or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;
}