/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
#include <asm/mips-cps.h>
/*
 * Bits describing what cache ops an SMP callback function may perform.
 *
 * R4K_HIT   -	Virtual user or kernel address based cache operations. The
 *		active_mm must be checked before using user addresses, falling
 *		back to kmap.
 * R4K_INDEX -	Index based cache operations.
 */

#define R4K_HIT		BIT(0)
#define R4K_INDEX	BIT(1)
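
/*
 * Usage sketch (illustrative, not part of the original file): the sigtramp
 * and page flushes below pass R4K_HIT because they operate on addresses,
 * whole-cache flushes pass R4K_INDEX, and flush_icache_range() starts from
 * R4K_HIT | R4K_INDEX and drops R4K_INDEX again for small ranges.
 */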
/**
 * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
 * @type:	Type of cache operations (R4K_HIT or R4K_INDEX).
 *
 * Decides whether a cache op needs to be performed on every core in the system.
 * This may change depending on the @type of cache operation, as well as the set
 * of online CPUs, so preemption should be disabled by the caller to prevent CPU
 * hotplug from changing the result.
 *
 * Returns:	1 if the cache operation @type should be done on every core in
 *		the system.
 *		0 if the cache operation @type is globalized and only needs to
 *		be performed on a single CPU.
 */
static inline bool r4k_op_needs_ipi(unsigned int type)
{
	/* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
	if (type == R4K_HIT && mips_cm_present())
		return false;

	/*
	 * Hardware doesn't globalize the required cache ops, so SMP calls may
	 * be needed, but only if there are foreign CPUs (non-siblings with
	 * separate caches).
	 */
	/* cpu_foreign_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
	return !cpumask_empty(&cpu_foreign_map[0]);
#else
	return false;
#endif
}
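
/*
 * Example (illustrative, not part of the original file): on a system with a
 * Coherence Manager, r4k_op_needs_ipi(R4K_HIT) is false because the CM
 * globalizes address-based ops, while r4k_op_needs_ipi(R4K_INDEX) stays true
 * whenever cpu_foreign_map[0] is non-empty, because index ops only ever act
 * on the local cache.
 */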
/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(unsigned int type,
				   void (*func)(void *info), void *info)
{
	preempt_disable();
	if (r4k_op_needs_ipi(type))
		smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
				       func, info, 1);
	func(info);
	preempt_enable();
}
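
/*
 * Usage sketch (illustrative, not part of the original file;
 * local_example_op is a hypothetical name):
 *
 *	static void local_example_op(void *info)
 *	{
 *		... index-based ops on this CPU's caches ...
 *	}
 *
 *	r4k_on_each_cpu(R4K_INDEX, local_example_op, NULL);
 *
 * r4k_on_each_cpu() always runs the callback locally, and additionally
 * broadcasts it to foreign CPUs when r4k_op_needs_ipi() reports that the
 * hardware will not globalize the ops.
 */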
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	blast_dcache64_page(addr);
}

static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
	blast_dcache128_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
	else if (dc_lsize == 128)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
}
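
/*
 * Illustrative note (not part of the original file): each *_setup() helper
 * runs once from r4k_cache_init() and binds the variant matching the probed
 * line size, so the hot paths pay a single indirect call instead of
 * re-testing cpu_dcache_line_size() on every flush:
 *
 *	r4k_blast_dcache_page_setup();	(picks e.g. the dc32 variant)
 *	r4k_blast_dcache_page(addr);	(later calls dispatch directly)
 */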
#ifndef CONFIG_EVA
#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
#else

static void (*r4k_blast_dcache_user_page)(unsigned long addr);

static void r4k_blast_dcache_user_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_user_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_user_page = blast_dcache16_user_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_user_page = blast_dcache32_user_page;
	else if (dc_lsize == 64)
		r4k_blast_dcache_user_page = blast_dcache64_user_page;
}

#endif
static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
	else if (dc_lsize == 128)
		r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
}
void (*r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
	else if (dc_lsize == 128)
		r4k_blast_dcache = blast_dcache128;
}
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
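
/*
 * Worked example (illustrative, not part of the original file): one
 * cache32_unroll32() expansion issues 32 index ops on 32-byte lines, i.e.
 * covers one 32 * 32 = 0x400 byte chunk.  Aligning the loop code to 1 << 10
 * or 1 << 11 pins it inside a known chunk, so the even/odd passes below can
 * always invalidate the chunks they are *not* executing from.
 */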
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static void (*r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
		r4k_blast_icache_page = loongson2_blast_icache32_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
	else if (ic_lsize == 128)
		r4k_blast_icache_page = blast_icache128_page;
}
#ifndef CONFIG_EVA
#define r4k_blast_icache_user_page  r4k_blast_icache_page
#else

static void (*r4k_blast_icache_user_page)(unsigned long addr);

static void r4k_blast_icache_user_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_user_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_user_page = blast_icache16_user_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_user_page = blast_icache32_user_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_user_page = blast_icache64_user_page;
}

#endif
static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache_page_indexed =
				loongson2_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
void (*r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache = loongson2_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
	else if (ic_lsize == 128)
		r4k_blast_icache = blast_icache128;
}
static void (*r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}
static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
static void (*r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
static inline void local_r4k___flush_cache_all(void *args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		/*
		 * These caches are inclusive caches, that is, if something
		 * is not cached in the S-cache, we know it also won't be
		 * in one of the primary caches.
		 */
		r4k_blast_scache();
		break;
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
}
/**
 * has_valid_asid() - Determine if an mm already has an ASID.
 * @mm:		Memory map.
 * @type:	R4K_HIT or R4K_INDEX, type of cache op.
 *
 * Determines whether @mm already has an ASID on any of the CPUs which cache ops
 * of type @type within an r4k_on_each_cpu() call will affect. If
 * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
 * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
 * will need to be checked.
 *
 * Must be called in non-preemptive context.
 *
 * Returns:	1 if the CPUs affected by @type cache ops have an ASID for @mm.
 *		0 otherwise.
 */
static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
{
	unsigned int i;
	const cpumask_t *mask = cpu_present_mask;

	/* cpu_sibling_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
	/*
	 * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
	 * each foreign core, so we only need to worry about siblings.
	 * Otherwise we need to worry about all present CPUs.
	 */
	if (r4k_op_needs_ipi(type))
		mask = &cpu_sibling_map[smp_processor_id()];
#endif
	for_each_cpu(i, mask)
		if (cpu_context(i, mm))
			return 1;
	return 0;
}
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}
/*
 * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
 * whole caches when vma is executable.
 */
static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
		return;

	/*
	 * If dcache can alias, we must blast it since mapping is changing.
	 * If executable, we must ensure any dirty lines are written back far
	 * enough to be visible to icache.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_blast_dcache();
	/* If executable, blast stale lines from icache */
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || exec)
		r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm, R4K_INDEX))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, but the R1x000 family behaves
	 * sanely ...  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};
static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If owns no valid ASID yet, cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm, R4K_HIT))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapcount(page) &&
				!Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		vaddr ? r4k_blast_dcache_page(addr) :
			r4k_blast_dcache_user_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			vaddr ? r4k_blast_icache_page(addr) :
				r4k_blast_icache_user_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
				(void *) addr);
}
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
	unsigned int type;
	bool user;
};
static inline void __local_r4k_flush_icache_range(unsigned long start,
						  unsigned long end,
						  unsigned int type,
						  bool user)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (type == R4K_INDEX ||
		    (type & R4K_INDEX && end - start >= dcache_size)) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			if (user)
				protected_blast_dcache_range(start, end);
			else
				blast_dcache_range(start, end);
		}
	}

	if (type == R4K_INDEX ||
	    (type & R4K_INDEX && end - start > icache_size))
		r4k_blast_icache();
	else {
		switch (boot_cpu_type()) {
		case CPU_LOONGSON2:
			protected_loongson2_blast_icache_range(start, end);
			break;

		default:
			if (user)
				protected_blast_icache_range(start, end);
			else
				blast_icache_range(start, end);
			break;
		}
	}
}
static inline void local_r4k_flush_icache_range(unsigned long start,
						unsigned long end)
{
	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
}

static inline void local_r4k_flush_icache_user_range(unsigned long start,
						     unsigned long end)
{
	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
}
static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;
	unsigned int type = fir_args->type;
	bool user = fir_args->user;

	__local_r4k_flush_icache_range(start, end, type, user);
}
static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
				     bool user)
{
	struct flush_icache_range_args args;
	unsigned long size, cache_size;

	args.start = start;
	args.end = end;
	args.type = R4K_HIT | R4K_INDEX;
	args.user = user;

	/*
	 * Indexed cache ops require an SMP call.
	 * Consider if that can or should be avoided.
	 */
	preempt_disable();
	if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
		/*
		 * If address-based cache ops don't require an SMP call, then
		 * use them exclusively for small flushes.
		 */
		size = end - start;
		cache_size = icache_size;
		if (!cpu_has_ic_fills_f_dc) {
			size *= 2;
			cache_size += dcache_size;
		}
		if (size <= cache_size)
			args.type &= ~R4K_INDEX;
	}
	r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
	preempt_enable();
	instruction_hazard();
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	return __r4k_flush_icache_range(start, end, false);
}

static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
{
	return __r4k_flush_icache_range(start, end, true);
}
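
/*
 * Worked example (illustrative, not part of the original file): with a 32kB
 * I-cache, a 32kB D-cache and !cpu_has_ic_fills_f_dc, flushing 16kB gives
 * size = 2 * 16kB = 32kB and cache_size = 64kB; since size <= cache_size,
 * R4K_INDEX is dropped and the range is flushed with globalized hit ops
 * alone, avoiding the IPI broadcast.
 */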
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	if (WARN_ON(size == 0))
		return;

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		preempt_enable();
		__sync();
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly.
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_wback_inv(addr, size);
	__sync();
}
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	if (WARN_ON(size == 0))
		return;

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors will throw an address error for cache
			 * hit ops with insufficient alignment.  Solved by
			 * aligning the address to cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		preempt_enable();
		__sync();
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
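
/*
 * Illustrative note (not part of the original file): these two routines back
 * the noncoherent DMA hooks assigned in r4k_cache_init().  Writeback +
 * invalidate is what a buffer needs before a device reads from it, while a
 * plain invalidate suffices before the CPU reads data a device has written:
 *
 *	_dma_cache_wback_inv(addr, size);	(CPU wrote, device will read)
 *	_dma_cache_inv(addr, size);		(device wrote, CPU will read)
 */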
struct flush_cache_sigtramp_args {
	struct mm_struct *mm;
	struct page *page;
	unsigned long addr;
};
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *args)
{
	struct flush_cache_sigtramp_args *fcs_args = args;
	unsigned long addr = fcs_args->addr;
	struct page *page = fcs_args->page;
	struct mm_struct *mm = fcs_args->mm;
	int map_coherent = 0;
	void *vaddr;

	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();

	/*
	 * If owns no valid ASID yet, cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm, R4K_HIT))
		return;

	if (mm == current->active_mm) {
		vaddr = NULL;
	} else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapcount(page) &&
				!Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr + (addr & ~PAGE_MASK);
	}

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (!cpu_has_ic_fills_f_dc) {
		if (dc_lsize)
			vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
			      : protected_writeback_dcache_line(
							addr & ~(dc_lsize - 1));
		if (!cpu_icache_snoops_remote_store && scache_size)
			vaddr ? flush_scache_line(addr & ~(sc_lsize - 1))
			      : protected_writeback_scache_line(
							addr & ~(sc_lsize - 1));
	}
	if (ic_lsize)
		vaddr ? flush_icache_line(addr & ~(ic_lsize - 1))
		      : protected_flush_icache_line(addr & ~(ic_lsize - 1));

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}

	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set "MIPS_ISA_LEVEL"\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	struct flush_cache_sigtramp_args args;
	int npages;

	down_read(&current->mm->mmap_sem);

	npages = get_user_pages_fast(addr, 1, 0, &args.page);
	if (npages < 1)
		goto out;

	args.mm = current->mm;
	args.addr = addr;

	r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args);

	put_page(args.page);
out:
	up_read(&current->mm->mmap_sem);
}
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
struct flush_kernel_vmap_range_args {
	unsigned long	vaddr;
	int		size;
};
static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
{
	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	r4k_blast_dcache();
}

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache_range(vaddr, vaddr + size);
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	if (size >= dcache_size)
		r4k_on_each_cpu(R4K_INDEX,
				local_r4k_flush_kernel_vmap_range_index, NULL);
	else
		r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
				&args);
}
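
/*
 * Example (illustrative, not part of the original file): the size check
 * above is the usual trade-off; flushing a 64kB vmap range line by line on
 * a 32kB D-cache would touch every cache line anyway, so blasting the whole
 * cache by index is cheaper and, unlike hit ops, needs no valid mapping.
 */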
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
	unsigned int imp = c->processor_id & PRID_IMP_MASK;
	unsigned int rev = c->processor_id & PRID_REV_MASK;
	int present = 0;

	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.  Also disable the synonym tag update feature
	 * where available.  In this case no opportunistic tag update will
	 * happen where a load causes a virtual address miss but a physical
	 * address hit during a D-cache look-up.
	 */
	switch (imp) {
	case PRID_IMP_74K:
		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
			present = 1;
		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		break;
	case PRID_IMP_1074K:
		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
			present = 1;
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		}
		break;
	}

	return present;
}
static void b5k_instruction_hazard(void)
{
	__sync();
	__sync();
	__asm__ __volatile__(
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	"	nop; nop; nop; nop; nop; nop; nop; nop\n"
	: : : "memory");
}
static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
	"9-way", "10-way", "11-way", "12-way",
	"13-way", "14-way", "15-way", "16-way",
};
static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	int has_74k_erratum = 0;
	unsigned long config1;
	unsigned int lsize;
	switch (current_cpu_type()) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;
	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;
	case CPU_LOONGSON3:
		config1 = read_c0_config1();
		lsize = (config1 >> 19) & 7;
		if (lsize)
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = 0;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = 0;

		lsize = (config1 >> 10) & 7;
		if (lsize)
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = 0;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);
		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = 0;
		if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
			c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_CAVIUM_OCTEON3:
		/* For now lie about the number of ways. */
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 8;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 8;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		lsize = (config1 >> 19) & 7;

		/* IL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid icache line size");

		c->icache.linesz = lsize ? 2 << lsize : 0;

		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & MIPS_CONF_VI)
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		lsize = (config1 >> 10) & 7;

		/* DL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid dcache line size");

		c->dcache.linesz = lsize ? 2 << lsize : 0;

		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
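
	/*
	 * Worked example (illustrative, not part of the original file): a
	 * Config1 value with IS = 2, IA = 3 and IL = 4 decodes to
	 * sets = 32 << ((2 + 1) & 7) = 256, ways = 1 + 3 = 4 and
	 * linesz = 2 << 4 = 32, i.e. a 256 * 4 * 32 = 32kB instruction cache.
	 */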
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");
	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
	/*
	 * R1x000 P-caches are odd in a positive way.  They're 32kB 2-way
	 * virtually indexed so they would normally suffer from aliases, but
	 * magic in the hardware deals with that for us so we don't need to
	 * take care ourselves.
	 */
	switch (current_cpu_type()) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		break;

	case CPU_74K:
	case CPU_1074K:
		has_74k_erratum = alias_74k_erratum(c);
		/* Fall through. */
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_INTERAPTIV:
	case CPU_P5600:
	case CPU_PROAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
	case CPU_M6250:
		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
		    (c->icache.waysize > PAGE_SIZE))
			c->icache.flags |= MIPS_CACHE_ALIASES;
		if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
			/*
			 * Effectively physically indexed dcache,
			 * thus no virtual aliases.
			 */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	/* Physically indexed caches don't suffer from virtual aliasing */
	if (c->dcache.flags & MIPS_CACHE_PINDEX)
		c->dcache.flags &= ~MIPS_CACHE_ALIASES;
	switch (current_cpu_type()) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
	case CPU_I6400:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;

	case CPU_BMIPS5000:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		/* Cache aliases are handled in hardware; allow HIGHMEM */
		c->dcache.flags &= ~MIPS_CACHE_ALIASES;
		break;

	case CPU_LOONGSON2:
		/*
		 * LOONGSON2 has 4 way icache, but when using indexed cache op,
		 * one op will act on all 4 ways
		 */
		c->icache.ways = 1;
	}
	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}
static void probe_vcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

	if (current_cpu_type() != CPU_LOONGSON3)
		return;

	config2 = read_c0_config2();
	if ((lsize = ((config2 >> 20) & 15)))
		c->vcache.linesz = 2 << lsize;
	else
		c->vcache.linesz = lsize;

	c->vcache.sets = 64 << ((config2 >> 24) & 15);
	c->vcache.ways = 1 + ((config2 >> 16) & 15);

	vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;

	c->vcache.waybit = 0;
	c->vcache.waysize = vcache_size / c->vcache.ways;

	pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
		vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
}
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;	/* does not matter */

	return 1;
}
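
/*
 * Worked example (illustrative, not part of the original file): assume a
 * 256kB S-cache.  Index ops wrap modulo the cache size, so the zero tag
 * stored at begin shares an S-cache line with begin + 256k.  The probe loop
 * reads a nonzero tag at begin + 128k (filled earlier) but a zero tag at
 * begin + 256k, breaks there, and addr - begin == 256k is the cache size.
 */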
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
static void __init loongson3_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

	config2 = read_c0_config2();
	lsize = (config2 >> 4) & 15;
	if (lsize)
		c->scache.linesz = 2 << lsize;
	else
		c->scache.linesz = 0;
	c->scache.sets = 64 << ((config2 >> 8) & 15);
	c->scache.ways = 1 + (config2 & 15);

	scache_size = c->scache.sets *
		      c->scache.ways *
		      c->scache.linesz;
	/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
	scache_size *= 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / c->scache.ways;
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;

	case CPU_LOONGSON3:
		loongson3_sc_init();
		return;

	case CPU_CAVIUM_OCTEON3:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
				    MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets *
					      c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set it
	 * just in case for those revisions that require it to be set
	 * according to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}
/* CP0 hazard avoidance. */
#define NXP_BARRIER()						\
	 __asm__ __volatile__(					\
	".set noreorder\n\t"					\
	"nop; nop; nop; nop; nop; nop;\n\t"			\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}
static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);
static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only co_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}
static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}
void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	probe_vcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
#ifdef CONFIG_EVA
	r4k_blast_dcache_user_page_setup();
	r4k_blast_icache_user_page_setup();
#endif

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz && cpu_has_dc_aliases)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;
	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;
	__flush_icache_user_range	= r4k_flush_icache_user_range;
	__local_flush_icache_user_range	= local_r4k_flush_icache_user_range;
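
	/*
	 * Illustrative note (not part of the original file): generic MIPS
	 * code only ever calls these hooks, so it stays ignorant of the
	 * cache flavour; the per-CPU fixups below may still override
	 * individual hooks, e.g. Loongson-3 downgrades most of them to
	 * cache_noop because its hardware keeps the caches coherent.
	 */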
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
# if defined(CONFIG_DMA_PERDEV_COHERENT)
	if (0) {
# else
	if ((coherentio == IO_COHERENCE_ENABLED) ||
	    ((coherentio == IO_COHERENCE_DEFAULT) && hw_coherentio)) {
# endif
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif
	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;
	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		/* No IPI is needed because all CPUs share the same D$ */
		flush_data_cache_page = r4k_blast_dcache_page;
		break;
	case CPU_BMIPS5000:
		/* We lose our superpowers if L2 is disabled */
		if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
			break;

		/* I$ fills from D$ just by emptying the write buffers */
		flush_cache_page = (void *)b5k_instruction_hazard;
		flush_cache_range = (void *)b5k_instruction_hazard;
		flush_cache_sigtramp = (void *)b5k_instruction_hazard;
		local_flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_icache_range = (void *)b5k_instruction_hazard;
		local_flush_icache_range = (void *)b5k_instruction_hazard;

		/* Optimization: an L2 flush implicitly flushes the L1 */
		current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
		break;
	case CPU_LOONGSON3:
		/* Loongson-3 maintains cache coherency by hardware */
		__flush_cache_all	= cache_noop;
		__flush_cache_vmap	= cache_noop;
		__flush_cache_vunmap	= cache_noop;
		__flush_kernel_vmap_range = (void *)cache_noop;
		flush_cache_mm		= (void *)cache_noop;
		flush_cache_page	= (void *)cache_noop;
		flush_cache_range	= (void *)cache_noop;
		flush_cache_sigtramp	= (void *)cache_noop;
		flush_icache_all	= (void *)cache_noop;
		flush_data_cache_page	= (void *)cache_noop;
		local_flush_data_cache_page	= (void *)cache_noop;
		break;
	}
}
static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
				 void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		coherency_setup();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_cache_pm_notifier_block = {
	.notifier_call = r4k_cache_pm_notifier,
};

int __init r4k_cache_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
arch_initcall(r4k_cache_init_pm);