/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */
#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory");
}
static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"ori	r12, r12, %0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"andi	r12, r12, ~%0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"ori	r12, r12, %0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"andi	r12, r12, ~%0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory", "r12");
}
/* Helper macro for computing the limits of cache range loops
 *
 * The end address can be unaligned, which is OK for the C
 * implementation; the ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
} while (0)
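/*
 * A worked example with assumed numbers: for a 32-byte line,
 * align == ~0x1f == 0xffffffe0. Limiting [0x1008, 0x1044) first clamps
 * end to at most start + cache_size (more than one full sweep of the
 * cache is never useful), then "start &= align" drops start to 0x1000
 * so the loop begins on a cache line boundary.
 */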
/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on every cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;"	\
				"bgtid	%0, 1b;"		\
				"addk	%0, %0, %1;"		\
				: : "r" (len), "r" (step)	\
				: "memory");			\
} while (0)
/* Used for wdc.flush/clear, which can use rB for the offset; that is
 * not possible for simple wdc or wic.
 *
 * The start address is cache aligned. The end address is not aligned:
 * if end is aligned, I have to subtract one cacheline length because I
 * can't flush/invalidate the next cacheline; if it is not, I align it
 * because I will flush/invalidate the whole line anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;\
	count = end - start;						\
	WARN_ON(count < 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;"	\
				"bgtid	%1, 1b;"		\
				"addk	%1, %1, %2;"		\
				: : "r" (start), "r" (count),	\
				"r" (step) : "memory");		\
} while (0)
/* Only the first operand of OP is used here - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	unsigned int volatile temp = 0;					\
	unsigned int align = ~(line_length - 1);			\
	end = ((end & align) == end) ? end - line_length : end & align;\
	WARN_ON(end < start);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;"		\
				"cmpu	%0, %1, %2;"			\
				"bgtid	%0, 1b;"			\
				"addk	%1, %1, %3;"			\
				: : "r" (temp), "r" (start), "r" (end),	\
				"r" (line_length) : "memory");		\
} while (0)
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}
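/*
 * Naming scheme for the helpers in the rest of this file: *_msr_irq
 * variants mask IRQs and turn the cache off around the loop using
 * msrset/msrclr (as above), *_nomsr_irq variants do the same with the
 * mfs/mts sequence, and *_noirq variants leave the cache enabled and
 * IRQs untouched, which is safe on newer cores.
 */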
static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}
static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}
static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}
static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}
static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
}
static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}
static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}
/* FIXME This is blind invalidation, as expected, but it can't be
 * called on noMMU in microblaze_cache_init below.
 *
 * MS: the noMMU kernel won't boot if simple wdc is used;
 * the reason should be that data which the kernel needs gets discarded.
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
					wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
				: : "r" (i));
#endif
}
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}
static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc.flush	%0, r0;" \
					: : "r" (i));
#endif
}
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
				: : "r" (i));
#endif
}
/* struct for wb caches and for wt caches */
struct scache *mbc;
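/*
 * All cache maintenance funnels through this pointer: the wrapper
 * macros in asm/cacheflush.h (enable_icache(), flush_dcache_range(),
 * invalidate_dcache_range(), ...) expand to indirect calls such as
 * mbc->ie() or mbc->dinr(start, end), so microblaze_cache_init() only
 * has to pick the right ops table once at boot.
 */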
/* new wb cache model */
static const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
/* The only difference is in the ie, id, de, dd functions */
static const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
/* Old wt cache model, which disables IRQs and turns the cache off */
static const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};
static const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};
/* New wt cache model for newer MicroBlaze versions */
static const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
static const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f
void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			pr_info("wb_msr\n");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				pr_info("WB won't work properly\n");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				pr_info("wt_msr_noirq\n");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				pr_info("wt_msr\n");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			pr_info("wb_nomsr\n");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				pr_info("WB won't work properly\n");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				pr_info("wt_nomsr_noirq\n");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				pr_info("wt_nomsr\n");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}
	/*
	 * FIXME Invalidation is done in U-BOOT
	 * WT cache: data is already written to main memory
	 * WB cache: discarding data on noMMU is what kept the kernel
	 * from booting
	 */
	/* invalidate_dcache(); */
	enable_dcache();

	invalidate_icache();
	enable_icache();
}
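/*
 * Selection summary (a sketch of the logic above):
 *
 *                     write-back    WT, ver >= 7.20.a   WT, older
 *   with MSR instr    wb_msr        wt_msr_noirq        wt_msr
 *   without           wb_nomsr      wt_nomsr_noirq      wt_nomsr
 *
 * The MSR column picks the msrset/msrclr enable/disable helpers;
 * cores from 7.20.a on get the loops that keep IRQs enabled and the
 * cache turned on.
 */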