/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */
#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

/* Use the assembly-optimized loop variants below by default */
#define ASM_LOOP 1
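/*
 * Two families of cache enable/disable helpers follow. The _msr
 * variants use the msrset/msrclr instructions, which set or clear MSR
 * bits in a single instruction; they are only usable when the CPU is
 * configured with PVR2_USE_MSR_INSTR (checked in microblaze_cache_init
 * below). The _nomsr variants fall back to a read-modify-write of rmsr
 * through mfs/mts.
 */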
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory");
}
static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"ori	r12, r12, %0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"andi	r12, r12, ~%0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"ori	r12, r12, %0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"andi	r12, r12, ~%0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory", "r12");
}
/* Helper macro for computing the limits of cache range loops
 *
 * End address can be unaligned which is OK for the C implementation.
 * The ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
} while (0)
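/*
 * Worked example (illustrative values): with a 16-byte line and an 8kB
 * cache, start = 0x1004 and end = 0x5000 give
 * end = min(0x1004 + 0x2000, 0x5000) = 0x3004 and start &= ~0xf, i.e.
 * 0x1000 - so a range loop never walks more than one cache-size worth
 * of lines.
 */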
/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;"		\
					"bgtid	%0, 1b;"		\
					"addk	%0, %0, %1;"		\
					: : "r" (len), "r" (step)	\
					: "memory");			\
} while (0)
/* Used for wdc.flush/clear, which can use rB as an offset; that is not
 * possible with simple wdc or wic.
 *
 * start address is cache aligned.
 * end address is not aligned: if end is aligned, I have to subtract one
 * cacheline length because I can't flush/invalidate the next cacheline;
 * if it is not, I align it down because the whole line will be
 * flushed/invalidated anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;	\
	count = end - start;						\
	WARN_ON(count < 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;"		\
					"bgtid	%1, 1b;"		\
					"addk	%1, %1, %2;"		\
				: : "r" (start), "r" (count),		\
					"r" (step) : "memory");		\
} while (0)
/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	int volatile temp = 0;						\
	int align = ~(line_length - 1);					\
	end = ((end & align) == end) ? end - line_length : end & align;	\
	WARN_ON(end < start);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;"		\
					"cmpu	%0, %1, %2;"		\
					"bgtid	%0, 1b;"		\
					"addk	%1, %1, %3;"		\
				: : "r" (temp), "r" (start), "r" (end),	\
					"r" (line_length) : "memory");	\
} while (0)
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}
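/*
 * Naming convention for the helpers below: _msr_irq variants turn the
 * cache off via msrclr/msrset with interrupts disabled, _nomsr_irq
 * variants do the same through mfs/mts, and _noirq variants issue
 * wic/wdc with the cache left enabled and interrupts untouched.
 */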
static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}
static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}
static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}
static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}
static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;" \
				: : "r" (i));
#endif
}
static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}
static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
}
/* FIXME This is blind invalidation as expected,
 * but it can't be called on noMMU in microblaze_cache_init below
 *
 * MS: noMMU kernel won't boot if simple wdc is used
 * The reason should be that there are discarded data which the kernel
 * needs
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;" \
				: : "r" (i));
#endif
}
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
}
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}
static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;" \
				: : "r" (i));
#endif
}
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;" \
				: : "r" (i));
#endif
}
/* struct for wb caches and for wt caches */
struct scache *mbc;
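/*
 * scache field naming as used by the tables below: ie/id = icache
 * enable/disable, ifl/iflr = flush all/range, iin/iinr = invalidate
 * all/range; de/dd/dfl/dflr/din/dinr are the dcache counterparts.
 */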
/* new wb cache model */
static const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
/* Only the ie, id, de, dd functions differ from wb_msr */
static const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
/* Old wt cache model which disables irqs and turns the cache off */
static const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};
static const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};
/* New wt cache model for newer MicroBlaze versions */
static const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
static const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
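/*
 * Generic code does not use these tables directly; it goes through the
 * mbc pointer via the wrappers in asm/cacheflush.h (enable_icache(),
 * flush_dcache_range(), ...), so microblaze_cache_init below only has
 * to pick the table matching the detected hardware.
 */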
/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

#define INFO(s)	printk(KERN_INFO "cache: " s "\n")
void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			INFO("wb_msr");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_msr_noirq");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				INFO("wt_msr");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			INFO("wb_nomsr");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_nomsr_noirq");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				INFO("wt_nomsr");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}
	/* FIXME Invalidation is done in U-BOOT
	 * WT cache: data is already written to main memory
	 * WB cache: discarding data on noMMU caused the kernel not to boot
	 */
	/* invalidate_dcache(); */
	enable_dcache();

	invalidate_icache();
	enable_icache();
}