/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */
#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory");
}
static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory");
}
static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory");
}
static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory");
}
static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"ori	r12, r12, %0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory", "r12");
}
static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"andi	r12, r12, ~%0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_ICE) : "memory", "r12");
}
static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"ori	r12, r12, %0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory", "r12");
}
static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;"	\
				"nop;"			\
				"andi	r12, r12, ~%0;"	\
				"mts	rmsr, r12;"	\
				"nop;"			\
			: : "i" (MSR_DCE) : "memory", "r12");
}
/* Helper macro for computing the limits of cache range loops
 *
 * End address can be unaligned which is OK for the C implementation.
 * The ASM implementation aligns it in its ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	if (start < UINT_MAX - cache_size)				\
		end = min(start + cache_size, end);			\
	start &= align;							\
} while (0)
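/*
 * Worked example (illustrative only): with 16-byte lines and an 8 KB
 * (0x2000 byte) cache, start = 0x1005 and end = 0x9100 become
 * end = min(0x1005 + 0x2000, 0x9100) = 0x3005, since walking more than
 * one whole cache's worth of lines is pointless, and start &= ~0xf
 * gives 0x1000; end may stay unaligned, as noted above.
 */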
/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;"		\
					"bgtid	%0, 1b;"		\
					"addk	%0, %0, %1;"		\
					: : "r" (len), "r" (step)	\
					: "memory");			\
} while (0)
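#if 0
/* Sketch (kept out of the build): rough C equivalent of the ASM countdown
 * loop above, with wdc standing in for op. bgtid executes the addk in its
 * delay slot, so the line at offset 0 is processed too.
 */
int offset;
for (offset = cache_size - line_length; offset >= 0; offset -= line_length)
	__asm__ __volatile__ ("wdc	%0, r0;" : : "r" (offset) : "memory");
#endif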
/* Used for wdc.flush/clear, which can take an offset in rB; that is not
 * possible for plain wdc or wic.
 *
 * start address is cache aligned
 * end address is not aligned: if end is aligned then I have to subtract
 * cacheline length because I can't flush/invalidate the next cacheline.
 * If it is not aligned, I align it because I will flush/invalidate the
 * whole line anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;\
	count = end - start;						\
	WARN_ON(count < 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;"		\
					"bgtid	%1, 1b;"		\
					"addk	%1, %1, %2;"		\
					: : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0)
/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	unsigned int volatile temp = 0;					\
	unsigned int align = ~(line_length - 1);			\
	end = ((end & align) == end) ? end - line_length : end & align;\
	WARN_ON(end < start);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;"		\
					"cmpu	%0, %1, %2;"		\
					"bgtid	%0, 1b;"		\
					"addk	%1, %1, %3;"		\
				: : "r" (temp), "r" (start), "r" (end),	\
					"r" (line_length) : "memory");	\
} while (0)
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}
static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}
static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;"	\
				: : "r" (i));
#endif
}
static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}
static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}
static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
}
static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}
static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}
/*
 * FIXME It is blind invalidation, as expected,
 * but it can't be called on noMMU in microblaze_cache_init below
 *
 * MS: noMMU kernel won't boot if simple wdc is used
 * The reason should be that there are discarded data which the kernel needs
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
					wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
				: : "r" (i));
#endif
}
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
}
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}
static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;"	\
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}
static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc.flush	%0, r0;" \
					: : "r" (i));
#endif
}
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
				: : "r" (i));
#endif
}
/* struct for wb caches and for wt caches */
struct scache *mbc;

/* new wb cache model */
static const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
/* The only difference is in the ie, id, de, dd functions */
static const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};
/* Old wt cache model which disables irq and turns off the cache */
static const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};
static const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};
/* New wt cache model for newer Microblaze versions */
static const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
static const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
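#if 0
/* Sketch (assumption: the enable_icache()/flush_dcache_range()-style
 * wrappers live in asm/cacheflush.h): callers do not use the __* helpers
 * directly; they dispatch through whichever ops table
 * microblaze_cache_init() below installs in mbc, e.g.:
 */
mbc->ie();			/* enable the instruction cache */
mbc->dflr(start, end);		/* flush a data cache address range */
#endif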
/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f
void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			pr_info("wb_msr\n");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				pr_info("WB won't work properly\n");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				pr_info("wt_msr_noirq\n");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				pr_info("wt_msr\n");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			pr_info("wb_nomsr\n");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				pr_info("WB won't work properly\n");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				pr_info("wt_nomsr_noirq\n");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				pr_info("wt_nomsr\n");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}
/*
 * FIXME Invalidation is done in U-BOOT
 * WT cache: data is already written to main memory
 * WB cache: discarding data on noMMU caused the kernel not to boot
 */
	/* invalidate_dcache(); */
	enable_dcache();

	invalidate_icache();
	enable_icache();
}