/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */
#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
/* Exported functions */
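
/*
 * Enabling/disabling a cache toggles the ICE or DCE bit in the machine
 * status register. Cores built with C_USE_MSR_INSTR provide the atomic
 * msrset/msrclr instructions; other cores fall back to a
 * read-modify-write of rmsr via mfs/mts, clobbering r12.
 */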
void _enable_icache(void)
{
        if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
                __asm__ __volatile__ ("                 \
                                msrset  r0, %0;         \
                                nop;"
                                :
                                : "i" (MSR_ICE)
                                : "memory");
#else
                __asm__ __volatile__ ("                 \
                                mfs     r12, rmsr;      \
                                nop;                    \
                                ori     r12, r12, %0;   \
                                mts     rmsr, r12;      \
                                nop;"
                                :
                                : "i" (MSR_ICE)
                                : "memory", "r12");
#endif
        }
}
void _disable_icache(void)
{
        if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
                __asm__ __volatile__ ("                 \
                                msrclr  r0, %0;         \
                                nop;"
                                :
                                : "i" (MSR_ICE)
                                : "memory");
#else
                __asm__ __volatile__ ("                 \
                                mfs     r12, rmsr;      \
                                nop;                    \
                                andi    r12, r12, ~%0;  \
                                mts     rmsr, r12;      \
                                nop;"
                                :
                                : "i" (MSR_ICE)
                                : "memory", "r12");
#endif
        }
}
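
/*
 * wic (write to instruction cache) invalidates the icache line that
 * contains the given address.
 */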
void _invalidate_icache(unsigned int addr)
{
        if (cpuinfo.use_icache) {
                __asm__ __volatile__ ("                 \
                                wic     %0, r0"
                                :
                                : "r" (addr));
        }
}
void _enable_dcache(void)
{
        if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
                __asm__ __volatile__ ("                 \
                                msrset  r0, %0;         \
                                nop;"
                                :
                                : "i" (MSR_DCE)
                                : "memory");
#else
                __asm__ __volatile__ ("                 \
                                mfs     r12, rmsr;      \
                                nop;                    \
                                ori     r12, r12, %0;   \
                                mts     rmsr, r12;      \
                                nop;"
                                :
                                : "i" (MSR_DCE)
                                : "memory", "r12");
#endif
        }
}
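
/*
 * Note: unlike _disable_icache(), this clears the enable bit
 * unconditionally instead of checking cpuinfo.use_dcache first.
 */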
void _disable_dcache(void)
{
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
        __asm__ __volatile__ ("                 \
                        msrclr  r0, %0;         \
                        nop;"
                        :
                        : "i" (MSR_DCE)
                        : "memory");
#else
        __asm__ __volatile__ ("                 \
                        mfs     r12, rmsr;      \
                        nop;                    \
                        andi    r12, r12, ~%0;  \
                        mts     rmsr, r12;      \
                        nop;"
                        :
                        : "i" (MSR_DCE)
                        : "memory", "r12");
#endif
}
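
/*
 * wdc (write to data cache) invalidates the dcache line that contains
 * the given address. This assumes a write-through dcache, where an
 * invalidate can never discard dirty data.
 */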
void _invalidate_dcache(unsigned int addr)
{
        __asm__ __volatile__ ("                 \
                        wdc     %0, r0"
                        :
                        : "r" (addr));
}
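
/*
 * Full invalidate: the invalidate instructions select a line by the low
 * address bits, so stepping a bare index from 0 to the cache size in
 * line-sized increments touches every line (hence the loop comment
 * below about not needing a cache base address).
 */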
void __invalidate_icache_all(void)
{
        unsigned int i;
        unsigned long flags;

        if (cpuinfo.use_icache) {
                local_irq_save(flags);

                /*
                 * Just loop through cache size and invalidate,
                 * no need to add CACHE_BASE address.
                 */
                for (i = 0; i < cpuinfo.icache_size;
                        i += cpuinfo.icache_line)
                        __invalidate_icache(i);

                local_irq_restore(flags);
        }
}
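
/*
 * Ranged invalidate: the span is first clamped to one cache size, since
 * anything longer would only re-walk the same lines, then widened to
 * whole cache lines before invalidating line by line.
 */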
void __invalidate_icache_range(unsigned long start, unsigned long end)
{
        unsigned int i;
        unsigned long flags;
        unsigned int align;

        if (cpuinfo.use_icache) {
                /*
                 * No need to cover entire cache range,
                 * just cover cache footprint.
                 */
                end = min(start + cpuinfo.icache_size, end);
                align = ~(cpuinfo.icache_line - 1);
                start &= align; /* Make sure we are aligned */
                /* Push end up to the next cache line */
                end = ((end & align) + cpuinfo.icache_line);

                local_irq_save(flags);

                for (i = start; i < end; i += cpuinfo.icache_line)
                        __invalidate_icache(i);

                local_irq_restore(flags);
        }
}
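
/*
 * The page- and user-range-level hooks below do no per-line tracking;
 * they simply invalidate the whole icache.
 */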
void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
{
        __invalidate_icache_all();
}
void __invalidate_icache_user_range(struct vm_area_struct *vma,
                                struct page *page, unsigned long adr,
                                int len)
{
        __invalidate_icache_all();
}
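
/*
 * The 8-byte window is sized for the two 32-bit instructions of the
 * signal return trampoline.
 */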
void __invalidate_cache_sigtramp(unsigned long addr)
{
        __invalidate_icache_range(addr, addr + 8);
}
void __invalidate_dcache_all(void)
{
        unsigned int i;
        unsigned long flags;

        if (cpuinfo.use_dcache) {
                local_irq_save(flags);

                /*
                 * Just loop through cache size and invalidate,
                 * no need to add CACHE_BASE address.
                 */
                for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line)
                        __invalidate_dcache(i);

                local_irq_restore(flags);
        }
}
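
/* Same footprint clamping and line alignment as __invalidate_icache_range(). */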
void __invalidate_dcache_range(unsigned long start, unsigned long end)
{
        unsigned int i;
        unsigned long flags;
        unsigned int align;

        if (cpuinfo.use_dcache) {
                /*
                 * No need to cover entire cache range,
                 * just cover cache footprint.
                 */
                end = min(start + cpuinfo.dcache_size, end);
                align = ~(cpuinfo.dcache_line - 1);
                start &= align; /* Make sure we are aligned */
                /* Push end up to the next cache line */
                end = ((end & align) + cpuinfo.dcache_line);

                local_irq_save(flags);

                for (i = start; i < end; i += cpuinfo.dcache_line)
                        __invalidate_dcache(i);

                local_irq_restore(flags);
        }
}
void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
{
        __invalidate_dcache_all();
}
void __invalidate_dcache_user_range(struct vm_area_struct *vma,
                                struct page *page, unsigned long adr,
                                int len)
{
        __invalidate_dcache_all();
}