[linux-ginger.git] arch/microblaze/kernel/cpu/cache.c
/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>

/* Exported functions */

void _enable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		/* Set the instruction-cache enable bit in the MSR directly */
		__asm__ __volatile__ ("				\
				msrset	r0, %0;			\
				nop; "				\
				:				\
				: "i" (MSR_ICE)			\
				: "memory");
#else
		/* No msrset/msrclr: read-modify-write the MSR through r12 */
		__asm__ __volatile__ ("				\
				mfs	r12, rmsr;		\
				nop;				\
				ori	r12, r12, %0;		\
				mts	rmsr, r12;		\
				nop; "				\
				:				\
				: "i" (MSR_ICE)			\
				: "memory", "r12");
#endif
	}
}
void _disable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("				\
				msrclr	r0, %0;			\
				nop; "				\
				:				\
				: "i" (MSR_ICE)			\
				: "memory");
#else
		__asm__ __volatile__ ("				\
				mfs	r12, rmsr;		\
				nop;				\
				andi	r12, r12, ~%0;		\
				mts	rmsr, r12;		\
				nop; "				\
				:				\
				: "i" (MSR_ICE)			\
				: "memory", "r12");
#endif
	}
}
void _invalidate_icache(unsigned int addr)
{
	if (cpuinfo.use_icache) {
		/* wic invalidates the instruction-cache line containing addr */
		__asm__ __volatile__ ("				\
				wic	%0, r0"			\
				:				\
				: "r" (addr));
	}
}
void _enable_dcache(void)
{
	if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("				\
				msrset	r0, %0;			\
				nop; "				\
				:				\
				: "i" (MSR_DCE)			\
				: "memory");
#else
		__asm__ __volatile__ ("				\
				mfs	r12, rmsr;		\
				nop;				\
				ori	r12, r12, %0;		\
				mts	rmsr, r12;		\
				nop; "				\
				:				\
				: "i" (MSR_DCE)			\
				: "memory", "r12");
#endif
	}
}
void _disable_dcache(void)
{
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	__asm__ __volatile__ ("					\
			msrclr	r0, %0;				\
			nop; "					\
			:					\
			: "i" (MSR_DCE)				\
			: "memory");
#else
	__asm__ __volatile__ ("					\
			mfs	r12, rmsr;			\
			nop;					\
			andi	r12, r12, ~%0;			\
			mts	rmsr, r12;			\
			nop; "					\
			:					\
			: "i" (MSR_DCE)				\
			: "memory", "r12");
#endif
}
void _invalidate_dcache(unsigned int addr)
{
	/* wdc invalidates the data-cache line containing addr */
	__asm__ __volatile__ ("					\
			wdc	%0, r0"				\
			:					\
			: "r" (addr));
}
void __invalidate_icache_all(void)
{
	unsigned int i;
	unsigned flags;

	if (cpuinfo.use_icache) {
		local_irq_save(flags);
		__disable_icache();

		/* Just loop through cache size and invalidate, no need to add
			CACHE_BASE address */
		for (i = 0; i < cpuinfo.icache_size;
			i += cpuinfo.icache_line)
				__invalidate_icache(i);

		__enable_icache();
		local_irq_restore(flags);
	}
}
void __invalidate_icache_range(unsigned long start, unsigned long end)
{
	unsigned int i;
	unsigned flags;
	unsigned int align;

	if (cpuinfo.use_icache) {
		/*
		 * No need to cover entire cache range,
		 * just cover cache footprint
		 */
		end = min(start + cpuinfo.icache_size, end);
		align = ~(cpuinfo.icache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.icache_line);

		local_irq_save(flags);
		__disable_icache();

		for (i = start; i < end; i += cpuinfo.icache_line)
			__invalidate_icache(i);

		__enable_icache();
		local_irq_restore(flags);
	}
}
void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_icache_all();
}

void __invalidate_icache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_icache_all();
}

void __invalidate_cache_sigtramp(unsigned long addr)
{
	__invalidate_icache_range(addr, addr + 8);
}
void __invalidate_dcache_all(void)
{
	unsigned int i;
	unsigned flags;

	if (cpuinfo.use_dcache) {
		local_irq_save(flags);
		__disable_dcache();

		/*
		 * Just loop through cache size and invalidate,
		 * no need to add CACHE_BASE address
		 */
		for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line)
				__invalidate_dcache(i);

		__enable_dcache();
		local_irq_restore(flags);
	}
}
void __invalidate_dcache_range(unsigned long start, unsigned long end)
{
	unsigned int i;
	unsigned flags;
	unsigned int align;

	if (cpuinfo.use_dcache) {
		/*
		 * No need to cover entire cache range,
		 * just cover cache footprint
		 */
		end = min(start + cpuinfo.dcache_size, end);
		align = ~(cpuinfo.dcache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.dcache_line);

		local_irq_save(flags);
		__disable_dcache();

		for (i = start; i < end; i += cpuinfo.dcache_line)
			__invalidate_dcache(i);

		__enable_dcache();
		local_irq_restore(flags);
	}
}
void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_dcache_all();
}

void __invalidate_dcache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_dcache_all();
}