/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);
/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations. Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address. This breaks with tradition
 *    set by the R4000. To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code. To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
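
/*
 * Illustrative note: widening the 32-bit KSEG0 base 0x80000000 through a
 * signed type yields the canonical 64-bit address 0xffffffff80000000, which
 * is exactly the sign-extended CKSEG0 form that 64-bit code needs.
 */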

#define _cache_op(insn, op, addr)					\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	" insn("%0", "%1") "				\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

#define cache_op(op, addr)						\
	_cache_op(kernel_cache, op, addr)

static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op, addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})
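
/*
 * Note on the fixup above: if the cache op at label 1 faults, the __ex_table
 * entry sends control to label 3 in .fixup, which loads -EFAULT into __err
 * and jumps back to label 2, so the expression evaluates to 0 on success and
 * to -EFAULT for a bad address.
 */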

#define protected_cachee_op(op, addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches. Only a single cache line gets unnecessarily invalidated here, so
 * the penalty is modest.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache_unroll(times, insn, op, addr, lsize) do {			\
	int i = 0;							\
	unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize)));	\
} while (0)
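
/*
 * For illustration: cache_unroll(4, kernel_cache, op, addr, 32) expands into
 * four consecutive cache ops on addr, addr + 32, addr + 64 and addr + 96,
 * i.e. one straight-line op per cache line with no loop overhead.
 */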

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, kernel_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}
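
/*
 * For example, __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D,
 * Hit_Writeback_Inv_D, 16, ) below generates blast_dcache16(),
 * blast_dcache16_page() and blast_dcache16_page_indexed().
 */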

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, user_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
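
/*
 * The _user_page variants mirror the blast_xxx_page() helpers above but use
 * user_cache, which asm-eva.h maps to the EVA cachee instruction on
 * CONFIG_EVA kernels and to a normal cache op otherwise.
 */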

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start, \
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	if (!uaccess_kernel()) {					\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
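
/*
 * The expansions above provide, for example, blast_dcache_range(),
 * protected_blast_dcache_range() and blast_inv_dcache_range(), each taking a
 * (start, end) virtual address range and operating on it line by line.
 */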

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_node(long node)		\
{									\
	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}
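
/*
 * Unlike blast_scache##lsize(), which indexes from INDEX_BASE, the _node
 * variant bases its index walk at CAC_BASE | nid_to_addrbase(node), so e.g.
 * blast_scache32_node(1) writes back and invalidates the whole scache of
 * NUMA node 1.
 */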

__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */