/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/mipsmtregs.h>
/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %0, %1					\n"	\
	"	.set pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))
#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug.  Optionally force single-threaded
 * execution during I-cache flushes.
 */

#define PROTECT_CACHE_FLUSHES 1
#ifdef PROTECT_CACHE_FLUSHES
extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);
#define BEGIN_MT_IPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protiflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_IPROT \
	if (mt_protiflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		ehb();					\
		local_irq_restore(flags);		\
	}
#define BEGIN_MT_DPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protdflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_DPROT \
	if (mt_protdflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		ehb();					\
		local_irq_restore(flags);		\
	}
#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */
#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	BEGIN_MT_IPROT							\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	END_MT_IPROT							\
	}
#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	BEGIN_MT_DPROT							\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue						\
	END_MT_DPROT							\
	}
#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue
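
/*
 * Illustrative expansion (sketch): with CONFIG_MIPS_MT, a sequence such as
 *
 *	__dflush_prologue
 *	cache_op(Hit_Writeback_Inv_D, addr);
 *	__dflush_epilogue
 *
 * repeats the cache operation mt_n_dflushes times and, when
 * PROTECT_CACHE_FLUSHES is enabled, brackets the loop with the
 * single-threading protection defined above.
 */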
#else /* CONFIG_MIPS_MT */
#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	cache_op(Hit_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}
#define protected_cache_op(op,addr)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"1:	cache %0, (%1)					\n"	\
	"2:	.set pop					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"STR(PTR)" 1b, 2b				\n"	\
	"	.previous"						\
	:								\
	: "i" (op), "r" (addr))
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	protected_cache_op(Hit_Invalidate_I, addr);
}
/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here, so the penalty isn't severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}
/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize(void)			\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
	                       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
	                       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}
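
/*
 * Way-walk note (summary, not from the original comments): waybit is the bit
 * position at which the way number enters an index-type cache address, so
 * ws_inc = 1 << waybit steps from one way to the next and
 * ws_end = ways << waybit is one past the last way.  OR-ing ws into the
 * CKSEG0-based index address therefore visits every way of every set the
 * inner loop touches.
 */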
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
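
/*
 * Generated names (illustrative): the instantiations above provide, among
 * others,
 *
 *	blast_dcache16();                   whole D-cache, 16-byte lines
 *	blast_icache32_page(page);          one page via hit-type ops
 *	blast_scache64_page_indexed(page);  one page via index-type ops
 *	blast_inv_dcache32_page(page);      invalidate without writeback
 *
 * Callers are expected to pick the variant matching the line size probed for
 * the current CPU.
 */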
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
	__##pfx##flush_prologue						\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
	__##pfx##flush_epilogue						\
}
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
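
/*
 * Generated range helpers (illustrative): the instantiations above provide
 *
 *	protected_blast_dcache_range(start, end);
 *	protected_blast_scache_range(start, end);
 *	protected_blast_icache_range(start, end);
 *	blast_dcache_range(start, end);
 *	blast_scache_range(start, end);
 *	blast_inv_dcache_range(start, end);
 *	blast_inv_scache_range(start, end);
 *
 * The protected_ variants survive faulting addresses by way of the
 * __ex_table entry emitted in protected_cache_op().
 */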

#endif /* _ASM_R4KCACHE_H */