/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2004 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
extern void sb1_dma_init(void);

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned short icache_line_size;
static unsigned short dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned short icache_assoc;
static unsigned short dcache_assoc;

static unsigned short icache_sets;
static unsigned short dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;
/*
 * The dcache is fully coherent to the system, with one
 * big caveat:  the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it.
 */
#define cache_set_op(op, addr)						\
	__asm__ __volatile__(						\
	"	.set	noreorder		\n"			\
	"	.set	mips64\n\t		\n"			\
	"	cache	%0, (0<<13)(%1)		\n"			\
	"	cache	%0, (1<<13)(%1)		\n"			\
	"	cache	%0, (2<<13)(%1)		\n"			\
	"	cache	%0, (3<<13)(%1)		\n"			\
	"	.set	mips0			\n"			\
	"	.set	reorder"					\
	:								\
	: "i" (op), "r" (addr))

#define sync()								\
	__asm__ __volatile__(						\
	"	.set	mips64\n\t		\n"			\
	"	sync				\n"			\
	"	.set	mips0")

#define mispredict()							\
	__asm__ __volatile__(						\
	"	bnezl  $0, 1f		\n" /* Force mispredict */	\
	"1:				\n");
/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
	unsigned long addr = 0;

	while (addr < dcache_line_size * dcache_sets) {
		cache_set_op(Index_Writeback_Inv_D, addr);
		addr += dcache_line_size;
	}
}
/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to clear both sets
 * (bit 12 set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
	unsigned long end)
{
	unsigned long index;

	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		index = start & dcache_index_mask;
		cache_set_op(Index_Writeback_Inv_D, index);
		cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12));
		start += dcache_line_size;
	}
	sync();
}
/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
	unsigned long end)
{
	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
		start += dcache_line_size;
	}
	sync();
}
/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
	unsigned long addr = 0;

	while (addr < icache_line_size * icache_sets) {
		cache_set_op(Index_Invalidate_I, addr);
		addr += icache_line_size;
	}
}
/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	if (!(vma->vm_flags & VM_EXEC))
		return;
#endif

	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * Bumping the ASID is probably cheaper than the flush ...
	 */
	if (cpu_context(cpu, vma->vm_mm) != 0)
		drop_mmu_context(vma->vm_mm, cpu);
}
#ifdef CONFIG_SMP
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static void sb1_flush_cache_page_ipi(void *info)
{
	struct flush_cache_page_args *args = info;

	local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;
	on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
	unsigned long pfn)
	__attribute__((alias("local_sb1_flush_cache_page")));
#endif
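/*
 * A note on the IPI pattern used above and below: on_each_cpu() runs
 * the handler on every online CPU, including the caller, and the
 * trailing (1, 1) arguments are the retry and wait flags of this
 * kernel's four-argument on_each_cpu(), so the call does not return
 * until every CPU has finished its local flush.  That synchronous
 * wait is what makes it safe to pass a pointer to an argument struct
 * that lives on the caller's stack.
 */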
/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	start &= ~(icache_line_size - 1);
	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
		start += icache_line_size;
	}
	mispredict();
	sync();
}
/*
 * Invalidate all caches on this CPU
 */
static void __attribute_used__ local_sb1___flush_cache_all(void)
{
	__sb1_writeback_inv_dcache_all();
	__sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
void sb1___flush_cache_all_ipi(void *ignored)
	__attribute__((alias("local_sb1___flush_cache_all")));

static void sb1___flush_cache_all(void)
{
	on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
}
#else
void sb1___flush_cache_all(void)
	__attribute__((alias("local_sb1___flush_cache_all")));
#endif
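/*
 * The uniprocessor variants in this file use GCC's alias attribute:
 * the exported symbol becomes another name for the local_ function,
 * so the UP build pays no IPI or wrapper overhead.  It is roughly
 * equivalent to this hypothetical wrapper:
 *
 *	void sb1___flush_cache_all(void)
 *	{
 *		local_sb1___flush_cache_all();
 *	}
 *
 * except that with the alias there is no extra call at all.
 */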
/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */

static void local_sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	/* Just wb-inv the whole dcache if the range is big enough */
	if ((end - start) > dcache_range_cutoff)
		__sb1_writeback_inv_dcache_all();
	else
		__sb1_writeback_inv_dcache_range(start, end);

	/* Just flush the whole icache if the range is big enough */
	if ((end - start) > icache_range_cutoff)
		__sb1_flush_icache_all();
	else
		__sb1_flush_icache_range(start, end);
}
#ifdef CONFIG_SMP
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
	struct flush_icache_range_args *args = info;

	local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;
	on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end)
	__attribute__((alias("local_sb1_flush_icache_range")));
#endif
/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	unsigned long start;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	if (!(vma->vm_flags & VM_EXEC))
		return;
#endif

	/* Need to writeback any dirty data for that page, we have the PA */
	start = (unsigned long)(page - mem_map) << PAGE_SHIFT;
	__sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);

	/*
	 * If there's a context, bump the ASID (cheaper than a flush,
	 * since we don't know VAs!)
	 */
	if (cpu_context(cpu, vma->vm_mm) != 0) {
		drop_mmu_context(vma->vm_mm, cpu);
	}
}
#ifdef CONFIG_SMP
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static void sb1_flush_icache_page_ipi(void *info)
{
	struct flush_icache_page_args *args = info;
	local_sb1_flush_icache_page(args->vma, args->page);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;
	on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page)
	__attribute__((alias("local_sb1_flush_icache_page")));
#endif
/*
 * A signal trampoline must fit into a single cacheline.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
	cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask);
	cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask);
	cache_set_op(Index_Invalidate_I, addr & icache_index_mask);
	mispredict();
}

#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
	unsigned long iaddr = (unsigned long) info;
	local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
	on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr)
	__attribute__((alias("local_sb1_flush_cache_sigtramp")));
#endif
/*
 * Anything that just flushes dcache state can be ignored, as we're always
 * coherent in dcache space.  This is just a dummy function that all the
 * nop'ed routines point to.
 */
static void sb1_nop(void)
{
}
/*
 * Cache set values (from the mips64 spec):
 * 0 - 64
 * 1 - 128
 * 2 - 256
 * 3 - 512
 * 4 - 1024
 * 5 - 2048
 * 6 - 4096
 * 7 - Reserved
 */

static unsigned int decode_cache_sets(unsigned int config_field)
{
	if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 6));
}
/*
 * Cache line size values (from the mips64 spec):
 * 0 - No cache present.
 * 1 - 4 bytes
 * 2 - 8 bytes
 * 3 - 16 bytes
 * 4 - 32 bytes
 * 5 - 64 bytes
 * 6 - 128 bytes
 * 7 - Reserved
 */

static unsigned int decode_cache_line_size(unsigned int config_field)
{
	if (config_field == 0) {
		return 0;
	} else if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 1));
}
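/*
 * Worked example of the two decoders above, assuming a 32 KB, 4-way,
 * 32-byte-line cache like the SB1's (figures for illustration only):
 *
 *	decode_cache_sets(2)      = 1 << (2 + 6) = 256 sets per way
 *	decode_cache_line_size(4) = 1 << (4 + 1) = 32 bytes per line
 *
 * 256 sets * 32 bytes * 4 ways = 32768 bytes = 32 KB per cache.
 */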
/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache Associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 *  9:7  Dcache Associativity
 */

static char *way_string[] = {
	"direct mapped", "2-way", "3-way", "4-way",
	"5-way", "6-way", "7-way", "8-way",
};
static __init void probe_cache_sizes(void)
{
	u32 config1;

	config1 = read_c0_config1();
	icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
	dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
	icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
	dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
	icache_assoc = ((config1 >> 16) & 0x7) + 1;
	dcache_assoc = ((config1 >> 7) & 0x7) + 1;
	icache_size = icache_line_size * icache_sets * icache_assoc;
	dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
	/* Need to remove non-index bits for index ops */
	icache_index_mask = (icache_sets - 1) * icache_line_size;
	dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
	/*
	 * These are for choosing range (index ops) versus all.
	 * icache flushes all ways for each set, so drop icache_assoc.
	 * dcache flushes all ways and each setting of bit 12 for each
	 * index, so drop dcache_assoc and halve the dcache_sets.
	 */
	icache_range_cutoff = icache_sets * icache_line_size;
	dcache_range_cutoff = (dcache_sets / 2) * icache_line_size;

	printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
	       icache_size >> 10, way_string[icache_assoc - 1],
	       icache_line_size);
	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[dcache_assoc - 1],
	       dcache_line_size);
}
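/*
 * Worked numbers for the derived values, again assuming the 32 KB,
 * 4-way, 32-byte-line SB1 caches (illustrative only):
 *
 *	icache_index_mask   = (256 - 1) * 32 = 0x1fe0
 *	icache_range_cutoff = 256 * 32       = 8192 bytes
 *	dcache_range_cutoff = (256 / 2) * 32 = 4096 bytes
 *
 * A range flush issues one cache_set_op (four index ops) per line,
 * and the dcache range variant does so twice per line (both settings
 * of bit 12), so beyond these sizes walking the whole cache is the
 * cheaper option.
 */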
/*
 * This is called from cache.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs.
 */
void sb1_cache_init(void)
{
	extern char except_vec2_sb1;
	extern char handle_vec2_sb1;

	/* Special cache error handler for SB1 */
	set_uncached_handler(0x100, &except_vec2_sb1, 0x80);

	probe_cache_sizes();

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
	sb1_dma_init();
#endif
	/*
	 * None of these are needed for the SB1 - the Dcache is
	 * physically indexed and tagged, so no virtual aliasing can
	 * occur.
	 */
	flush_cache_range = (void *) sb1_nop;
	flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
	flush_cache_all = sb1_nop;

	/* These routines are for Icache coherence with the Dcache */
	flush_icache_range = sb1_flush_icache_range;
	flush_icache_page = sb1_flush_icache_page;
	flush_icache_all = __sb1_flush_icache_all; /* local only */

	/* This implies an Icache flush too, so can't be nop'ed */
	flush_cache_page = sb1_flush_cache_page;

	flush_cache_sigtramp = sb1_flush_cache_sigtramp;
	local_flush_data_cache_page = (void *) sb1_nop;
	flush_data_cache_page = (void *) sb1_nop;

	/* Full flush */
	__flush_cache_all = sb1___flush_cache_all;
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * This is the only way to force the update of K0 to complete
	 * before subsequent instruction fetch.
	 */
	__asm__ __volatile__(
		".set	push			\n"
	"	.set	noat			\n"
	"	.set	noreorder		\n"
	"	.set	mips3			\n"
	"	" STR(PTR_LA) "	$1, 1f		\n"
	"	" STR(MTC0) "	$1, $14		\n"
	"	eret				\n"
	"1:	.set	pop"
	:
	:
	: "memory");

	flush_cache_all();
}