/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2004 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/hardirq.h>

#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>

extern void sb1_dma_init(void);
/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned short icache_line_size;
static unsigned short dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned short icache_assoc;
static unsigned short dcache_assoc;

static unsigned short icache_sets;
static unsigned short dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;
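
/*
 * Run func on every CPU, including the caller: smp_call_function()
 * reaches the other CPUs and waits for them, then func runs locally.
 * (On a UP build smp_call_function() is a no-op, leaving just the
 * local call.)
 */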
static inline void sb1_on_each_cpu(void (*func) (void *info), void *info,
				   int retry, int wait)
{
	preempt_disable();
	smp_call_function(func, info, retry, wait);
	func(info);
	preempt_enable();
}
/*
 * The dcache is fully coherent to the system, with one
 * big caveat:  the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it.
 */
#define cache_set_op(op, addr)					\
	__asm__ __volatile__(					\
	"	.set	noreorder		\n"		\
	"	.set	mips64\n\t		\n"		\
	"	cache	%0, (0<<13)(%1)		\n"		\
	"	cache	%0, (1<<13)(%1)		\n"		\
	"	cache	%0, (2<<13)(%1)		\n"		\
	"	cache	%0, (3<<13)(%1)		\n"		\
	"	.set	mips0			\n"		\
	"	.set	reorder"				\
	:							\
	: "i" (op), "r" (addr))
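
/*
 * A worked example of the way stepping above, assuming the SB1's
 * 32kB, 4-way L1 organization: each way is 8kB (256 sets of 32-byte
 * lines), so for an index op the way select sits at bits 13..14.
 * Issuing the op at addr, addr | (1<<13), addr | (2<<13) and
 * addr | (3<<13) thus hits the same set in each of the four ways.
 */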
#define sync()							\
	__asm__ __volatile__(					\
	"	.set	mips64\n\t		\n"		\
	"	sync				\n"		\
	"	.set	mips0")
#define mispredict()						\
	__asm__ __volatile__(					\
	"	bnezl  $0, 1f		\n" /* Force mispredict */ \
	"1:				\n");
/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
	unsigned long addr = 0;

	while (addr < dcache_line_size * dcache_sets) {
		cache_set_op(Index_Writeback_Inv_D, addr);
		addr += dcache_line_size;
	}
}
/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to clear both sets
 * (bit 12 set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
	unsigned long end)
{
	unsigned long index;

	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		index = start & dcache_index_mask;
		cache_set_op(Index_Writeback_Inv_D, index);
		cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12));
		start += dcache_line_size;
	}
	sync();
}
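
/*
 * A worked example of the bit 12 handling above, assuming 32kB,
 * 4-way, 32-byte lines: dcache_index_mask is (256 - 1) * 32 = 0x1fe0,
 * so the index spans bits 5..12.  With 4kB pages, bits 5..11 of a
 * virtual address match the physical address, but bit 12 belongs to
 * the page number and may differ; flushing both index and
 * index ^ (1<<12) covers the line wherever the physical index put it.
 */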
/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
	unsigned long end)
{
	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
		start += dcache_line_size;
	}
	sync();
}
/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
	unsigned long addr = 0;

	while (addr < icache_line_size * icache_sets) {
		cache_set_op(Index_Invalidate_I, addr);
		addr += icache_line_size;
	}
}
/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	start &= ~(icache_line_size - 1);
	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
		start += icache_line_size;
	}
	mispredict();
	sync();
}
/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	if (!(vma->vm_flags & VM_EXEC))
		return;
#endif

	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * Bumping the ASID is probably cheaper than the flush ...
	 */
	if (vma->vm_mm == current->active_mm) {
		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
}
#ifdef CONFIG_SMP
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static void sb1_flush_cache_page_ipi(void *info)
{
	struct flush_cache_page_args *args = info;

	local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;
	sb1_on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
	__attribute__((alias("local_sb1_flush_cache_page")));
#endif
#ifdef CONFIG_SMP
static void sb1_flush_cache_data_page_ipi(void *info)
{
	unsigned long start = (unsigned long)info;

	__sb1_writeback_inv_dcache_range(start, start + PAGE_SIZE);
}

static void sb1_flush_cache_data_page(unsigned long addr)
{
	if (in_atomic())
		__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
	else
		on_each_cpu(sb1_flush_cache_data_page_ipi, (void *) addr, 1, 1);
}
#else
static void local_sb1_flush_cache_data_page(unsigned long addr)
{
	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
}

void sb1_flush_cache_data_page(unsigned long)
	__attribute__((alias("local_sb1_flush_cache_data_page")));
#endif
/*
 * Invalidate all caches on this CPU
 */
static void __attribute_used__ local_sb1___flush_cache_all(void)
{
	__sb1_writeback_inv_dcache_all();
	__sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
void sb1___flush_cache_all_ipi(void *ignored)
	__attribute__((alias("local_sb1___flush_cache_all")));

static void sb1___flush_cache_all(void)
{
	sb1_on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
}
#else
void sb1___flush_cache_all(void)
	__attribute__((alias("local_sb1___flush_cache_all")));
#endif
/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */
static void local_sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	/* Just wb-inv the whole dcache if the range is big enough */
	if ((end - start) > dcache_range_cutoff)
		__sb1_writeback_inv_dcache_all();
	else
		__sb1_writeback_inv_dcache_range(start, end);

	/* Just flush the whole icache if the range is big enough */
	if ((end - start) > icache_range_cutoff)
		__sb1_flush_icache_all();
	else
		__sb1_flush_icache_range(start, end);
}
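
/*
 * Rough cost model behind the cutoffs: the range loops issue one
 * index op per line (each op covering all the ways), so once the
 * byte range exceeds one way of the cache, flushing everything is
 * no more work than walking the range.
 */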
#ifdef CONFIG_SMP
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
	struct flush_icache_range_args *args = info;

	local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;
	sb1_on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end)
	__attribute__((alias("local_sb1_flush_icache_range")));
#endif
/*
 * A signal trampoline must fit into a single cacheline.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
	cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask);
	cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask);
	cache_set_op(Index_Invalidate_I, addr & icache_index_mask);
	mispredict();
}
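
/*
 * Note the single-line mirror of __sb1_writeback_inv_dcache_range
 * above: both settings of bit 12 are written back for the dcache
 * line, then the matching icache index is invalidated.
 */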
#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
	unsigned long iaddr = (unsigned long) info;
	local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
	sb1_on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr)
	__attribute__((alias("local_sb1_flush_cache_sigtramp")));
#endif
/*
 * Anything that just flushes dcache state can be ignored, as we're always
 * coherent in dcache space.  This is just a dummy function that all the
 * nop'ed routines point to
 */
static void sb1_nop(void)
{
}
/*
 * Cache set values (from the mips64 spec):
 * 0 - 64, 1 - 128, 2 - 256, 3 - 512,
 * 4 - 1024, 5 - 2048, 6 - 4096, 7 - Reserved
 */
static unsigned int decode_cache_sets(unsigned int config_field)
{
	if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 6));
}
/*
 * Cache line size values (from the mips64 spec):
 * 0 - No cache present, 1 - 4 bytes, 2 - 8 bytes, 3 - 16 bytes,
 * 4 - 32 bytes, 5 - 64 bytes, 6 - 128 bytes, 7 - Reserved
 */
static unsigned int decode_cache_line_size(unsigned int config_field)
{
	if (config_field == 0) {
		return 0;
	} else if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 1));
}
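
/*
 * Example decode (spec formulas, not probed values): a line size
 * field of 4 gives 1 << (4 + 1) = 32 bytes, and a sets field of 2
 * gives 1 << (2 + 6) = 256 sets per way - together the geometry
 * you'd expect for a 32kB, 4-way cache.
 */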
/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 *  9:7  Dcache associativity
 */
static char *way_string[] = {
	"direct mapped", "2-way", "3-way", "4-way",
	"5-way", "6-way", "7-way", "8-way",
};
static __init void probe_cache_sizes(void)
{
	u32 config1;

	config1 = read_c0_config1();
	icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
	dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
	icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
	dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
	icache_assoc = ((config1 >> 16) & 0x7) + 1;
	dcache_assoc = ((config1 >> 7) & 0x7) + 1;
	icache_size = icache_line_size * icache_sets * icache_assoc;
	dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
	/* Need to remove non-index bits for index ops */
	icache_index_mask = (icache_sets - 1) * icache_line_size;
	dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
	/*
	 * These are for choosing range (index ops) versus all.
	 * icache flushes all ways for each set, so drop icache_assoc.
	 * dcache flushes all ways and each setting of bit 12 for each
	 * index, so drop dcache_assoc and halve the dcache_sets.
	 */
	icache_range_cutoff = icache_sets * icache_line_size;
	dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;

	printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
	       icache_size >> 10, way_string[icache_assoc - 1],
	       icache_line_size);
	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[dcache_assoc - 1],
	       dcache_line_size);
}
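
/*
 * Sanity check on the cutoffs, assuming 256-set, 32-byte-line, 4-way
 * caches: icache_range_cutoff = 256 * 32 = 8kB, one full way, since
 * each index op already flushes all four ways.  dcache_range_cutoff
 * halves that to 4kB because the dcache range loop also issues both
 * bit 12 variants per line.
 */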
/*
 * This is called from cache.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs
 */
void sb1_cache_init(void)
{
	extern char except_vec2_sb1;

	/* Special cache error handler for SB1 */
	set_uncached_handler(0x100, &except_vec2_sb1, 0x80);

	probe_cache_sizes();

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
	sb1_dma_init();
#endif

	/*
	 * None of these are needed for the SB1 - the Dcache is
	 * physically indexed and tagged, so no virtual aliasing can
	 * occur
	 */
	flush_cache_range = (void *) sb1_nop;
	flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
	flush_cache_all = sb1_nop;

	/* These routines are for Icache coherence with the Dcache */
	flush_icache_range = sb1_flush_icache_range;
	flush_icache_all = __sb1_flush_icache_all; /* local only */

	/* This implies an Icache flush too, so can't be nop'ed */
	flush_cache_page = sb1_flush_cache_page;

	flush_cache_sigtramp = sb1_flush_cache_sigtramp;
	local_flush_data_cache_page = (void *) sb1_nop;
	flush_data_cache_page = sb1_flush_cache_data_page;

	/* Full flush */
	__flush_cache_all = sb1___flush_cache_all;

	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
	/*
	 * This is the only way to force the update of K0 to complete
	 * before subsequent instruction fetch.
	 */
	__asm__ __volatile__(
		".set	push			\n"
	"	.set	noat			\n"
	"	.set	noreorder		\n"
	"	.set	mips3			\n"
	"	" STR(PTR_LA) "	$1, 1f		\n"	/* Point EPC at 1: below */
	"	" STR(MTC0) "	$1, $14		\n"
	"	eret				\n"	/* Refetch from 1: with new K0 */
	"1:	.set	pop"
	:
	:
	: "memory");

	local_sb1___flush_cache_all();
}