/*
 * sc-rm7k.c: RM7000 cache management functions.
 *
 * Copyright (C) 1997, 2001, 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/addrspace.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/* Secondary cache parameters. */
#define sc_lsize	32
#define scache_size	(256*1024)	/* Fixed to 256KiB on RM7000 */

/* Tertiary cache parameters. */
#define tc_lsize	32
#define tc_pagesize	(32*128)

extern unsigned long icache_way_size, dcache_way_size;
static unsigned long tcache_size;

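/*
 * asm/r4kcache.h supplies cache_op(), the blast_*_range() helpers and
 * invalidate_tcache_page() used below.
 */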
#include <asm/r4kcache.h>

static int rm7k_tcache_init;

/*
 * Write back and invalidate the secondary cache (and, if enabled, the
 * tertiary cache) before DMA.
 * (XXX These need to be fixed ...)
 */
static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	pr_debug("rm7k_sc_wback_inv[%08lx,%08lx]", addr, size);

	/* Catch bad driver code */
	BUG_ON(size == 0);

	blast_scache_range(addr, addr + size);

	if (!rm7k_tcache_init)
		return;

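	/*
	 * The tertiary cache can only be invalidated a page at a time, so
	 * round the range down to tc_pagesize boundaries and invalidate
	 * every tcache page it touches.
	 */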
	a = addr & ~(tc_pagesize - 1);
	end = (addr + size - 1) & ~(tc_pagesize - 1);
	while (1) {
		invalidate_tcache_page(a);	/* Page_Invalidate_T */
		if (a == end)
			break;
		a += tc_pagesize;
	}
}

static void rm7k_sc_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	pr_debug("rm7k_sc_inv[%08lx,%08lx]", addr, size);

	/* Catch bad driver code */
	BUG_ON(size == 0);

	blast_inv_scache_range(addr, addr + size);

	if (!rm7k_tcache_init)
		return;

	a = addr & ~(tc_pagesize - 1);
	end = (addr + size - 1) & ~(tc_pagesize - 1);
	while (1) {
		invalidate_tcache_page(a);	/* Page_Invalidate_T */
		if (a == end)
			break;
		a += tc_pagesize;
	}
}

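/*
 * Invalidate the entire tertiary cache, one tc_pagesize page at a time.
 */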
static void blast_rm7k_tcache(void)
{
	unsigned long start = CKSEG0ADDR(0);
	unsigned long end = start + tcache_size;

	write_c0_taglo(0);

	while (start < end) {
		cache_op(Page_Invalidate_T, start);
		start += tc_pagesize;
	}
}

/*
 * This function is executed in uncached address space.
 */
static void __rm7k_tc_enable(void)
{
	int i;

	set_c0_config(RM7K_CONF_TE);

	write_c0_taglo(0);
	write_c0_taghi(0);

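	/*
	 * Store a zero tag to every tertiary cache line so the whole
	 * cache starts out in a known state before it is used.
	 */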
	for (i = 0; i < tcache_size; i += tc_lsize)
		cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
}

static void rm7k_tc_enable(void)
{
	if (read_c0_config() & RM7K_CONF_TE)
		return;

	BUG_ON(tcache_size == 0);

	run_uncached(__rm7k_tc_enable);
}

/*
 * This function is executed in uncached address space.
 */
static void __rm7k_sc_enable(void)
{
	int i;

	set_c0_config(RM7K_CONF_SE);

	write_c0_taglo(0);
	write_c0_taghi(0);

	for (i = 0; i < scache_size; i += sc_lsize)
		cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
}

static void rm7k_sc_enable(void)
{
	if (read_c0_config() & RM7K_CONF_SE)
		return;

	pr_info("Enabling secondary cache...\n");
	run_uncached(__rm7k_sc_enable);

	if (rm7k_tcache_init)
		rm7k_tc_enable();
}

static void rm7k_tc_disable(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_rm7k_tcache();
	clear_c0_config(RM7K_CONF_TE);
	local_irq_restore(flags);
}

static void rm7k_sc_disable(void)
{
	clear_c0_config(RM7K_CONF_SE);

	if (rm7k_tcache_init)
		rm7k_tc_disable();
}

static struct bcache_ops rm7k_sc_ops = {
	.bc_enable = rm7k_sc_enable,
	.bc_disable = rm7k_sc_disable,
	.bc_wback_inv = rm7k_sc_wback_inv,
	.bc_inv = rm7k_sc_inv
};

/*
 * This is a probing function like the one found in c-r4k.c; we look for
 * the wrap-around point by checking tags at successively larger
 * power-of-two offsets.
 */
static void __probe_tcache(void)
{
	unsigned long flags, addr, begin, end, pow2;

	begin = (unsigned long) &_stext;
	begin &= ~((8 * 1024 * 1024) - 1);
	end = begin + (8 * 1024 * 1024);

	local_irq_save(flags);

	set_c0_config(RM7K_CONF_TE);

	/* Fill size-multiple lines with a valid tag */
	pow2 = (256 * 1024);
	for (addr = begin; addr <= end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p));
		pow2 <<= 1;
	}

	/* Load first line with a 0 tag, to check after */
	write_c0_taglo(0);
	write_c0_taghi(0);
	cache_op(Index_Store_Tag_T, begin);

	/* Look for the wrap-around */
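	/*
	 * Once pow2 reaches the tcache size, begin + pow2 aliases back to
	 * index 0 and Index_Load_Tag_T returns the zero tag stored above,
	 * so pow2 at that point is the tertiary cache size.
	 */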
	pow2 = (512 * 1024);
	for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_T, addr);
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}

	addr -= begin;
	tcache_size = addr;

	clear_c0_config(RM7K_CONF_TE);

	local_irq_restore(flags);
}

void rm7k_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();

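	/* Bail out if the config register reports no secondary cache. */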
	if (config & RM7K_CONF_SC)
		return;

	c->scache.linesz = sc_lsize;
	c->scache.ways = 4;
	c->scache.waybit = __ffs(scache_size / c->scache.ways);
	c->scache.waysize = scache_size / c->scache.ways;
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Secondary cache size %dK, linesize %d bytes.\n",
		(scache_size >> 10), sc_lsize);

	if (!(config & RM7K_CONF_SE))
		rm7k_sc_enable();

	bcops = &rm7k_sc_ops;

	/*
	 * While we're at it let's deal with the tertiary cache.
	 */

	rm7k_tcache_init = 0;
	tcache_size = 0;

	if (config & RM7K_CONF_TC)
		return;

	/*
	 * No efficient way to ask the hardware for the size of the tcache,
	 * so must probe for it.
	 */
	run_uncached(__probe_tcache);
	rm7k_tc_enable();
	rm7k_tcache_init = 1;
	c->tcache.linesz = tc_lsize;
	c->tcache.ways = 1;
	pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
}