/*
 * Copyright (C) 2011 Texas Instruments Incorporated
 * Author: Mark Salter <msalter@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/export.h>

#include <asm/cache.h>
#include <asm/soc.h>
/*
 * Internal Memory Control Registers for caches
 */
#define IMCR_CCFG	  0x0000
#define IMCR_L1PCFG	  0x0020
#define IMCR_L1PCC	  0x0024
#define IMCR_L1DCFG	  0x0040
#define IMCR_L1DCC	  0x0044
#define IMCR_L2ALLOC0	  0x2000
#define IMCR_L2ALLOC1	  0x2004
#define IMCR_L2ALLOC2	  0x2008
#define IMCR_L2ALLOC3	  0x200c
#define IMCR_L2WBAR	  0x4000
#define IMCR_L2WWC	  0x4004
#define IMCR_L2WIBAR	  0x4010
#define IMCR_L2WIWC	  0x4014
#define IMCR_L2IBAR	  0x4018
#define IMCR_L2IWC	  0x401c
#define IMCR_L1PIBAR	  0x4020
#define IMCR_L1PIWC	  0x4024
#define IMCR_L1DWIBAR	  0x4030
#define IMCR_L1DWIWC	  0x4034
#define IMCR_L1DWBAR	  0x4040
#define IMCR_L1DWWC	  0x4044
#define IMCR_L1DIBAR	  0x4048
#define IMCR_L1DIWC	  0x404c
#define IMCR_L2WB	  0x5000
#define IMCR_L2WBINV	  0x5004
#define IMCR_L2INV	  0x5008
#define IMCR_L1PINV	  0x5028
#define IMCR_L1DWB	  0x5040
#define IMCR_L1DWBINV	  0x5044
#define IMCR_L1DINV	  0x5048
#define IMCR_MAR_BASE	  0x8000
#define IMCR_MAR96_111	  0x8180
#define IMCR_MAR128_191	  0x8200
#define IMCR_MAR224_239	  0x8380
#define IMCR_L2MPFAR	  0xa000
#define IMCR_L2MPFSR	  0xa004
#define IMCR_L2MPFCR	  0xa008
#define IMCR_L2MPLK0	  0xa100
#define IMCR_L2MPLK1	  0xa104
#define IMCR_L2MPLK2	  0xa108
#define IMCR_L2MPLK3	  0xa10c
#define IMCR_L2MPLKCMD	  0xa110
#define IMCR_L2MPLKSTAT	  0xa114
#define IMCR_L2MPPA_BASE  0xa200
#define IMCR_L1PMPFAR	  0xa400
#define IMCR_L1PMPFSR	  0xa404
#define IMCR_L1PMPFCR	  0xa408
#define IMCR_L1PMPLK0	  0xa500
#define IMCR_L1PMPLK1	  0xa504
#define IMCR_L1PMPLK2	  0xa508
#define IMCR_L1PMPLK3	  0xa50c
#define IMCR_L1PMPLKCMD	  0xa510
#define IMCR_L1PMPLKSTAT  0xa514
#define IMCR_L1PMPPA_BASE 0xa600
#define IMCR_L1DMPFAR	  0xac00
#define IMCR_L1DMPFSR	  0xac04
#define IMCR_L1DMPFCR	  0xac08
#define IMCR_L1DMPLK0	  0xad00
#define IMCR_L1DMPLK1	  0xad04
#define IMCR_L1DMPLK2	  0xad08
#define IMCR_L1DMPLK3	  0xad0c
#define IMCR_L1DMPLKCMD	  0xad10
#define IMCR_L1DMPLKSTAT  0xad14
#define IMCR_L1DMPPA_BASE 0xae00
#define IMCR_L2PDWAKE0	  0xc040
#define IMCR_L2PDWAKE1	  0xc044
#define IMCR_L2PDSLEEP0	  0xc050
#define IMCR_L2PDSLEEP1	  0xc054
#define IMCR_L2PDSTAT0	  0xc060
#define IMCR_L2PDSTAT1	  0xc064
/*
 * CCFG register values and bits
 */
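/*
 * The L2MODE_* values below select how much of the on-chip L2 SRAM is run
 * as cache (from none up to 256K); L2_cache_set_mode() writes the chosen
 * value into the low three bits of CCFG.
 */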
#define L2MODE_0K_CACHE   0x0
#define L2MODE_32K_CACHE  0x1
#define L2MODE_64K_CACHE  0x2
#define L2MODE_128K_CACHE 0x3
#define L2MODE_256K_CACHE 0x7

#define L2PRIO_URGENT     0x0
#define L2PRIO_HIGH       0x1
#define L2PRIO_MEDIUM     0x2
#define L2PRIO_LOW        0x3

#define CCFG_ID           0x100	/* Invalidate L1P bit */
#define CCFG_IP           0x200	/* Invalidate L1D bit */
static void __iomem *cache_base;

/*
 * L1 & L2 caches generic functions
 */
#define imcr_get(reg) soc_readl(cache_base + (reg))
#define imcr_set(reg, value)					\
	do {							\
		soc_writel((value), cache_base + (reg));	\
		soc_readl(cache_base + (reg));			\
	} while (0)
static void cache_block_operation_wait(unsigned int wc_reg)
{
	/* Wait for completion */
	while (imcr_get(wc_reg))
		cpu_relax();
}

static DEFINE_SPINLOCK(cache_lock);
/*
 * Generic function to perform a block cache operation, either an
 * invalidate or a writeback/invalidate
 */
static void cache_block_operation(unsigned int *start,
				  unsigned int *end,
				  unsigned int bar_reg,
				  unsigned int wc_reg)
{
	unsigned long flags;
	unsigned int wcnt =
		(L2_CACHE_ALIGN_CNT((unsigned int) end)
		 - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
	unsigned int wc = 0;
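	/*
	 * wcnt is the number of 32-bit words in the cache-line aligned
	 * [start, end) range.  The word-count (xxWC) registers only take a
	 * 16-bit count, so the range is processed in chunks of at most
	 * 0xffff words.
	 */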
	for (; wcnt; wcnt -= wc, start += wc) {
loop:
		spin_lock_irqsave(&cache_lock, flags);

		/*
		 * If another cache operation is occurring
		 */
		if (unlikely(imcr_get(wc_reg))) {
			spin_unlock_irqrestore(&cache_lock, flags);

			/* Wait for previous operation completion */
			cache_block_operation_wait(wc_reg);

			/* Try again */
			goto loop;
		}

		imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));

		if (wcnt > 0xffff)
			wc = 0xffff;
		else
			wc = wcnt;

		/* Set word count value in the WC register */
		imcr_set(wc_reg, wc & 0xffff);

		spin_unlock_irqrestore(&cache_lock, flags);

		/* Wait for completion */
		cache_block_operation_wait(wc_reg);
	}
}
static void cache_block_operation_nowait(unsigned int *start,
					 unsigned int *end,
					 unsigned int bar_reg,
					 unsigned int wc_reg)
{
	unsigned long flags;
	unsigned int wcnt =
		(L2_CACHE_ALIGN_CNT((unsigned int) end)
		 - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
	unsigned int wc = 0;
	for (; wcnt; wcnt -= wc, start += wc) {

		spin_lock_irqsave(&cache_lock, flags);

		imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));

		if (wcnt > 0xffff)
			wc = 0xffff;
		else
			wc = wcnt;

		/* Set word count value in the WC register */
		imcr_set(wc_reg, wc & 0xffff);

		spin_unlock_irqrestore(&cache_lock, flags);

		/* Don't wait for completion on last cache operation */
		if (wcnt > 0xffff)
			cache_block_operation_wait(wc_reg);
	}
}
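/*
 * Even the "nowait" variant above waits between intermediate chunks
 * (the wcnt > 0xffff case), presumably because the BAR/WC registers
 * cannot be reloaded while an operation is still in flight; only the
 * final chunk is left to complete in the background.
 */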
/*
 * L1 caches management
 */

/*
 * Disable L1 caches
 */
void L1_cache_off(void)
{
	unsigned int dummy;

	imcr_set(IMCR_L1PCFG, 0);
	dummy = imcr_get(IMCR_L1PCFG);

	imcr_set(IMCR_L1DCFG, 0);
	dummy = imcr_get(IMCR_L1DCFG);
}

/*
 * Enable L1 caches
 */
void L1_cache_on(void)
{
	unsigned int dummy;

	imcr_set(IMCR_L1PCFG, 7);
	dummy = imcr_get(IMCR_L1PCFG);

	imcr_set(IMCR_L1DCFG, 7);
	dummy = imcr_get(IMCR_L1DCFG);
}
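/*
 * Writing 0 to L1PCFG/L1DCFG turns the corresponding L1 cache off, while
 * writing 7 presumably selects the largest supported L1 cache size; the
 * extra imcr_get() calls simply read the registers back after the change.
 */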
/*
 * L1P global-invalidate all
 */
void L1P_cache_global_invalidate(void)
{
	unsigned int set = 1;
	imcr_set(IMCR_L1PINV, set);
	while (imcr_get(IMCR_L1PINV) & 1)
		cpu_relax();
}

/*
 * L1D global-invalidate all
 *
 * Warning: this operation causes all updated data in L1D to
 * be discarded rather than written back to the lower levels of
 * memory
 */
void L1D_cache_global_invalidate(void)
{
	unsigned int set = 1;
	imcr_set(IMCR_L1DINV, set);
	while (imcr_get(IMCR_L1DINV) & 1)
		cpu_relax();
}

void L1D_cache_global_writeback(void)
{
	unsigned int set = 1;
	imcr_set(IMCR_L1DWB, set);
	while (imcr_get(IMCR_L1DWB) & 1)
		cpu_relax();
}

void L1D_cache_global_writeback_invalidate(void)
{
	unsigned int set = 1;
	imcr_set(IMCR_L1DWBINV, set);
	while (imcr_get(IMCR_L1DWBINV) & 1)
		cpu_relax();
}
/*
 * L2 caches management
 */

/*
 * Set L2 operation mode
 */
void L2_cache_set_mode(unsigned int mode)
{
	unsigned int ccfg = imcr_get(IMCR_CCFG);

	/* Clear and set the L2MODE bits in CCFG */
	ccfg &= ~7;
	ccfg |= (mode & 7);
	imcr_set(IMCR_CCFG, ccfg);
	ccfg = imcr_get(IMCR_CCFG);
}

/*
 * L2 global-writeback and global-invalidate all
 */
void L2_cache_global_writeback_invalidate(void)
{
	imcr_set(IMCR_L2WBINV, 1);
	while (imcr_get(IMCR_L2WBINV))
		cpu_relax();
}

/*
 * L2 global-writeback all
 */
void L2_cache_global_writeback(void)
{
	imcr_set(IMCR_L2WB, 1);
	while (imcr_get(IMCR_L2WB))
		cpu_relax();
}
/*
 * Cacheability controls
 */
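/*
 * Each MAR (memory attribute register) covers one 16MB address region,
 * hence the (addr >> 24) indexing below; bit 0 of a MAR is the bit that
 * permits caching for that region.
 */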
void enable_caching(unsigned long start, unsigned long end)
{
	unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
	unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);

	for (; mar <= mar_e; mar += 4)
		imcr_set(mar, imcr_get(mar) | 1);
}

void disable_caching(unsigned long start, unsigned long end)
{
	unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
	unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);

	for (; mar <= mar_e; mar += 4)
		imcr_set(mar, imcr_get(mar) & ~1);
}
/*
 * L1 block operations
 */
void L1P_cache_block_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1PIBAR, IMCR_L1PIWC);
}
EXPORT_SYMBOL(L1P_cache_block_invalidate);

void L1D_cache_block_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1DIBAR, IMCR_L1DIWC);
}

void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1DWIBAR, IMCR_L1DWIWC);
}

void L1D_cache_block_writeback(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1DWBAR, IMCR_L1DWWC);
}
EXPORT_SYMBOL(L1D_cache_block_writeback);
/*
 * L2 block operations
 */
void L2_cache_block_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L2IBAR, IMCR_L2IWC);
}

void L2_cache_block_writeback(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L2WBAR, IMCR_L2WWC);
}

void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L2WIBAR, IMCR_L2WIWC);
}

void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end)
{
	cache_block_operation_nowait((unsigned int *) start,
				     (unsigned int *) end,
				     IMCR_L2IBAR, IMCR_L2IWC);
}

void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end)
{
	cache_block_operation_nowait((unsigned int *) start,
				     (unsigned int *) end,
				     IMCR_L2WBAR, IMCR_L2WWC);
}

void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
						unsigned int end)
{
	cache_block_operation_nowait((unsigned int *) start,
				     (unsigned int *) end,
				     IMCR_L2WIBAR, IMCR_L2WIWC);
}
/*
 * L1 and L2 caches configuration
 */
void __init c6x_cache_init(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "ti,c64x+cache");
	if (!node)
		return;

	cache_base = of_iomap(node, 0);

	of_node_put(node);

	if (!cache_base)
		return;

	/* Set L2 cache over the whole L2 SRAM memory */
	L2_cache_set_mode(L2MODE_SIZE);

	/* Enable L1 */
	L1_cache_on();
}