/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

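/*
 * Per-variant hooks, selected through the device tree match table at the
 * bottom of this file: setup() folds DT properties into the auxiliary
 * control value, save()/resume() preserve controller state across
 * low-power transitions.
 */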
struct l2x0_of_data {
	void (*setup)(const struct device_node *, __u32 *, __u32 *);
	void (*save)(void);
	void (*resume)(void);
};

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

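/*
 * A write to the Cache Sync register drains the controller's buffers;
 * polling it afterwards guarantees that all previously issued
 * maintenance operations have completed.
 */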
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

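/*
 * Single-line maintenance helpers: each takes a physical address and, on
 * controllers where line operations are not atomic, first waits for the
 * previous operation on that register to complete.
 */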
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

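/*
 * PL310 erratum 588369 workaround: the combined Clean & Invalidate by PA
 * operation is unreliable on affected revisions, so it is replaced by a
 * separate Clean by PA followed by an Invalidate by PA.
 */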
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

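/*
 * The outer_cache callbacks below serialize all accesses to the
 * controller's operation registers with l2x0_lock.
 */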
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

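/*
 * Range operations walk [start, end) one cache line at a time but drop
 * and re-take l2x0_lock every 4096 bytes, so interrupt latency stays
 * bounded even for large ranges.
 */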
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(__u32 cache_id)
{
	int lockregs;
	int i;

	if (cache_id == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

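/*
 * l2x0_init() identifies the controller from its CACHE_ID register,
 * applies the caller's (aux_val, aux_mask) adjustments to the auxiliary
 * control register while the cache is still disabled, and registers the
 * outer_cache callbacks.
 */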
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	__u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.  If we are
	 * booting in non-secure mode, accessing the registers below
	 * will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_saved_regs.aux_ctrl = aux;

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}

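/*
 * Device tree support.  The properties parsed below come from a node
 * such as this (address and latency values are illustrative only):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 1>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 *
 * The latency cells are 1-based cycle counts and are converted to the
 * 0-based register encoding here.
 */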
#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;

	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

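/*
 * Suspend/resume support: pl310_save() snapshots the latency, address
 * filter, prefetch and power control registers so that pl310_resume()
 * can reprogram them before the cache is re-enabled.
 */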
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static const struct l2x0_of_data pl310_data = {
	pl310_of_setup,
	pl310_save,
	pl310_resume,
};

static const struct l2x0_of_data l2x0_data = {
	l2x0_of_setup,
	NULL,
	l2x0_resume,
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{}
};

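/*
 * Entry point for DT platforms.  A board's init code would typically
 * call l2x0_of_init(0, ~0UL) to take the device tree settings unchanged
 * (illustrative values); non-DT platforms call l2x0_init() directly with
 * an ioremapped base address.
 */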
int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
{
	struct device_node *np;
	struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);
	}

	if (data->save)
		data->save();

	l2x0_init(l2x0_base, aux_val, aux_mask);

	outer_cache.resume = data->resume;

	return 0;
}
#endif