/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;

struct l2x0_regs l2x0_saved_regs;
struct l2x0_of_data {
	void (*setup)(const struct device_node *, __u32 *, __u32 *);
	void (*save)(void);
	void (*resume)(void);
};
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

#ifdef CONFIG_ARM_ERRATA_753970
	/* write to an unmapped register */
	writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
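/*
 * Note: with CONFIG_ARM_ERRATA_753970 the sync request above is issued
 * through the L2X0_DUMMY_REG offset instead of L2X0_CACHE_SYNC; the
 * following cache_wait() still polls L2X0_CACHE_SYNC and compiles away
 * entirely on PL310 (see cache_wait() above).
 */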
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
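/*
 * The two l2x0_flush_line() variants above differ only in how the flush is
 * issued: the PL310_ERRATA_588369 build splits the composite
 * clean+invalidate-by-PA into a separate clean followed by an invalidate,
 * while the default build uses the single L2X0_CLEAN_INV_LINE_PA operation.
 */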
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}
static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a nono */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
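/*
 * The range operations below share a common structure: work is done in
 * 4 KiB blocks with the spinlock briefly dropped and re-taken between
 * blocks so that interrupts are not held off for the whole range.
 * l2x0_inv_range() flushes (clean+invalidate) partially covered lines at
 * the range edges instead of merely invalidating them, while
 * l2x0_clean_range() and l2x0_flush_range() fall back to the whole-cache
 * operations when the range is at least as large as the cache.
 */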
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_unlock(__u32 cache_id)
{
	int lockregs;
	int i;

	if (cache_id == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	__u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;
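	/*
	 * Illustrative example: an 8-way cache whose AUX_CTRL way-size
	 * field reads 3 gives way_size = 1 << (3 + 3) = 64, so
	 * l2x0_size = 8 * 64 * SZ_1K = 512 KiB.
	 */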
	/*
	 * Check if l2x0 controller is already enabled.
	 * If you are booting from non-secure mode
	 * accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_saved_regs.aux_ctrl = aux;

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	outer_cache.set_debug = l2x0_set_debug;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}
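/*
 * Non-DT platforms call l2x0_init() directly with an ioremapped controller
 * base; aux_val/aux_mask let the caller override bits of the Auxiliary
 * Control register before the cache is enabled. DT platforms use
 * l2x0_of_init() below, which derives those values from the device tree
 * and then calls l2x0_init().
 */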
#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
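/*
 * Illustrative device-tree fragment for the properties parsed above
 * (values are latencies in cycles; the code programs value - 1):
 *
 *	arm,tag-latency = <2>;
 *	arm,data-latency = <2 2>;
 *	arm,dirty-latency = <2>;
 */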
static void __init pl310_of_setup(const struct device_node *np,
				  __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
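/*
 * For pl310_of_setup() the "arm,tag-latency" and "arm,data-latency"
 * properties carry three cells (read, write, setup), and
 * "arm,filter-ranges" carries <base size>: the filter end address is
 * programmed as base + size rounded up to 1 MiB, and the start address is
 * rounded down to 1 MiB with the enable bit set.
 */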
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}
static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
}
static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}
static const struct l2x0_of_data pl310_data = {
	.setup = pl310_of_setup,
	.save = pl310_save,
	.resume = pl310_resume,
};

static const struct l2x0_of_data l2x0_data = {
	.setup = l2x0_of_setup,
	.save = NULL,
	.resume = l2x0_resume,
};
static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{}
};
int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
{
	struct device_node *np;
	struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);

		if (data->save)
			data->save();
	}

	l2x0_init(l2x0_base, aux_val, aux_mask);

	outer_cache.resume = data->resume;
	return 0;
}
#endif