/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
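
/*
 * Background maintenance operations report busy through the polled register
 * bits: a line operation register reads back 1 while the operation is in
 * progress, and the invalidate-by-way register keeps a way's bit set until
 * that way has been invalidated.  cache_wait() spins until the controller
 * is ready for the next command.
 */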
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* wait for the operation to complete */
	while (readl(reg) & mask)
		;
}
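
/*
 * Writing to the Cache Sync register drains the controller's buffers, so
 * all previously posted maintenance operations are complete and visible to
 * memory by the time the following cache_wait() returns.
 */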
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;
	writel(0, base + L2X0_CACHE_SYNC);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
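
/*
 * The per-line helpers below wait for the previous command to drain before
 * posting a new one; completion of the final command in a batch is picked
 * up by a later cache_wait() or cache_sync() in the range operations.
 */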
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel(addr, base + L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel(addr, base + L2X0_INV_LINE_PA);
}
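
/*
 * PL310 erratum 588369: on affected revisions the combined Clean &
 * Invalidate by PA operation does not invalidate lines that are already
 * clean.  The workaround issues a separate Clean by PA followed by an
 * Invalidate by PA, and callers bracket the sequence with
 * debug_writel(0x03)/debug_writel(0x00) to disable write-back and
 * line-fills via the Debug Control Register.  That register is secure-only
 * on OMAP, hence the omap_smc1() secure monitor call.
 */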
#ifdef CONFIG_PL310_ERRATA_588369
static void debug_writel(unsigned long val)
{
	extern void omap_smc1(u32 fn, u32 arg);

	/*
	 * Texas Instrument secure monitor api to modify the
	 * PL310 Debug Control Register.
	 */
	omap_smc1(0x100, val);
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel(addr, base + L2X0_INV_LINE_PA);
}
#else

/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
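
/* Locked wrapper around cache_sync(); installed as outer_cache.sync. */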
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
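
/*
 * Invalidate every way in one shot.  Invalidating by way discards dirty
 * data, so this is only used from l2x0_init() before the controller is
 * enabled.
 */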
static inline void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
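
/*
 * Invalidate a physical address range.  Partial lines at either end are
 * flushed (clean + invalidate) rather than just invalidated, so dirty data
 * belonging to neighbouring buffers that share the cache line is not
 * thrown away.  The range is walked in 4K blocks, dropping and retaking
 * the lock between blocks to bound interrupt latency.
 */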
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
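
/*
 * Clean a physical address range.  Cleaning never discards data, so the
 * start address is simply rounded down to a line boundary and the whole
 * range is cleaned in 4K blocks, as above.
 */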
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
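
/*
 * Clean and invalidate a physical address range.  Each 4K block is
 * bracketed with debug_writel() so the erratum 588369 workaround, when
 * configured, is applied around the flush; in the non-errata build those
 * calls compile to nothing.
 */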
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
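
/*
 * Probe the cache controller, program the auxiliary control register (only
 * possible while the cache is still disabled), invalidate the whole cache,
 * enable it, and install the outer-cache maintenance hooks.
 */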
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl(l2x0_base + L2X0_CACHE_ID);
	aux = readl(l2x0_base + L2X0_AUX_CTRL);

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * Check if l2x0 controller is already enabled.
	 * If you are booting from non-secure mode
	 * accessing the below registers will fault.
	 */
	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {

		/* l2x0 controller is disabled */

		aux &= aux_mask;
		aux |= aux_val;
		writel(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
			 ways, cache_id, aux);
}
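
/*
 * Typical use from platform setup code (a sketch only; the base address and
 * auxiliary values are platform-specific and hypothetical here):
 *
 *	l2x0_init(IO_ADDRESS(0xfff12000), 0x00730000, 0xfe000fff);
 *
 * aux_mask selects which AUX_CTRL bits read from the hardware to preserve,
 * and aux_val the bits to set on top, before the cache is enabled.
 */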