// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#define pr_fmt(fmt)		"uniphier: " fmt

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC		0x0	/* Control Register */
#define    UNIPHIER_SSCC_BST			BIT(20)	/* UCWG burst read */
#define    UNIPHIER_SSCC_ACT			BIT(19)	/* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG			BIT(18)	/* WT gathering on */
#define    UNIPHIER_SSCC_PRD			BIT(17)	/* enable pre-fetch */
#define    UNIPHIER_SSCC_ON			BIT(0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x30	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x34	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x0	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x244	/* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH		0x2	/* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC		0x8	/* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x248	/* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_S_MASK		(0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE		(0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL		(0x1 << 17)
#define    UNIPHIER_SSCOQM_CE			BIT(15)	/* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH		0x2	/* flush */
#define UNIPHIER_SSCOQAD	0x24c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF	0x25c	/* Cache Operation Queue Set Complete */
#define    UNIPHIER_SSCOPPQSEF_FE		BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE		BIT(0)
#define UNIPHIER_SSCOLPQS	0x260	/* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF			BIT(2)
#define    UNIPHIER_SSCOLPQS_EST		BIT(1)
#define    UNIPHIER_SSCOLPQS_QST		BIT(0)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
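
/*
 * Example: UNIPHIER_SSCOQM_S_RANGE | UNIPHIER_SSCOQM_CM_FLUSH queues a flush
 * by address range, so UNIPHIER_SSCOQM_S_IS_RANGE() is true and the queue
 * address/size registers must be programmed as well.  By contrast,
 * UNIPHIER_SSCOQM_S_ALL | UNIPHIER_SSCOQM_CM_FLUSH flushes the whole cache
 * and takes no address operands.
 */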

/**
 * struct uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual address of the way control registers for this
 *	SoC
 * @way_mask: each bit specifies if the way is present
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
	void __iomem *ctrl_base;
	void __iomem *rev_base;
	void __iomem *op_base;
	void __iomem *way_ctrl_base;
	u32 way_mask;
	u32 nsets;
	u32 line_size;
	u32 range_op_max_size;
	struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
	/* This sequence need not be atomic.  Do not disable IRQ. */
	writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
		       data->op_base + UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
					  unsigned long start,
					  unsigned long size,
					  u32 operation)
{
	unsigned long flags;

	/*
	 * No spin lock is necessary here because:
	 *
	 * [1] This outer cache controller is able to accept maintenance
	 * operations from multiple CPUs at a time in an SMP system; if a
	 * maintenance operation is under way and another operation is issued,
	 * the new one is stored in the queue.  The controller performs one
	 * operation after another.  If the queue is full, the status register,
	 * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
	 * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
	 * different instances for each CPU, i.e. each CPU can track the status
	 * of the maintenance operations triggered by itself.
	 *
	 * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
	 * SSCOQWN}, are shared between multiple CPUs, but the hardware still
	 * guarantees the registration sequence is atomic; write accesses to
	 * them are arbitrated by the hardware.  The first accessor to the
	 * register, UNIPHIER_SSCOQM, holds the access right and it is released
	 * by reading the status register, UNIPHIER_SSCOPPQSEF.  While one CPU
	 * is holding the access right, other CPUs fail to register operations.
	 * One CPU should not hold the access right for a long time, so local
	 * IRQs should be disabled during the following sequence.
	 */
	local_irq_save(flags);

	/* clear the complete notification flag */
	writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
			       data->op_base + UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
			writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
			writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
		}
	} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
		      UNIPHIER_SSCOLPQS_EF))
		cpu_relax();

	local_irq_restore(flags);
}
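
/*
 * Example: flushing a single 128-byte line at 0x80000000 (hypothetical
 * address and line size) boils down to
 *
 *	__uniphier_cache_maint_common(data, 0x80000000, 0x80,
 *				UNIPHIER_SSCOQM_S_RANGE |
 *				UNIPHIER_SSCOQM_CM_FLUSH);
 *
 * followed by __uniphier_cache_sync() to drain the buffers, which is exactly
 * what __uniphier_cache_maint_range() below does for such a range.
 */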

static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
				       u32 operation)
{
	__uniphier_cache_maint_common(data, 0, 0,
				      UNIPHIER_SSCOQM_S_ALL | operation);

	__uniphier_cache_sync(data);
}

static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
					 unsigned long start, unsigned long end,
					 u32 operation)
{
	unsigned long size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line.
	 */
	start = start & ~(data->line_size - 1);

	size = end - start;

	if (unlikely(size >= (unsigned long)(-data->line_size))) {
		/* this means cache operation for all range */
		__uniphier_cache_maint_all(data, operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line.
	 */
	size = ALIGN(size, data->line_size);

	while (size) {
		unsigned long chunk_size = min_t(unsigned long, size,
						 data->range_op_max_size);

		__uniphier_cache_maint_common(data, start, chunk_size,
					UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	__uniphier_cache_sync(data);
}
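
/*
 * Note on the overflow check above: "-data->line_size" evaluates to
 * ULONG_MAX - line_size + 1 in unsigned arithmetic, so the test catches a
 * size that could not be rounded up to a whole number of cache lines without
 * wrapping around; such a range can only be covered by maintaining the
 * entire cache.
 */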

static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
	u32 val = 0;

	if (on)
		val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

	writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

static void __init __uniphier_cache_set_active_ways(
					struct uniphier_cache_data *data)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
}
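
/*
 * Example with hypothetical numbers: a 1 MiB cache with 512 sets and
 * 128-byte lines has 1 MiB / (512 * 128) = 16 ways, so
 * __uniphier_cache_init() below computes way_mask = GENMASK(15, 0) = 0xffff,
 * and all 16 ways are activated here for every possible CPU.
 */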

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
				       u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

static void uniphier_cache_disable(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry_reverse(data, &uniphier_cache_list, list)
		__uniphier_cache_enable(data, false);

	uniphier_cache_flush_all();
}

static void __init uniphier_cache_enable(void)
{
	struct uniphier_cache_data *data;

	uniphier_cache_inv_all();

	list_for_each_entry(data, &uniphier_cache_list, list) {
		__uniphier_cache_enable(data, true);
		__uniphier_cache_set_active_ways(data);
	}
}

static void uniphier_cache_sync(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_sync(data);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
	{ .compatible = "socionext,uniphier-system-cache" },
	{ /* sentinel */ }
};

static int __init __uniphier_cache_init(struct device_node *np,
					unsigned int *cache_level)
{
	struct uniphier_cache_data *data;
	u32 level, cache_size;
	struct device_node *next_np;
	int ret = 0;

	if (!of_match_node(uniphier_cache_match, np)) {
		pr_err("L%d: not compatible with uniphier cache\n",
		       *cache_level);
		return -EINVAL;
	}

	if (of_property_read_u32(np, "cache-level", &level)) {
		pr_err("L%d: cache-level is not specified\n", *cache_level);
		return -EINVAL;
	}

	if (level != *cache_level) {
		pr_err("L%d: cache-level is unexpected value %d\n",
		       *cache_level, level);
		return -EINVAL;
	}

	if (!of_property_read_bool(np, "cache-unified")) {
		pr_err("L%d: cache-unified is not specified\n", *cache_level);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
	    !is_power_of_2(data->line_size)) {
		pr_err("L%d: cache-line-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
	    !is_power_of_2(data->nsets)) {
		pr_err("L%d: cache-sets is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-size", &cache_size) ||
	    cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
		pr_err("L%d: cache-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
				 0);

	data->ctrl_base = of_iomap(np, 0);
	if (!data->ctrl_base) {
		pr_err("L%d: failed to map control register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->rev_base = of_iomap(np, 1);
	if (!data->rev_base) {
		pr_err("L%d: failed to map revision register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->op_base = of_iomap(np, 2);
	if (!data->op_base) {
		pr_err("L%d: failed to map operation register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->way_ctrl_base = data->ctrl_base + 0xc00;

	if (*cache_level == 2) {
		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
		/*
		 * The size of a range operation is limited to (1 << 22) or
		 * less for PH-sLD8 or older SoCs.
		 */
		if (revision <= 0x16)
			data->range_op_max_size = (u32)1 << 22;

		/*
		 * Unfortunately, the offset address of the active way control
		 * base varies from SoC to SoC.
		 */
		switch (revision) {
		case 0x11:	/* sLD3 */
			data->way_ctrl_base = data->ctrl_base + 0x870;
			break;
		case 0x12:	/* LD4 */
		case 0x16:	/* sLD8 */
			data->way_ctrl_base = data->ctrl_base + 0x840;
			break;
		default:
			break;
		}
	}

	data->range_op_max_size -= data->line_size;
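
	/*
	 * If no SoC-specific limit was set above, range_op_max_size is still
	 * zero here and the subtraction wraps around to an effectively
	 * unlimited value.  Subtracting the line size also keeps the chunk
	 * limit a whole multiple of the cache line size, presumably so that
	 * the start address stays line-aligned across chunks.
	 */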

	INIT_LIST_HEAD(&data->list);
	list_add_tail(&data->list, &uniphier_cache_list);	/* no mutex */

	/*
	 * OK, this level has been successfully initialized.  Look for the next
	 * level cache.  Do not roll back even if the initialization of the
	 * next level cache fails, because we want to continue with the
	 * available cache levels.
	 */
	next_np = of_find_next_cache_node(np);
	if (next_np) {
		(*cache_level)++;
		ret = __uniphier_cache_init(next_np, cache_level);
	}
	of_node_put(next_np);

	return ret;
err:
	iounmap(data->op_base);
	iounmap(data->rev_base);
	iounmap(data->ctrl_base);
	kfree(data);

	return ret;
}

int __init uniphier_cache_init(void)
{
	struct device_node *np = NULL;
	unsigned int cache_level;
	int ret = 0;

	/* look for level 2 cache */
	while ((np = of_find_matching_node(np, uniphier_cache_match)))
		if (!of_property_read_u32(np, "cache-level", &cache_level) &&
		    cache_level == 2)
			break;

	if (!np)
		return -ENODEV;

	ret = __uniphier_cache_init(np, &cache_level);
	of_node_put(np);

	if (ret) {
		/*
		 * Error out only if L2 initialization fails.  Continue with
		 * any error on L3 or outer because they are optional.
		 */
		if (cache_level == 2) {
			pr_err("failed to initialize L2 cache\n");
			return ret;
		}

		cache_level--;
		ret = 0;
	}

	outer_cache.inv_range = uniphier_cache_inv_range;
	outer_cache.clean_range = uniphier_cache_clean_range;
	outer_cache.flush_range = uniphier_cache_flush_range;
	outer_cache.flush_all = uniphier_cache_flush_all;
	outer_cache.disable = uniphier_cache_disable;
	outer_cache.sync = uniphier_cache_sync;

	uniphier_cache_enable();

	pr_info("enabled outer cache (cache level: %d)\n", cache_level);

	return ret;
}