// SPDX-License-Identifier: GPL-2.0
/*
 * non-coherent cache functions for Andes AX45MP
 *
 * Copyright (C) 2023 Renesas Electronics Corp.
 */

#include <linux/cacheflush.h>
#include <linux/cacheinfo.h>
#include <linux/dma-direction.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

#include <asm/dma-noncoherent.h>

/* L2 cache registers */
#define AX45MP_L2C_REG_CTL_OFFSET		0x8

#define AX45MP_L2C_REG_C0_CMD_OFFSET		0x40
#define AX45MP_L2C_REG_C0_ACC_OFFSET		0x48
#define AX45MP_L2C_REG_STATUS_OFFSET		0x80

/* D-cache operation */
#define AX45MP_CCTL_L1D_VA_INVAL		0 /* Invalidate an L1 cache entry */
#define AX45MP_CCTL_L1D_VA_WB			1 /* Write-back an L1 cache entry */

/* L2 CCTL status */
#define AX45MP_CCTL_L2_STATUS_IDLE		0

/* L2 CCTL status cores mask */
#define AX45MP_CCTL_L2_STATUS_C0_MASK		0xf

/* L2 cache operation */
#define AX45MP_CCTL_L2_PA_INVAL			0x8 /* Invalidate an L2 cache entry */
#define AX45MP_CCTL_L2_PA_WB			0x9 /* Write-back an L2 cache entry */
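
/*
 * Each hart owns a pair of L2 command/access registers, strided by
 * AX45MP_L2C_REG_PER_CORE_OFFSET bytes, plus a 4-bit field in the
 * shared status register; the *_CN_* helpers below derive a given
 * hart's register offsets and status mask from the core-0 values.
 */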
#define AX45MP_L2C_REG_PER_CORE_OFFSET		0x10
#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET	4

#define AX45MP_L2C_REG_CN_CMD_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_L2C_REG_CN_ACC_OFFSET(n)	\
	(AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_CCTL_L2_STATUS_CN_MASK(n)	\
	(AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))
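
/*
 * CSR numbers for the CCTL operations on the L1 D-cache by virtual
 * address. These are Andes vendor CSRs, not standard RISC-V ones;
 * going by the macro names they are presumably the ucctlbeginaddr and
 * ucctlcommand registers.
 */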
#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM	0x80b
#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM	0x80c

#define AX45MP_CACHE_LINE_SIZE			64

struct ax45mp_priv {
	void __iomem *l2c_base;
	u32 ax45mp_cache_line_size;
};

static struct ax45mp_priv ax45mp_priv;

/* L2 Cache operations */
static inline uint32_t ax45mp_cpu_l2c_get_cctl_status(void)
{
	return readl(ax45mp_priv.l2c_base + AX45MP_L2C_REG_STATUS_OFFSET);
}
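
/*
 * Walk a range one cache line at a time: the L1 D-cache is operated
 * on by virtual address through the CCTL CSRs, the L2 cache by
 * physical address through this hart's MMIO command/access registers,
 * busy-waiting until the hart's L2 status field returns to idle.
 */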
static void ax45mp_cpu_cache_operation(unsigned long start, unsigned long end,
				       unsigned int l1_op, unsigned int l2_op)
{
	unsigned long line_size = ax45mp_priv.ax45mp_cache_line_size;
	void __iomem *base = ax45mp_priv.l2c_base;
	int mhartid = smp_processor_id();
	unsigned long pa;

	while (end > start) {
		csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
		csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, l1_op);

		pa = virt_to_phys((void *)start);
		writel(pa, base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid));
		writel(l2_op, base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid));
		while ((ax45mp_cpu_l2c_get_cctl_status() &
			AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
		       AX45MP_CCTL_L2_STATUS_IDLE)
			;

		start += line_size;
	}
}
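
/*
 * Note: the callers below disable local interrupts around the range
 * operations, which prevents migration and keeps smp_processor_id()
 * stable while the per-hart L2 registers are in use.
 */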

/* Write-back L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_WB,
				   AX45MP_CCTL_L2_PA_WB);
}

/* Invalidate the L1 and L2 cache entry */
static inline void ax45mp_cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_INVAL,
				   AX45MP_CCTL_L2_PA_INVAL);
}
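
/*
 * DMA cache maintenance callbacks: the physical range handed in by
 * the DMA core is converted back to a virtual range (the buffers are
 * assumed to live in the kernel's linear map), rounded out to whole
 * cache lines, and processed with interrupts disabled.
 */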
static void ax45mp_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	if (unlikely(start == end))
		return;

	line_size = ax45mp_priv.ax45mp_cache_line_size;

	start = start & (~(line_size - 1));
	end = ((end + line_size - 1) & (~(line_size - 1)));

	local_irq_save(flags);

	ax45mp_cpu_dcache_inval_range(start, end);

	local_irq_restore(flags);
}
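
/*
 * Rounding out to full lines means an invalidate may also drop data
 * that shares the first or last cache line with the buffer; the DMA
 * API's alignment requirements for noncoherent buffers are relied on
 * to make that safe.
 */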

static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	if (unlikely(start == end))
		return;

	line_size = ax45mp_priv.ax45mp_cache_line_size;
	start = start & (~(line_size - 1));
	end = ((end + line_size - 1) & (~(line_size - 1)));
	local_irq_save(flags);
	ax45mp_cpu_dcache_wb_range(start, end);
	local_irq_restore(flags);
}

static void ax45mp_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	ax45mp_dma_cache_wback(paddr, size);
	ax45mp_dma_cache_inv(paddr, size);
}
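
/*
 * Write-back followed by invalidate covers bidirectional DMA: dirty
 * lines are pushed out to memory first, then dropped so the CPU
 * refetches device-written data afterwards.
 */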

static int ax45mp_get_l2_line_size(struct device_node *np)
{
	int ret;

	ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv.ax45mp_cache_line_size);
	if (ret) {
		pr_err("Failed to get cache-line-size, defaulting to 64 bytes\n");
		return ret;
	}

	if (ax45mp_priv.ax45mp_cache_line_size != AX45MP_CACHE_LINE_SIZE) {
		pr_err("Expected cache-line-size to be 64 bytes (found:%u)\n",
		       ax45mp_priv.ax45mp_cache_line_size);
		return -EINVAL;
	}

	return 0;
}

static const struct riscv_nonstd_cache_ops ax45mp_cmo_ops __initdata = {
	.wback = &ax45mp_dma_cache_wback,
	.inv = &ax45mp_dma_cache_inv,
	.wback_inv = &ax45mp_dma_cache_wback_inv,
};
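
/*
 * Registering these ops makes the RISC-V noncoherent DMA code call
 * them in place of the standard Zicbom cache-management instructions,
 * which the AX45MP does not implement.
 */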

static const struct of_device_id ax45mp_cache_ids[] = {
	{ .compatible = "andestech,ax45mp-cache" },
	{ /* sentinel */ }
};
static int __init ax45mp_cache_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret;

	np = of_find_matching_node(NULL, ax45mp_cache_ids);
	if (!of_device_is_available(np))
		return -ENODEV;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	/*
	 * If IOCP is present on the Andes AX45MP core, riscv_cbom_block_size
	 * will be 0 for sure, so we can definitely rely on it. If
	 * riscv_cbom_block_size = 0 we don't need to handle CMO using SW
	 * anymore, so we just return success here, and only if it is set do
	 * we continue further in the probe path.
	 */
	if (!riscv_cbom_block_size)
		return 0;

	ax45mp_priv.l2c_base = ioremap(res.start, resource_size(&res));
	if (!ax45mp_priv.l2c_base)
		return -ENOMEM;

	ret = ax45mp_get_l2_line_size(np);
	if (ret) {
		iounmap(ax45mp_priv.l2c_base);
		return ret;
	}

	riscv_noncoherent_register_cache_ops(&ax45mp_cmo_ops);

	return 0;
}
early_initcall(ax45mp_cache_init);