/*
 * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370 and Armada XP SOCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and between
 * CPUs and I/O masters. This file initializes the coherency fabric and
 * supplies basic routines for configuring and controlling hardware coherency
 */
#define pr_fmt(fmt) "mvebu-coherency: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/clk.h>
#include <linux/pci.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include "armada-370-xp.h"
#include "coherency.h"
#include "mvebu-soc-id.h"
40 unsigned long coherency_phys_base
;
41 void __iomem
*coherency_base
;
42 static void __iomem
*coherency_cpu_base
;
/* Coherency fabric registers */
#define COHERENCY_FABRIC_CFG_OFFSET		   0x4

#define IO_SYNC_BARRIER_CTL_OFFSET		   0x0

/* Type of coherency fabric found on the SoC, carried in DT match data */
enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};
56 static struct of_device_id of_coherency_table
[] = {
57 {.compatible
= "marvell,coherency-fabric",
58 .data
= (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP
},
59 {.compatible
= "marvell,armada-375-coherency-fabric",
60 .data
= (void *) COHERENCY_FABRIC_TYPE_ARMADA_375
},
61 {.compatible
= "marvell,armada-380-coherency-fabric",
62 .data
= (void *) COHERENCY_FABRIC_TYPE_ARMADA_380
},
63 { /* end of list */ },
/* Functions defined in coherency_ll.S (low-level assembly helpers) */
int ll_enable_coherency(void);
void ll_add_cpu_to_smp_group(void);
70 int set_cpu_coherent(void)
72 if (!coherency_base
) {
73 pr_warn("Can't make current CPU cache coherent.\n");
74 pr_warn("Coherency fabric is not initialized\n");
78 ll_add_cpu_to_smp_group();
79 return ll_enable_coherency();
/*
 * The below code implements the I/O coherency workaround on Armada
 * 375. This workaround consists in using the two channels of the
 * first XOR engine to trigger a XOR transaction that serves as the
 * I/O coherency barrier.
 */
89 static void __iomem
*xor_base
, *xor_high_base
;
90 static dma_addr_t coherency_wa_buf_phys
[CONFIG_NR_CPUS
];
91 static void *coherency_wa_buf
[CONFIG_NR_CPUS
];
92 static bool coherency_wa_enabled
;
/*
 * XOR engine register offsets. Every macro argument is individually
 * parenthesized so that expressions (e.g. "i + 1") expand correctly.
 */
#define XOR_CONFIG(chan)            (0x10 + ((chan) * 4))
#define XOR_ACTIVATION(chan)        (0x20 + ((chan) * 4))
#define WINDOW_BAR_ENABLE(chan)     (0x240 + ((chan) << 2))
#define WINDOW_BASE(w)              (0x250 + ((w) << 2))
#define WINDOW_SIZE(w)              (0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)        (0x290 + ((w) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)  (0x2A0 + ((chan) << 2))
#define XOR_DEST_POINTER(chan)      (0x2B0 + ((chan) * 4))
#define XOR_BLOCK_SIZE(chan)        (0x2C0 + ((chan) * 4))
#define XOR_INIT_VALUE_LOW	    0x2E0
#define XOR_INIT_VALUE_HIGH	    0x2E4
106 static inline void mvebu_hwcc_armada375_sync_io_barrier_wa(void)
108 int idx
= smp_processor_id();
110 /* Write '1' to the first word of the buffer */
111 writel(0x1, coherency_wa_buf
[idx
]);
113 /* Wait until the engine is idle */
114 while ((readl(xor_base
+ XOR_ACTIVATION(idx
)) >> 4) & 0x3)
119 /* Trigger channel */
120 writel(0x1, xor_base
+ XOR_ACTIVATION(idx
));
122 /* Poll the data until it is cleared by the XOR transaction */
123 while (readl(coherency_wa_buf
[idx
]))
127 static void __init
armada_375_coherency_init_wa(void)
129 const struct mbus_dram_target_info
*dram
;
130 struct device_node
*xor_node
;
131 struct property
*xor_status
;
136 pr_warn("enabling coherency workaround for Armada 375 Z1, one XOR engine disabled\n");
139 * Since the workaround uses one XOR engine, we grab a
140 * reference to its Device Tree node first.
142 xor_node
= of_find_compatible_node(NULL
, NULL
, "marvell,orion-xor");
146 * Then we mark it as disabled so that the real XOR driver
149 xor_status
= kzalloc(sizeof(struct property
), GFP_KERNEL
);
152 xor_status
->value
= kstrdup("disabled", GFP_KERNEL
);
153 BUG_ON(!xor_status
->value
);
155 xor_status
->length
= 8;
156 xor_status
->name
= kstrdup("status", GFP_KERNEL
);
157 BUG_ON(!xor_status
->name
);
159 of_update_property(xor_node
, xor_status
);
162 * And we remap the registers, get the clock, and do the
163 * initial configuration of the XOR engine.
165 xor_base
= of_iomap(xor_node
, 0);
166 xor_high_base
= of_iomap(xor_node
, 1);
168 xor_clk
= of_clk_get_by_name(xor_node
, NULL
);
171 clk_prepare_enable(xor_clk
);
173 dram
= mv_mbus_dram_info();
175 for (i
= 0; i
< 8; i
++) {
176 writel(0, xor_base
+ WINDOW_BASE(i
));
177 writel(0, xor_base
+ WINDOW_SIZE(i
));
179 writel(0, xor_base
+ WINDOW_REMAP_HIGH(i
));
182 for (i
= 0; i
< dram
->num_cs
; i
++) {
183 const struct mbus_dram_window
*cs
= dram
->cs
+ i
;
184 writel((cs
->base
& 0xffff0000) |
185 (cs
->mbus_attr
<< 8) |
186 dram
->mbus_dram_target_id
, xor_base
+ WINDOW_BASE(i
));
187 writel((cs
->size
- 1) & 0xffff0000, xor_base
+ WINDOW_SIZE(i
));
189 win_enable
|= (1 << i
);
190 win_enable
|= 3 << (16 + (2 * i
));
193 writel(win_enable
, xor_base
+ WINDOW_BAR_ENABLE(0));
194 writel(win_enable
, xor_base
+ WINDOW_BAR_ENABLE(1));
195 writel(0, xor_base
+ WINDOW_OVERRIDE_CTRL(0));
196 writel(0, xor_base
+ WINDOW_OVERRIDE_CTRL(1));
198 for (i
= 0; i
< CONFIG_NR_CPUS
; i
++) {
199 coherency_wa_buf
[i
] = kzalloc(PAGE_SIZE
, GFP_KERNEL
);
200 BUG_ON(!coherency_wa_buf
[i
]);
203 * We can't use the DMA mapping API, since we don't
204 * have a valid 'struct device' pointer
206 coherency_wa_buf_phys
[i
] =
207 virt_to_phys(coherency_wa_buf
[i
]);
208 BUG_ON(!coherency_wa_buf_phys
[i
]);
211 * Configure the XOR engine for memset operation, with
212 * a 128 bytes block size
214 writel(0x444, xor_base
+ XOR_CONFIG(i
));
215 writel(128, xor_base
+ XOR_BLOCK_SIZE(i
));
216 writel(coherency_wa_buf_phys
[i
],
217 xor_base
+ XOR_DEST_POINTER(i
));
220 writel(0x0, xor_base
+ XOR_INIT_VALUE_LOW
);
221 writel(0x0, xor_base
+ XOR_INIT_VALUE_HIGH
);
223 coherency_wa_enabled
= true;
226 static inline void mvebu_hwcc_sync_io_barrier(void)
228 if (coherency_wa_enabled
) {
229 mvebu_hwcc_armada375_sync_io_barrier_wa();
233 writel(0x1, coherency_cpu_base
+ IO_SYNC_BARRIER_CTL_OFFSET
);
234 while (readl(coherency_cpu_base
+ IO_SYNC_BARRIER_CTL_OFFSET
) & 0x1);
237 static dma_addr_t
mvebu_hwcc_dma_map_page(struct device
*dev
, struct page
*page
,
238 unsigned long offset
, size_t size
,
239 enum dma_data_direction dir
,
240 struct dma_attrs
*attrs
)
242 if (dir
!= DMA_TO_DEVICE
)
243 mvebu_hwcc_sync_io_barrier();
244 return pfn_to_dma(dev
, page_to_pfn(page
)) + offset
;
248 static void mvebu_hwcc_dma_unmap_page(struct device
*dev
, dma_addr_t dma_handle
,
249 size_t size
, enum dma_data_direction dir
,
250 struct dma_attrs
*attrs
)
252 if (dir
!= DMA_TO_DEVICE
)
253 mvebu_hwcc_sync_io_barrier();
256 static void mvebu_hwcc_dma_sync(struct device
*dev
, dma_addr_t dma_handle
,
257 size_t size
, enum dma_data_direction dir
)
259 if (dir
!= DMA_TO_DEVICE
)
260 mvebu_hwcc_sync_io_barrier();
263 static struct dma_map_ops mvebu_hwcc_dma_ops
= {
264 .alloc
= arm_dma_alloc
,
265 .free
= arm_dma_free
,
266 .mmap
= arm_dma_mmap
,
267 .map_page
= mvebu_hwcc_dma_map_page
,
268 .unmap_page
= mvebu_hwcc_dma_unmap_page
,
269 .get_sgtable
= arm_dma_get_sgtable
,
270 .map_sg
= arm_dma_map_sg
,
271 .unmap_sg
= arm_dma_unmap_sg
,
272 .sync_single_for_cpu
= mvebu_hwcc_dma_sync
,
273 .sync_single_for_device
= mvebu_hwcc_dma_sync
,
274 .sync_sg_for_cpu
= arm_dma_sync_sg_for_cpu
,
275 .sync_sg_for_device
= arm_dma_sync_sg_for_device
,
276 .set_dma_mask
= arm_dma_set_mask
,
279 static int mvebu_hwcc_notifier(struct notifier_block
*nb
,
280 unsigned long event
, void *__dev
)
282 struct device
*dev
= __dev
;
284 if (event
!= BUS_NOTIFY_ADD_DEVICE
)
286 set_dma_ops(dev
, &mvebu_hwcc_dma_ops
);
291 static struct notifier_block mvebu_hwcc_nb
= {
292 .notifier_call
= mvebu_hwcc_notifier
,
295 static void __init
armada_370_coherency_init(struct device_node
*np
)
299 of_address_to_resource(np
, 0, &res
);
300 coherency_phys_base
= res
.start
;
302 * Ensure secondary CPUs will see the updated value,
303 * which they read before they join the coherency
304 * fabric, and therefore before they are coherent with
305 * the boot CPU cache.
307 sync_cache_w(&coherency_phys_base
);
308 coherency_base
= of_iomap(np
, 0);
309 coherency_cpu_base
= of_iomap(np
, 1);
/*
 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
 * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
 * is needed as a workaround for a deadlock issue between the PCIe
 * interface and the cache controller.
 */
319 static void __iomem
*
320 armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr
, size_t size
,
321 unsigned int mtype
, void *caller
)
323 struct resource pcie_mem
;
325 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem
);
327 if (pcie_mem
.start
<= phys_addr
&& (phys_addr
+ size
) <= pcie_mem
.end
)
330 return __arm_ioremap_caller(phys_addr
, size
, mtype
, caller
);
333 static void __init
armada_375_380_coherency_init(struct device_node
*np
)
335 struct device_node
*cache_dn
;
337 coherency_cpu_base
= of_iomap(np
, 0);
338 arch_ioremap_caller
= armada_pcie_wa_ioremap_caller
;
341 * Add the PL310 property "arm,io-coherent". This makes sure the
342 * outer sync operation is not used, which allows to
343 * workaround the system erratum that causes deadlocks when
344 * doing PCIe in an SMP situation on Armada 375 and Armada
347 for_each_compatible_node(cache_dn
, NULL
, "arm,pl310-cache") {
350 p
= kzalloc(sizeof(*p
), GFP_KERNEL
);
351 p
->name
= kstrdup("arm,io-coherent", GFP_KERNEL
);
352 of_add_property(cache_dn
, p
);
356 static int coherency_type(void)
358 struct device_node
*np
;
359 const struct of_device_id
*match
;
361 np
= of_find_matching_node_and_match(NULL
, of_coherency_table
, &match
);
363 int type
= (int) match
->data
;
365 /* Armada 370/XP coherency works in both UP and SMP */
366 if (type
== COHERENCY_FABRIC_TYPE_ARMADA_370_XP
)
369 /* Armada 375 coherency works only on SMP */
370 else if (type
== COHERENCY_FABRIC_TYPE_ARMADA_375
&& is_smp())
373 /* Armada 380 coherency works only on SMP */
374 else if (type
== COHERENCY_FABRIC_TYPE_ARMADA_380
&& is_smp())
378 return COHERENCY_FABRIC_TYPE_NONE
;
381 int coherency_available(void)
383 return coherency_type() != COHERENCY_FABRIC_TYPE_NONE
;
386 int __init
coherency_init(void)
388 int type
= coherency_type();
389 struct device_node
*np
;
391 np
= of_find_matching_node(NULL
, of_coherency_table
);
393 if (type
== COHERENCY_FABRIC_TYPE_ARMADA_370_XP
)
394 armada_370_coherency_init(np
);
395 else if (type
== COHERENCY_FABRIC_TYPE_ARMADA_375
||
396 type
== COHERENCY_FABRIC_TYPE_ARMADA_380
)
397 armada_375_380_coherency_init(np
);
402 static int __init
coherency_late_init(void)
404 int type
= coherency_type();
406 if (type
== COHERENCY_FABRIC_TYPE_NONE
)
409 if (type
== COHERENCY_FABRIC_TYPE_ARMADA_375
) {
412 if (mvebu_get_soc_id(&dev
, &rev
) == 0 &&
413 rev
== ARMADA_375_Z1_REV
)
414 armada_375_coherency_init_wa();
417 bus_register_notifier(&platform_bus_type
,
423 postcore_initcall(coherency_late_init
);
425 #if IS_ENABLED(CONFIG_PCI)
426 static int __init
coherency_pci_init(void)
428 if (coherency_available())
429 bus_register_notifier(&pci_bus_type
,
434 arch_initcall(coherency_pci_init
);