/*
 * Copyright (C) 2017 Marvell
 *
 * Hanna Hawa <hannah@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <dt-bindings/interrupt-controller/mvebu-icu.h>

#define ICU_SETSPI_NSR_AL	0x10
#define ICU_SETSPI_NSR_AH	0x14
#define ICU_CLRSPI_NSR_AL	0x18
#define ICU_CLRSPI_NSR_AH	0x1c
#define ICU_SET_SEI_AL		0x50
#define ICU_SET_SEI_AH		0x54
#define ICU_CLR_SEI_AL		0x58
#define ICU_CLR_SEI_AH		0x5C
#define ICU_INT_CFG(x)		(0x100 + 4 * (x))
#define ICU_INT_ENABLE		BIT(24)
#define ICU_IS_EDGE		BIT(28)
#define ICU_GROUP_SHIFT		29

#define ICU_MAX_IRQS		207
#define ICU_SATA0_ICU_ID	109
#define ICU_SATA1_ICU_ID	107

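/*
 * Summary inferred from the masks above and from mvebu_icu_write_msg():
 * each ICU_INT_CFG(i) entry describes one wired interrupt, with the low
 * bits carrying the SPI/event number taken from the MSI message data,
 * bit 24 enabling the entry, bit 28 marking it edge-triggered and bits
 * 29-31 selecting the destination group (NSR, SEI, ...).
 */
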
struct mvebu_icu_subset_data {
        unsigned int icu_group;
        unsigned int offset_set_ah;
        unsigned int offset_set_al;
        unsigned int offset_clr_ah;
        unsigned int offset_clr_al;
};

struct mvebu_icu {
        void __iomem *base;
        struct device *dev;
};

struct mvebu_icu_msi_data {
        struct mvebu_icu *icu;
        atomic_t initialized;
        const struct mvebu_icu_subset_data *subset_data;
};

struct mvebu_icu_irq_data {
        struct mvebu_icu *icu;
        unsigned int icu_group;
        unsigned int type;
};

static DEFINE_STATIC_KEY_FALSE(legacy_bindings);

static void mvebu_icu_init(struct mvebu_icu *icu,
                           struct mvebu_icu_msi_data *msi_data,
                           struct msi_msg *msg)
{
        const struct mvebu_icu_subset_data *subset = msi_data->subset_data;

        if (atomic_cmpxchg(&msi_data->initialized, false, true))
                return;

        /* Set 'SET' ICU SPI message address in AP */
        writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
        writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);

        if (subset->icu_group != ICU_GRP_NSR)
                return;

        /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
        writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
        writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
}

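/*
 * platform-MSI write_msg callback: the MSI parent composes the message
 * and this callback programs the matching ICU entry with the SPI/event
 * number carried in msg->data. A zero message address means the
 * interrupt is being torn down, in which case the entry is cleared.
 */
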
static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        struct irq_data *d = irq_get_irq_data(desc->irq);
        struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain);
        struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
        struct mvebu_icu *icu = icu_irqd->icu;
        unsigned int icu_int;

        if (msg->address_lo || msg->address_hi) {
                /* One off initialization per domain */
                mvebu_icu_init(icu, msi_data, msg);
                /* Configure the ICU with irq number & type */
                icu_int = msg->data | ICU_INT_ENABLE;
                if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
                        icu_int |= ICU_IS_EDGE;
                icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT;
        } else {
                /* De-configure the ICU */
                icu_int = 0;
        }

        writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));

        /*
         * The SATA unit has 2 ports, and a dedicated ICU entry per
         * port. The ahci sata driver supports only one irq interrupt
         * per SATA unit. To solve this conflict, we configure the 2
         * SATA wired interrupts in the south bridge into 1 GIC
         * interrupt in the north bridge. Even if only a single port
         * is enabled, if sata node is enabled, both interrupts are
         * configured (regardless of which port is actually in use).
         */
        if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
                writel_relaxed(icu_int,
                               icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
                writel_relaxed(icu_int,
                               icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
        }
}

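/*
 * Two stacked irq_chips, both forwarding operations to the parent
 * domain: NSR interrupts are level type and use irq_eoi, while SEI
 * interrupts are edge type and use irq_ack instead.
 */
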
static struct irq_chip mvebu_icu_nsr_chip = {
        .name			= "ICU-NSR",
        .irq_mask		= irq_chip_mask_parent,
        .irq_unmask		= irq_chip_unmask_parent,
        .irq_eoi		= irq_chip_eoi_parent,
        .irq_set_type		= irq_chip_set_type_parent,
        .irq_set_affinity	= irq_chip_set_affinity_parent,
};

static struct irq_chip mvebu_icu_sei_chip = {
        .name			= "ICU-SEI",
        .irq_ack		= irq_chip_ack_parent,
        .irq_mask		= irq_chip_mask_parent,
        .irq_unmask		= irq_chip_unmask_parent,
        .irq_set_type		= irq_chip_set_type_parent,
        .irq_set_affinity	= irq_chip_set_affinity_parent,
};

static int
mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
                               unsigned long *hwirq, unsigned int *type)
{
        struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d);
        struct mvebu_icu *icu = msi_data->icu;
        unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;

        /* Check the count of the parameters in dt */
        if (WARN_ON(fwspec->param_count != param_count)) {
                dev_err(icu->dev, "wrong ICU parameter count %d\n",
                        fwspec->param_count);
                return -EINVAL;
        }

        if (static_branch_unlikely(&legacy_bindings)) {
                *hwirq = fwspec->param[1];
                *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
                if (fwspec->param[0] != ICU_GRP_NSR) {
                        dev_err(icu->dev, "wrong ICU group type %x\n",
                                fwspec->param[0]);
                        return -EINVAL;
                }
        } else {
                *hwirq = fwspec->param[0];
                *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;

                /*
                 * The ICU receives level interrupts. While the NSR are also
                 * level interrupts, SEI are edge interrupts. Force the type
                 * here in this case. Please note that this makes the interrupt
                 * handling unreliable.
                 */
                if (msi_data->subset_data->icu_group == ICU_GRP_SEI)
                        *type = IRQ_TYPE_EDGE_RISING;
        }

        if (*hwirq >= ICU_MAX_IRQS) {
                dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
                return -EINVAL;
        }

        return 0;
}

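/*
 * For reference, consumers select an ICU line either with the legacy
 * three-cell specifier pointing at the ICU node itself, e.g.
 *   interrupts = <ICU_GRP_NSR 106 IRQ_TYPE_LEVEL_HIGH>;
 * or with the current two-cell specifier pointing at a per-group child
 * node, e.g.
 *   interrupts-extended = <&cp0_icu_nsr 106 IRQ_TYPE_LEVEL_HIGH>;
 * (illustrative values and phandle name; see the marvell,icu devicetree
 * binding for the authoritative format).
 */
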
static int
mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                           unsigned int nr_irqs, void *args)
{
        int err;
        unsigned long hwirq;
        struct irq_fwspec *fwspec = args;
        struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain);
        struct mvebu_icu *icu = msi_data->icu;
        struct mvebu_icu_irq_data *icu_irqd;
        struct irq_chip *chip = &mvebu_icu_nsr_chip;

        icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
        if (!icu_irqd)
                return -ENOMEM;

        err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq,
                                             &icu_irqd->type);
        if (err) {
                dev_err(icu->dev, "failed to translate ICU parameters\n");
                goto free_irqd;
        }

        if (static_branch_unlikely(&legacy_bindings))
                icu_irqd->icu_group = fwspec->param[0];
        else
                icu_irqd->icu_group = msi_data->subset_data->icu_group;
        icu_irqd->icu = icu;

        err = platform_msi_domain_alloc(domain, virq, nr_irqs);
        if (err) {
                dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
                goto free_irqd;
        }

        /* Make sure there is no interrupt left pending by the firmware */
        err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
        if (err)
                goto free_msi;

        if (icu_irqd->icu_group == ICU_GRP_SEI)
                chip = &mvebu_icu_sei_chip;

        err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
                                            chip, icu_irqd);
        if (err) {
                dev_err(icu->dev, "failed to set the data to IRQ domain\n");
                goto free_msi;
        }

        return 0;

free_msi:
        platform_msi_domain_free(domain, virq, nr_irqs);
free_irqd:
        kfree(icu_irqd);
        return err;
}

static void
mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                          unsigned int nr_irqs)
{
        struct irq_data *d = irq_get_irq_data(virq);
        struct mvebu_icu_irq_data *icu_irqd = d->chip_data;

        kfree(icu_irqd);

        platform_msi_domain_free(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mvebu_icu_domain_ops = {
        .translate = mvebu_icu_irq_domain_translate,
        .alloc     = mvebu_icu_irq_domain_alloc,
        .free      = mvebu_icu_irq_domain_free,
};

static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
        .icu_group = ICU_GRP_NSR,
        .offset_set_ah = ICU_SETSPI_NSR_AH,
        .offset_set_al = ICU_SETSPI_NSR_AL,
        .offset_clr_ah = ICU_CLRSPI_NSR_AH,
        .offset_clr_al = ICU_CLRSPI_NSR_AL,
};

static const struct mvebu_icu_subset_data mvebu_icu_sei_subset_data = {
        .icu_group = ICU_GRP_SEI,
        .offset_set_ah = ICU_SET_SEI_AH,
        .offset_set_al = ICU_SET_SEI_AL,
};

static const struct of_device_id mvebu_icu_subset_of_match[] = {
        {
                .compatible = "marvell,cp110-icu-nsr",
                .data = &mvebu_icu_nsr_subset_data,
        },
        {
                .compatible = "marvell,cp110-icu-sei",
                .data = &mvebu_icu_sei_subset_data,
        },
        {},
};

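/*
 * With the current bindings the "marvell,cp110-icu" node has one child
 * per interrupt group (NSR, SEI); each child is matched above and probed
 * as its own platform device with its own MSI parent.
 */
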
static int mvebu_icu_subset_probe(struct platform_device *pdev)
{
        struct mvebu_icu_msi_data *msi_data;
        struct device_node *msi_parent_dn;
        struct device *dev = &pdev->dev;
        struct irq_domain *irq_domain;

        msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
        if (!msi_data)
                return -ENOMEM;

        if (static_branch_unlikely(&legacy_bindings)) {
                msi_data->icu = dev_get_drvdata(dev);
                msi_data->subset_data = &mvebu_icu_nsr_subset_data;
        } else {
                msi_data->icu = dev_get_drvdata(dev->parent);
                msi_data->subset_data = of_device_get_match_data(dev);
        }

        dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
                                            DOMAIN_BUS_PLATFORM_MSI);
        if (!dev->msi_domain)
                return -EPROBE_DEFER;

        msi_parent_dn = irq_domain_get_of_node(dev->msi_domain);
        if (!msi_parent_dn)
                return -ENODEV;

        irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
                                                            mvebu_icu_write_msg,
                                                            &mvebu_icu_domain_ops,
                                                            msi_data);
        if (!irq_domain) {
                dev_err(dev, "Failed to create ICU MSI domain\n");
                return -ENOMEM;
        }

        return 0;
}

static struct platform_driver mvebu_icu_subset_driver = {
        .probe  = mvebu_icu_subset_probe,
        .driver = {
                .name = "mvebu-icu-subset",
                .of_match_table = mvebu_icu_subset_of_match,
        },
};
builtin_platform_driver(mvebu_icu_subset_driver);

static int mvebu_icu_probe(struct platform_device *pdev)
{
        struct mvebu_icu *icu;
        struct resource *res;
        int i;

        icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
                           GFP_KERNEL);
        if (!icu)
                return -ENOMEM;

        icu->dev = &pdev->dev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        icu->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(icu->base)) {
                dev_err(&pdev->dev, "Failed to map icu base address.\n");
                return PTR_ERR(icu->base);
        }

        /*
         * Legacy bindings: ICU is one node with one MSI parent: force manually
         * the probe of the NSR interrupts side.
         * New bindings: ICU node has children, one per interrupt controller
         * having its own MSI parent: call platform_populate().
         * All ICU instances should use the same bindings.
         */
        if (!of_get_child_count(pdev->dev.of_node))
                static_branch_enable(&legacy_bindings);

        /*
         * Clean all ICU interrupts of type NSR and SEI, required to
         * avoid unpredictable SPI assignments done by firmware.
         */
        for (i = 0; i < ICU_MAX_IRQS; i++) {
                u32 icu_int, icu_grp;

                icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
                icu_grp = icu_int >> ICU_GROUP_SHIFT;

                if (icu_grp == ICU_GRP_NSR ||
                    (icu_grp == ICU_GRP_SEI &&
                     !static_branch_unlikely(&legacy_bindings)))
                        writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
        }

        platform_set_drvdata(pdev, icu);

        if (static_branch_unlikely(&legacy_bindings))
                return mvebu_icu_subset_probe(pdev);

        return devm_of_platform_populate(&pdev->dev);
}

static const struct of_device_id mvebu_icu_of_match[] = {
        { .compatible = "marvell,cp110-icu", },
        {},
};

static struct platform_driver mvebu_icu_driver = {
        .probe  = mvebu_icu_probe,
        .driver = {
                .name = "mvebu-icu",
                .of_match_table = mvebu_icu_of_match,
        },
};
builtin_platform_driver(mvebu_icu_driver);