// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "mvebu-sei: " fmt

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
/* Cause (SEI Cause) registers, one per bank of 32 interrupts */
#define GICP_SECR(idx)		(0x0  + ((idx) * 0x4))
/* Mask (SEI Mask) registers, one per bank of 32 interrupts */
#define GICP_SEMR(idx)		(0x20 + ((idx) * 0x4))
/* Doorbell register written by the CPs to trigger an SEI (MSI target) */
#define GICP_SET_SEI_OFFSET	0x30

#define SEI_IRQ_COUNT_PER_REG	32
#define SEI_IRQ_REG_COUNT	2
#define SEI_IRQ_COUNT		(SEI_IRQ_COUNT_PER_REG * SEI_IRQ_REG_COUNT)
/* Which 32-bit cause/mask register an interrupt ID falls into */
#define SEI_IRQ_REG_IDX(irq_id)	((irq_id) / SEI_IRQ_COUNT_PER_REG)
/* Bit position of an interrupt ID within its register */
#define SEI_IRQ_REG_BIT(irq_id)	((irq_id) % SEI_IRQ_COUNT_PER_REG)
29 struct mvebu_sei_interrupt_range
{
34 struct mvebu_sei_caps
{
35 struct mvebu_sei_interrupt_range ap_range
;
36 struct mvebu_sei_interrupt_range cp_range
;
43 struct irq_domain
*sei_domain
;
44 struct irq_domain
*ap_domain
;
45 struct irq_domain
*cp_domain
;
46 const struct mvebu_sei_caps
*caps
;
48 /* Lock on MSI allocations/releases */
49 struct mutex cp_msi_lock
;
50 DECLARE_BITMAP(cp_msi_bitmap
, SEI_IRQ_COUNT
);
52 /* Lock on IRQ masking register */
53 raw_spinlock_t mask_lock
;
56 static void mvebu_sei_ack_irq(struct irq_data
*d
)
58 struct mvebu_sei
*sei
= irq_data_get_irq_chip_data(d
);
59 u32 reg_idx
= SEI_IRQ_REG_IDX(d
->hwirq
);
61 writel_relaxed(BIT(SEI_IRQ_REG_BIT(d
->hwirq
)),
62 sei
->base
+ GICP_SECR(reg_idx
));
65 static void mvebu_sei_mask_irq(struct irq_data
*d
)
67 struct mvebu_sei
*sei
= irq_data_get_irq_chip_data(d
);
68 u32 reg
, reg_idx
= SEI_IRQ_REG_IDX(d
->hwirq
);
71 /* 1 disables the interrupt */
72 raw_spin_lock_irqsave(&sei
->mask_lock
, flags
);
73 reg
= readl_relaxed(sei
->base
+ GICP_SEMR(reg_idx
));
74 reg
|= BIT(SEI_IRQ_REG_BIT(d
->hwirq
));
75 writel_relaxed(reg
, sei
->base
+ GICP_SEMR(reg_idx
));
76 raw_spin_unlock_irqrestore(&sei
->mask_lock
, flags
);
79 static void mvebu_sei_unmask_irq(struct irq_data
*d
)
81 struct mvebu_sei
*sei
= irq_data_get_irq_chip_data(d
);
82 u32 reg
, reg_idx
= SEI_IRQ_REG_IDX(d
->hwirq
);
85 /* 0 enables the interrupt */
86 raw_spin_lock_irqsave(&sei
->mask_lock
, flags
);
87 reg
= readl_relaxed(sei
->base
+ GICP_SEMR(reg_idx
));
88 reg
&= ~BIT(SEI_IRQ_REG_BIT(d
->hwirq
));
89 writel_relaxed(reg
, sei
->base
+ GICP_SEMR(reg_idx
));
90 raw_spin_unlock_irqrestore(&sei
->mask_lock
, flags
);
93 static int mvebu_sei_set_affinity(struct irq_data
*d
,
94 const struct cpumask
*mask_val
,
100 static int mvebu_sei_set_irqchip_state(struct irq_data
*d
,
101 enum irqchip_irq_state which
,
104 /* We can only clear the pending state by acking the interrupt */
105 if (which
!= IRQCHIP_STATE_PENDING
|| state
)
108 mvebu_sei_ack_irq(d
);
112 static struct irq_chip mvebu_sei_irq_chip
= {
114 .irq_ack
= mvebu_sei_ack_irq
,
115 .irq_mask
= mvebu_sei_mask_irq
,
116 .irq_unmask
= mvebu_sei_unmask_irq
,
117 .irq_set_affinity
= mvebu_sei_set_affinity
,
118 .irq_set_irqchip_state
= mvebu_sei_set_irqchip_state
,
121 static int mvebu_sei_ap_set_type(struct irq_data
*data
, unsigned int type
)
123 if ((type
& IRQ_TYPE_SENSE_MASK
) != IRQ_TYPE_LEVEL_HIGH
)
129 static struct irq_chip mvebu_sei_ap_irq_chip
= {
131 .irq_ack
= irq_chip_ack_parent
,
132 .irq_mask
= irq_chip_mask_parent
,
133 .irq_unmask
= irq_chip_unmask_parent
,
134 .irq_set_affinity
= irq_chip_set_affinity_parent
,
135 .irq_set_type
= mvebu_sei_ap_set_type
,
138 static void mvebu_sei_cp_compose_msi_msg(struct irq_data
*data
,
141 struct mvebu_sei
*sei
= data
->chip_data
;
142 phys_addr_t set
= sei
->res
->start
+ GICP_SET_SEI_OFFSET
;
144 msg
->data
= data
->hwirq
+ sei
->caps
->cp_range
.first
;
145 msg
->address_lo
= lower_32_bits(set
);
146 msg
->address_hi
= upper_32_bits(set
);
149 static int mvebu_sei_cp_set_type(struct irq_data
*data
, unsigned int type
)
151 if ((type
& IRQ_TYPE_SENSE_MASK
) != IRQ_TYPE_EDGE_RISING
)
157 static struct irq_chip mvebu_sei_cp_irq_chip
= {
159 .irq_ack
= irq_chip_ack_parent
,
160 .irq_mask
= irq_chip_mask_parent
,
161 .irq_unmask
= irq_chip_unmask_parent
,
162 .irq_set_affinity
= irq_chip_set_affinity_parent
,
163 .irq_set_type
= mvebu_sei_cp_set_type
,
164 .irq_compose_msi_msg
= mvebu_sei_cp_compose_msi_msg
,
167 static int mvebu_sei_domain_alloc(struct irq_domain
*domain
, unsigned int virq
,
168 unsigned int nr_irqs
, void *arg
)
170 struct mvebu_sei
*sei
= domain
->host_data
;
171 struct irq_fwspec
*fwspec
= arg
;
173 /* Not much to do, just setup the irqdata */
174 irq_domain_set_hwirq_and_chip(domain
, virq
, fwspec
->param
[0],
175 &mvebu_sei_irq_chip
, sei
);
180 static void mvebu_sei_domain_free(struct irq_domain
*domain
, unsigned int virq
,
181 unsigned int nr_irqs
)
185 for (i
= 0; i
< nr_irqs
; i
++) {
186 struct irq_data
*d
= irq_domain_get_irq_data(domain
, virq
+ i
);
187 irq_set_handler(virq
+ i
, NULL
);
188 irq_domain_reset_irq_data(d
);
192 static const struct irq_domain_ops mvebu_sei_domain_ops
= {
193 .alloc
= mvebu_sei_domain_alloc
,
194 .free
= mvebu_sei_domain_free
,
197 static int mvebu_sei_ap_translate(struct irq_domain
*domain
,
198 struct irq_fwspec
*fwspec
,
199 unsigned long *hwirq
,
202 *hwirq
= fwspec
->param
[0];
203 *type
= IRQ_TYPE_LEVEL_HIGH
;
208 static int mvebu_sei_ap_alloc(struct irq_domain
*domain
, unsigned int virq
,
209 unsigned int nr_irqs
, void *arg
)
211 struct mvebu_sei
*sei
= domain
->host_data
;
212 struct irq_fwspec fwspec
;
217 mvebu_sei_ap_translate(domain
, arg
, &hwirq
, &type
);
219 fwspec
.fwnode
= domain
->parent
->fwnode
;
220 fwspec
.param_count
= 1;
221 fwspec
.param
[0] = hwirq
+ sei
->caps
->ap_range
.first
;
223 err
= irq_domain_alloc_irqs_parent(domain
, virq
, 1, &fwspec
);
227 irq_domain_set_info(domain
, virq
, hwirq
,
228 &mvebu_sei_ap_irq_chip
, sei
,
229 handle_level_irq
, NULL
, NULL
);
235 static const struct irq_domain_ops mvebu_sei_ap_domain_ops
= {
236 .translate
= mvebu_sei_ap_translate
,
237 .alloc
= mvebu_sei_ap_alloc
,
238 .free
= irq_domain_free_irqs_parent
,
241 static void mvebu_sei_cp_release_irq(struct mvebu_sei
*sei
, unsigned long hwirq
)
243 mutex_lock(&sei
->cp_msi_lock
);
244 clear_bit(hwirq
, sei
->cp_msi_bitmap
);
245 mutex_unlock(&sei
->cp_msi_lock
);
248 static int mvebu_sei_cp_domain_alloc(struct irq_domain
*domain
,
249 unsigned int virq
, unsigned int nr_irqs
,
252 struct mvebu_sei
*sei
= domain
->host_data
;
253 struct irq_fwspec fwspec
;
257 /* The software only supports single allocations for now */
261 mutex_lock(&sei
->cp_msi_lock
);
262 hwirq
= find_first_zero_bit(sei
->cp_msi_bitmap
,
263 sei
->caps
->cp_range
.size
);
264 if (hwirq
< sei
->caps
->cp_range
.size
)
265 set_bit(hwirq
, sei
->cp_msi_bitmap
);
266 mutex_unlock(&sei
->cp_msi_lock
);
268 if (hwirq
== sei
->caps
->cp_range
.size
)
271 fwspec
.fwnode
= domain
->parent
->fwnode
;
272 fwspec
.param_count
= 1;
273 fwspec
.param
[0] = hwirq
+ sei
->caps
->cp_range
.first
;
275 ret
= irq_domain_alloc_irqs_parent(domain
, virq
, 1, &fwspec
);
279 irq_domain_set_info(domain
, virq
, hwirq
,
280 &mvebu_sei_cp_irq_chip
, sei
,
281 handle_edge_irq
, NULL
, NULL
);
286 mvebu_sei_cp_release_irq(sei
, hwirq
);
290 static void mvebu_sei_cp_domain_free(struct irq_domain
*domain
,
291 unsigned int virq
, unsigned int nr_irqs
)
293 struct mvebu_sei
*sei
= domain
->host_data
;
294 struct irq_data
*d
= irq_domain_get_irq_data(domain
, virq
);
296 if (nr_irqs
!= 1 || d
->hwirq
>= sei
->caps
->cp_range
.size
) {
297 dev_err(sei
->dev
, "Invalid hwirq %lu\n", d
->hwirq
);
301 mvebu_sei_cp_release_irq(sei
, d
->hwirq
);
302 irq_domain_free_irqs_parent(domain
, virq
, 1);
305 static const struct irq_domain_ops mvebu_sei_cp_domain_ops
= {
306 .alloc
= mvebu_sei_cp_domain_alloc
,
307 .free
= mvebu_sei_cp_domain_free
,
310 static struct irq_chip mvebu_sei_msi_irq_chip
= {
312 .irq_ack
= irq_chip_ack_parent
,
313 .irq_set_type
= irq_chip_set_type_parent
,
316 static struct msi_domain_ops mvebu_sei_msi_ops
= {
319 static struct msi_domain_info mvebu_sei_msi_domain_info
= {
320 .flags
= MSI_FLAG_USE_DEF_DOM_OPS
| MSI_FLAG_USE_DEF_CHIP_OPS
,
321 .ops
= &mvebu_sei_msi_ops
,
322 .chip
= &mvebu_sei_msi_irq_chip
,
325 static void mvebu_sei_handle_cascade_irq(struct irq_desc
*desc
)
327 struct mvebu_sei
*sei
= irq_desc_get_handler_data(desc
);
328 struct irq_chip
*chip
= irq_desc_get_chip(desc
);
331 chained_irq_enter(chip
, desc
);
333 for (idx
= 0; idx
< SEI_IRQ_REG_COUNT
; idx
++) {
334 unsigned long irqmap
;
337 irqmap
= readl_relaxed(sei
->base
+ GICP_SECR(idx
));
338 for_each_set_bit(bit
, &irqmap
, SEI_IRQ_COUNT_PER_REG
) {
342 hwirq
= idx
* SEI_IRQ_COUNT_PER_REG
+ bit
;
343 virq
= irq_find_mapping(sei
->sei_domain
, hwirq
);
345 generic_handle_irq(virq
);
350 "Spurious IRQ detected (hwirq %lu)\n", hwirq
);
354 chained_irq_exit(chip
, desc
);
357 static void mvebu_sei_reset(struct mvebu_sei
*sei
)
361 /* Clear IRQ cause registers, mask all interrupts */
362 for (reg_idx
= 0; reg_idx
< SEI_IRQ_REG_COUNT
; reg_idx
++) {
363 writel_relaxed(0xFFFFFFFF, sei
->base
+ GICP_SECR(reg_idx
));
364 writel_relaxed(0xFFFFFFFF, sei
->base
+ GICP_SEMR(reg_idx
));
368 static int mvebu_sei_probe(struct platform_device
*pdev
)
370 struct device_node
*node
= pdev
->dev
.of_node
;
371 struct irq_domain
*plat_domain
;
372 struct mvebu_sei
*sei
;
376 sei
= devm_kzalloc(&pdev
->dev
, sizeof(*sei
), GFP_KERNEL
);
380 sei
->dev
= &pdev
->dev
;
382 mutex_init(&sei
->cp_msi_lock
);
383 raw_spin_lock_init(&sei
->mask_lock
);
385 sei
->res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
386 sei
->base
= devm_ioremap_resource(sei
->dev
, sei
->res
);
387 if (IS_ERR(sei
->base
)) {
388 dev_err(sei
->dev
, "Failed to remap SEI resource\n");
389 return PTR_ERR(sei
->base
);
392 /* Retrieve the SEI capabilities with the interrupt ranges */
393 sei
->caps
= of_device_get_match_data(&pdev
->dev
);
396 "Could not retrieve controller capabilities\n");
401 * Reserve the single (top-level) parent SPI IRQ from which all the
402 * interrupts handled by this driver will be signaled.
404 parent_irq
= irq_of_parse_and_map(node
, 0);
405 if (parent_irq
<= 0) {
406 dev_err(sei
->dev
, "Failed to retrieve top-level SPI IRQ\n");
410 /* Create the root SEI domain */
411 sei
->sei_domain
= irq_domain_create_linear(of_node_to_fwnode(node
),
412 (sei
->caps
->ap_range
.size
+
413 sei
->caps
->cp_range
.size
),
414 &mvebu_sei_domain_ops
,
416 if (!sei
->sei_domain
) {
417 dev_err(sei
->dev
, "Failed to create SEI IRQ domain\n");
422 irq_domain_update_bus_token(sei
->sei_domain
, DOMAIN_BUS_NEXUS
);
424 /* Create the 'wired' domain */
425 sei
->ap_domain
= irq_domain_create_hierarchy(sei
->sei_domain
, 0,
426 sei
->caps
->ap_range
.size
,
427 of_node_to_fwnode(node
),
428 &mvebu_sei_ap_domain_ops
,
430 if (!sei
->ap_domain
) {
431 dev_err(sei
->dev
, "Failed to create AP IRQ domain\n");
433 goto remove_sei_domain
;
436 irq_domain_update_bus_token(sei
->ap_domain
, DOMAIN_BUS_WIRED
);
438 /* Create the 'MSI' domain */
439 sei
->cp_domain
= irq_domain_create_hierarchy(sei
->sei_domain
, 0,
440 sei
->caps
->cp_range
.size
,
441 of_node_to_fwnode(node
),
442 &mvebu_sei_cp_domain_ops
,
444 if (!sei
->cp_domain
) {
445 pr_err("Failed to create CPs IRQ domain\n");
447 goto remove_ap_domain
;
450 irq_domain_update_bus_token(sei
->cp_domain
, DOMAIN_BUS_GENERIC_MSI
);
452 plat_domain
= platform_msi_create_irq_domain(of_node_to_fwnode(node
),
453 &mvebu_sei_msi_domain_info
,
456 pr_err("Failed to create CPs MSI domain\n");
458 goto remove_cp_domain
;
461 mvebu_sei_reset(sei
);
463 irq_set_chained_handler_and_data(parent_irq
,
464 mvebu_sei_handle_cascade_irq
,
470 irq_domain_remove(sei
->cp_domain
);
472 irq_domain_remove(sei
->ap_domain
);
474 irq_domain_remove(sei
->sei_domain
);
476 irq_dispose_mapping(parent_irq
);
481 static struct mvebu_sei_caps mvebu_sei_ap806_caps
= {
492 static const struct of_device_id mvebu_sei_of_match
[] = {
494 .compatible
= "marvell,ap806-sei",
495 .data
= &mvebu_sei_ap806_caps
,
500 static struct platform_driver mvebu_sei_driver
= {
501 .probe
= mvebu_sei_probe
,
504 .of_match_table
= mvebu_sei_of_match
,
507 builtin_platform_driver(mvebu_sei_driver
);