/*
 * APM X-Gene MSI Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Author: Tanmay Inamdar <tinamdar@apm.com>
 *         Duc Dang <dhdang@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_pci.h>
#define MSI_IR0			0x000000
#define MSI_INT0		0x800000
#define IDX_PER_GROUP		8
#define IRQS_PER_IDX		16
#define NR_HW_IRQS		16
#define NR_MSI_VEC		(IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
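/* 8 index registers * 16 vectors * 16 groups = 2048 MSI vectors in total */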
struct xgene_msi_group {
	struct xgene_msi	*msi;
	int			gic_irq;
	u32			msi_grp;
};

struct xgene_msi {
	struct device_node	*node;
	struct irq_domain	*inner_domain;
	struct irq_domain	*msi_domain;
	u64			msi_addr;
	void __iomem		*msi_regs;
	unsigned long		*bitmap;
	struct mutex		bitmap_lock;
	struct xgene_msi_group	*msi_groups;
	int			num_cpus;
};
static struct xgene_msi xgene_msi_ctrl;
static struct irq_chip xgene_msi_top_irq_chip = {
	.name		= "X-Gene1 MSI",
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};
static struct msi_domain_info xgene_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &xgene_msi_top_irq_chip,
};
/*
 * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
 * n is the group number (0..F) and x is the index of the register within
 * the group (0..7).  The register layout is as follows:
 * MSI0IR0			base_addr
 * MSI0IR1			base_addr +  0x10000
 * ...				...
 * MSI0IR6			base_addr +  0x60000
 * MSI0IR7			base_addr +  0x70000
 * MSI1IR0			base_addr +  0x80000
 * MSI1IR1			base_addr +  0x90000
 * ...				...
 * MSI1IR7			base_addr +  0xF0000
 * MSI2IR0			base_addr + 0x100000
 * ...				...
 * MSIFIR0			base_addr + 0x780000
 * MSIFIR1			base_addr + 0x790000
 * ...				...
 * MSIFIR7			base_addr + 0x7F0000
 * MSIINT0			base_addr + 0x800000
 * MSIINT1			base_addr + 0x810000
 * ...				...
 * MSIINTF			base_addr + 0x8F0000
 *
 * Each index register supports 16 MSI vectors (0..15) to generate interrupts.
 * There are a total of 16 GIC IRQs assigned for these 16 groups of MSI
 * termination registers.
 *
 * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate
 * the MSI pending status caused by 1 of its 8 index registers.
 */
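/*
 * Worked example (illustrative): termination register MSI3IR5 belongs to
 * group n = 3, index x = 5, so it sits at offset (3 << 19) + (5 << 16) =
 * 0x180000 + 0x50000 = 0x1D0000 from base_addr, matching the 0x80000
 * per-group and 0x10000 per-index stride used by the read helpers below.
 */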
/* MSInIRx read helper */
static u32 xgene_msi_ir_read(struct xgene_msi *msi,
			     u32 msi_grp, u32 msir_idx)
{
	return readl_relaxed(msi->msi_regs + MSI_IR0 +
			     (msi_grp << 19) + (msir_idx << 16));
}
/* MSIINTn read helper */
static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
{
	return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
}
/*
 * With 2048 MSI vectors supported, the MSI message can be constructed using
 * the following scheme:
 * - Divide the vectors into 8 256-vector groups
 *	Group 0: 0-255, Group 1: 256-511, ..., Group 7: 1792-2047
 * - Each 256-vector group is divided into 16 16-vector groups
 *	As an example, the 16 16-vector groups for 256-vector group 0-255 are:
 *	Group 0: 0-15, Group 1: 16-31, ..., Group 15: 240-255
 * - The termination address of an MSI vector in 256-vector group n and
 *   16-vector group x is the address of MSIxIRn
 * - The data for an MSI vector in 16-vector group x is x
 */
static u32 hwirq_to_reg_set(unsigned long hwirq)
{
	return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
}
static u32 hwirq_to_group(unsigned long hwirq)
{
	return (hwirq % NR_HW_IRQS);
}
static u32 hwirq_to_msi_data(unsigned long hwirq)
{
	return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
}
static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
	u32 reg_set = hwirq_to_reg_set(data->hwirq);
	u32 group = hwirq_to_group(data->hwirq);
	u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);

	msg->address_hi = upper_32_bits(target_addr);
	msg->address_lo = lower_32_bits(target_addr);
	msg->data = hwirq_to_msi_data(data->hwirq);
}
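/*
 * Worked example (illustrative): for hwirq 300, reg_set = 300 / 256 = 1,
 * group = 300 % 16 = 12 (0xC) and data = (300 / 16) % 16 = 2, so the MSI
 * message targets msi_addr + ((8 * 12 + 1) << 16), i.e. register MSICIR1,
 * with payload 2.  The ISR below reverses this mapping:
 * ((1 * 16) + 2) * 16 + 12 gives back hwirq 300.
 */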
/*
 * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors.  To maintain
 * the expected behaviour of .set_affinity for each MSI interrupt, the 16
 * MSI GIC IRQs are statically allocated to the 8 X-Gene v1 cores (2 GIC
 * IRQs for each core).  The MSI vector is moved from one MSI GIC IRQ to
 * another to steer its MSI interrupt to the correct X-Gene v1 core.  As a
 * consequence, the total number of MSI vectors that X-Gene v1 supports is
 * reduced to 256 (2048/8).
 */
static int hwirq_to_cpu(unsigned long hwirq)
{
	return (hwirq % xgene_msi_ctrl.num_cpus);
}
static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
{
	return (hwirq - hwirq_to_cpu(hwirq));
}
static int xgene_msi_set_affinity(struct irq_data *irqdata,
				  const struct cpumask *mask, bool force)
{
	int target_cpu = cpumask_first(mask);
	int curr_cpu;

	curr_cpu = hwirq_to_cpu(irqdata->hwirq);
	if (curr_cpu == target_cpu)
		return IRQ_SET_MASK_OK_DONE;

	/* Update MSI number to target the new CPU */
	irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;

	return IRQ_SET_MASK_OK;
}
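/*
 * Illustrative example (assuming 8 possible CPUs): hwirqs 256..263 all
 * name the same logical MSI, whose canonical hwirq is 256.  Retargeting
 * it from CPU 2 to CPU 5 rewrites the hwirq from 258 to 261, so the next
 * irq_compose_msi_msg() picks a termination register whose GIC IRQ is
 * affine to CPU 5.
 */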
static struct irq_chip xgene_msi_bottom_irq_chip = {
	.name			= "MSI",
	.irq_set_affinity	= xgene_msi_set_affinity,
	.irq_compose_msi_msg	= xgene_compose_msi_msg,
};
static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct xgene_msi *msi = domain->host_data;
	int msi_irq;

	mutex_lock(&msi->bitmap_lock);

	msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
					     msi->num_cpus, 0);
	if (msi_irq < NR_MSI_VEC)
		bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
	else
		msi_irq = -ENOSPC;

	mutex_unlock(&msi->bitmap_lock);

	if (msi_irq < 0)
		return msi_irq;

	irq_domain_set_info(domain, virq, msi_irq,
			    &xgene_msi_bottom_irq_chip, domain->host_data,
			    handle_simple_irq, NULL, NULL);

	return 0;
}
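/*
 * Note (illustrative): each allocation claims num_cpus consecutive hwirqs,
 * one slot per possible CPU, so that xgene_msi_set_affinity() can steer the
 * vector by selecting an offset within that block.  Only the base (CPU0,
 * canonical) hwirq is installed via irq_domain_set_info() and it is
 * recovered with hwirq_to_canonical_hwirq() on free.
 */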
static void xgene_irq_domain_free(struct irq_domain *domain,
				  unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
	u32 hwirq;

	mutex_lock(&msi->bitmap_lock);

	hwirq = hwirq_to_canonical_hwirq(d->hwirq);
	bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);

	mutex_unlock(&msi->bitmap_lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= xgene_irq_domain_alloc,
	.free	= xgene_irq_domain_free,
};
static int xgene_allocate_domains(struct xgene_msi *msi)
{
	msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain)
		return -ENOMEM;

	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
						    &xgene_msi_domain_info,
						    msi->inner_domain);

	if (!msi->msi_domain) {
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}
static void xgene_free_domains(struct xgene_msi *msi)
{
	if (msi->msi_domain)
		irq_domain_remove(msi->msi_domain);
	if (msi->inner_domain)
		irq_domain_remove(msi->inner_domain);
}
static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
{
	int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);

	xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!xgene_msi->bitmap)
		return -ENOMEM;

	mutex_init(&xgene_msi->bitmap_lock);

	xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
					sizeof(struct xgene_msi_group),
					GFP_KERNEL);
	if (!xgene_msi->msi_groups)
		return -ENOMEM;

	return 0;
}
static void xgene_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct xgene_msi_group *msi_groups;
	struct xgene_msi *xgene_msi;
	unsigned int virq;
	int msir_index, msir_val, hw_irq;
	u32 intr_index, grp_select, msi_grp;

	chained_irq_enter(chip, desc);

	msi_groups = irq_desc_get_handler_data(desc);
	xgene_msi = msi_groups->msi;
	msi_grp = msi_groups->msi_grp;

	/*
	 * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt.
	 * If bit x of this register is set (x is 0..7), one or more interrupts
	 * corresponding to MSInIRx is set.
	 */
	grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
	while (grp_select) {
		msir_index = ffs(grp_select) - 1;
		/*
		 * Calculate MSInIRx address to read to check for interrupts
		 * (refer to termination address and data assignment
		 * described in xgene_compose_msi_msg())
		 */
		msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
		while (msir_val) {
			intr_index = ffs(msir_val) - 1;
			/*
			 * Calculate MSI vector number (refer to the termination
			 * address and data assignment described in
			 * xgene_compose_msi_msg())
			 */
			hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
				  NR_HW_IRQS) + msi_grp;
			/*
			 * As we have multiple hw_irqs that map to a single MSI,
			 * always look up the virq using the hw_irq as seen from
			 * CPU0.
			 */
			hw_irq = hwirq_to_canonical_hwirq(hw_irq);
			virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
			if (virq)
				generic_handle_irq(virq);
			msir_val &= ~(1 << intr_index);
		}
		grp_select &= ~(1 << msir_index);

		if (!grp_select) {
			/*
			 * We have handled all interrupts in this group;
			 * resample this group's MSI_INTx register in case
			 * something else has been made pending in the meantime.
			 */
			grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
		}
	}

	chained_irq_exit(chip, desc);
}
static enum cpuhp_state pci_xgene_online;
static int xgene_msi_remove(struct platform_device *pdev)
{
	struct xgene_msi *msi = platform_get_drvdata(pdev);

	if (pci_xgene_online)
		cpuhp_remove_state(pci_xgene_online);
	cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);

	kfree(msi->msi_groups);

	kfree(msi->bitmap);
	msi->bitmap = NULL;

	xgene_free_domains(msi);

	return 0;
}
static int xgene_msi_hwirq_alloc(unsigned int cpu)
{
	struct xgene_msi *msi = &xgene_msi_ctrl;
	struct xgene_msi_group *msi_group;
	cpumask_var_t mask;
	int i;
	int err;

	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
		msi_group = &msi->msi_groups[i];
		if (!msi_group->gic_irq)
			continue;

		irq_set_chained_handler(msi_group->gic_irq,
					xgene_msi_isr);
		err = irq_set_handler_data(msi_group->gic_irq, msi_group);
		if (err) {
			pr_err("failed to register GIC IRQ handler\n");
			return -EINVAL;
		}
		/*
		 * Statically allocate MSI GIC IRQs to each CPU core.
		 * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
		 * to each core.
		 */
		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			err = irq_set_affinity(msi_group->gic_irq, mask);
			if (err)
				pr_err("failed to set affinity for GIC IRQ");
			free_cpumask_var(mask);
		} else {
			pr_err("failed to alloc CPU mask for affinity\n");
			err = -EINVAL;
		}

		if (err) {
			irq_set_chained_handler_and_data(msi_group->gic_irq,
							 NULL, NULL);
			return err;
		}
	}

	return 0;
}
static int xgene_msi_hwirq_free(unsigned int cpu)
{
	struct xgene_msi *msi = &xgene_msi_ctrl;
	struct xgene_msi_group *msi_group;
	int i;

	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
		msi_group = &msi->msi_groups[i];
		if (!msi_group->gic_irq)
			continue;

		irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
						 NULL);
	}

	return 0;
}
static const struct of_device_id xgene_msi_match_table[] = {
	{.compatible = "apm,xgene1-msi"},
	{},
};
static int xgene_msi_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc, irq_index;
	struct xgene_msi *xgene_msi;
	int virt_msir;
	u32 msi_val, msi_idx;

	xgene_msi = &xgene_msi_ctrl;

	platform_set_drvdata(pdev, xgene_msi);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xgene_msi->msi_regs)) {
		dev_err(&pdev->dev, "no reg space\n");
		rc = PTR_ERR(xgene_msi->msi_regs);
		goto error;
	}
	xgene_msi->msi_addr = res->start;
	xgene_msi->node = pdev->dev.of_node;
	xgene_msi->num_cpus = num_possible_cpus();

	rc = xgene_msi_init_allocator(xgene_msi);
	if (rc) {
		dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
		goto error;
	}

	rc = xgene_allocate_domains(xgene_msi);
	if (rc) {
		dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
		goto error;
	}

	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
		virt_msir = platform_get_irq(pdev, irq_index);
		if (virt_msir < 0) {
			dev_err(&pdev->dev, "Cannot translate IRQ index %d\n",
				irq_index);
			rc = virt_msir;
			goto error;
		}
		xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
		xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
		xgene_msi->msi_groups[irq_index].msi = xgene_msi;
	}

	/*
	 * MSInIRx registers are read-to-clear; before registering
	 * interrupt handlers, read all of them to clear spurious
	 * interrupts that may occur before the driver is probed.
	 */
	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
		for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
			msi_val = xgene_msi_ir_read(xgene_msi, irq_index,
						    msi_idx);
		/* Read MSIINTn to confirm */
		msi_val = xgene_msi_int_read(xgene_msi, irq_index);
		if (msi_val) {
			dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
			rc = -EINVAL;
			goto error;
		}
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
			       xgene_msi_hwirq_alloc, NULL);
	if (rc < 0)
		goto err_cpuhp;
	pci_xgene_online = rc;
	rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
			       xgene_msi_hwirq_free);
	if (rc)
		goto err_cpuhp;

	dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");

	return 0;

err_cpuhp:
	dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
	xgene_msi_remove(pdev);
	return rc;
}
static struct platform_driver xgene_msi_driver = {
	.driver = {
		.name = "xgene-msi",
		.of_match_table = xgene_msi_match_table,
	},
	.probe = xgene_msi_probe,
	.remove = xgene_msi_remove,
};
static int __init xgene_pcie_msi_init(void)
{
	return platform_driver_register(&xgene_msi_driver);
}
subsys_initcall(xgene_pcie_msi_init);