// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Broadcom Corporation
 */
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"
#define IPROC_MSI_INTR_EN_SHIFT		11
#define IPROC_MSI_INTR_EN		BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT	1
#define IPROC_MSI_INT_N_EVENT		BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT		0
#define IPROC_MSI_EQ_EN			BIT(IPROC_MSI_EQ_EN_SHIFT)

#define IPROC_MSI_EQ_MASK		0x3f
/* Max number of GIC interrupts */
#define NR_HW_IRQS			6

/* Number of entries in each event queue */
#define EQ_LEN				64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE		SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE		SZ_4K
enum iproc_msi_reg {
	IPROC_MSI_EQ_PAGE = 0,
	IPROC_MSI_EQ_PAGE_UPPER,
	IPROC_MSI_PAGE,
	IPROC_MSI_PAGE_UPPER,
	IPROC_MSI_CTRL,
	IPROC_MSI_EQ_HEAD,
	IPROC_MSI_EQ_TAIL,
	IPROC_MSI_INTS_EN,
	IPROC_MSI_REG_SIZE
};
struct iproc_msi;

/**
 * struct iproc_msi_grp - iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: Event queue number
 */
struct iproc_msi_grp {
	struct iproc_msi *msi;
	int gic_irq;
	unsigned int eq;
};
/**
 * struct iproc_msi - iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: number of total interrupts connected to GIC
 * @nr_cpus: number of total CPUs
 * @has_inten_reg: indicates the MSI interrupt enable register needs to be
 * set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @nr_eq_region: required number of 4K aligned memory regions for MSI event
 * queues
 * @nr_msi_region: required number of 4K aligned address regions for MSI
 * posted writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
	struct iproc_pcie *pcie;
	const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
	struct iproc_msi_grp *grps;
	int nr_irqs;
	int nr_cpus;
	bool has_inten_reg;
	unsigned long *bitmap;
	struct mutex bitmap_lock;
	unsigned int nr_msi_vecs;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	unsigned int nr_eq_region;
	unsigned int nr_msi_region;
	void *eq_cpu;
	dma_addr_t eq_dma;
	phys_addr_t msi_addr;
};
static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};
static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
	{ 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
	{ 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
	{ 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
	{ 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};
static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
				     enum iproc_msi_reg reg,
				     unsigned int eq)
{
	struct iproc_pcie *pcie = msi->pcie;

	return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}
static inline void iproc_msi_write_reg(struct iproc_msi *msi,
				       enum iproc_msi_reg reg,
				       int eq, u32 val)
{
	struct iproc_pcie *pcie = msi->pcie;

	writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}
static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
	return (hwirq % msi->nr_irqs);
}
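/*
 * Worked example for the two offset helpers below (illustrative numbers,
 * not from the original source): on PAXB (nr_msi_region == 1), group 2
 * posts its MSI writes at msi_addr + 2 * sizeof(u32), i.e. consecutive
 * words within one shared 4K region; on PAXC (nr_msi_region > 1), group 2
 * gets its own 4K region at msi_addr + 2 * MSI_MEM_REGION_SIZE. The event
 * queue offsets follow the same pattern: either one shared region carved
 * into EQ_LEN * 4-byte queues, or one 4K region per queue.
 */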
static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
						 unsigned long hwirq)
{
	if (msi->nr_msi_region > 1)
		return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
	else
		return hwirq_to_group(msi, hwirq) * sizeof(u32);
}
static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
	if (msi->nr_eq_region > 1)
		return eq * EQ_MEM_REGION_SIZE;
	else
		return eq * EQ_LEN * sizeof(u32);
}
static struct irq_chip iproc_msi_irq_chip = {
	.name = "iProc-MSI",
};

static struct msi_domain_info iproc_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_PCI_MSIX,
	.chip = &iproc_msi_irq_chip,
};
/*
 * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
 * dedicated event queue. Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs. The total
 * number of CPU cores also varies. To support MSI IRQ affinity, we
 * distribute GIC interrupts across all available CPUs. An MSI vector is
 * moved from one GIC interrupt to another to steer it to the target CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */
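/*
 * Worked example (illustrative, not from the original source): with M = 4
 * groups and N = 2 CPUs, the hardware provides 4 * 64 = 256 raw vectors,
 * but only (4 * 64) / 2 = 128 vectors are handed out, because each vector
 * must be mirrored once per CPU so that it can be steered to any of them.
 */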
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
	return (hwirq % msi->nr_cpus);
}
static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
						     unsigned long hwirq)
{
	return (hwirq - hwirq_to_cpu(msi, hwirq));
}
static int iproc_msi_irq_set_affinity(struct irq_data *data,
				      const struct cpumask *mask, bool force)
{
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	int target_cpu = cpumask_first(mask);
	int curr_cpu;
	int ret;

	curr_cpu = hwirq_to_cpu(msi, data->hwirq);
	if (curr_cpu == target_cpu)
		ret = IRQ_SET_MASK_OK_DONE;
	else {
		/* steer MSI to the target CPU */
		data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
		ret = IRQ_SET_MASK_OK;
	}

	irq_data_update_effective_affinity(data, cpumask_of(target_cpu));

	return ret;
}
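/*
 * The MSI message data below encodes the hwirq shifted left by 5 bits.
 * The low 5 bits are left for the endpoint to fill in for multi-MSI: a
 * device granted 2^n vectors ORs its vector index into the low bits of
 * the data it writes. decode_msi_hwirq() reverses this by adding
 * (data & 0x1f) back to (data >> 5).
 */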
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	phys_addr_t addr;

	addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq << 5;
}
static struct irq_chip iproc_msi_bottom_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = iproc_msi_irq_set_affinity,
	.irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};
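/*
 * Allocation granularity, with a worked example (numbers illustrative, not
 * from the original source): with nr_cpus = 2 and a single-vector request,
 * bitmap_find_free_region() reserves 2^order_base_2(2 * 1) = 2 adjacent
 * hwirqs, e.g. {6, 7}: hwirq 6 is the CPU0 copy and hwirq 7 the CPU1 copy
 * of the same logical vector. This mirroring is what allows
 * iproc_msi_irq_set_affinity() to retarget a vector by adding the CPU
 * index to its canonical hwirq.
 */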
static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
				      unsigned int virq, unsigned int nr_irqs,
				      void *args)
{
	struct iproc_msi *msi = domain->host_data;
	int hwirq, i;

	if (msi->nr_cpus > 1 && nr_irqs > 1)
		return -EINVAL;

	mutex_lock(&msi->bitmap_lock);

	/*
	 * Allocate 'nr_irqs' multiplied by 'nr_cpus' number of MSI vectors
	 * each time
	 */
	hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs,
					order_base_2(msi->nr_cpus * nr_irqs));

	mutex_unlock(&msi->bitmap_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &iproc_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}

	return 0;
}
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
				      unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
	unsigned int hwirq;

	mutex_lock(&msi->bitmap_lock);

	hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
	bitmap_release_region(msi->bitmap, hwirq,
			      order_base_2(msi->nr_cpus * nr_irqs));

	mutex_unlock(&msi->bitmap_lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
static const struct irq_domain_ops msi_domain_ops = {
	.alloc = iproc_msi_irq_domain_alloc,
	.free = iproc_msi_irq_domain_free,
};
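/*
 * Decode round trip, worked example (illustrative values): for canonical
 * hwirq 8, iproc_msi_irq_compose_msi_msg() programs data = 8 << 5 = 0x100.
 * If the endpoint was granted multiple vectors (only allowed when
 * nr_cpus == 1, see iproc_msi_irq_domain_alloc()) and signals its 3rd one,
 * it writes 0x100 | 2 = 0x102 into the event queue; decode_msi_hwirq()
 * then recovers (0x102 >> 5) + (0x102 & 0x1f) = 8 + 2 = 10, the hwirq of
 * that sub-vector.
 */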
static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
	u32 __iomem *msg;
	u32 hwirq;
	unsigned int offs;

	offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
	msg = (u32 __iomem *)(msi->eq_cpu + offs);
	hwirq = readl(msg);
	hwirq = (hwirq >> 5) + (hwirq & 0x1f);

	/*
	 * Since multiple hwirqs can map to a single MSI vector, we need to
	 * derive the hwirq at CPU0. It can then be mapped back to a virq.
	 */
	return hwirq_to_canonical_hwirq(msi, hwirq);
}
static void iproc_msi_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct iproc_msi_grp *grp;
	struct iproc_msi *msi;
	u32 eq, head, tail, nr_events;
	unsigned long hwirq;

	chained_irq_enter(chip, desc);

	grp = irq_desc_get_handler_data(desc);
	msi = grp->msi;
	eq = grp->eq;

	/*
	 * iProc MSI event queue is tracked by head and tail pointers. Head
	 * pointer indicates the next entry (MSI data) to be consumed by SW in
	 * the queue and needs to be updated by SW. iProc MSI core uses the
	 * tail pointer as the next data insertion point.
	 *
	 * Entries between head and tail pointers contain valid MSI data. MSI
	 * data is guaranteed to be in the event queue memory before the tail
	 * pointer is updated by the iProc MSI core.
	 */
	head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
				  eq) & IPROC_MSI_EQ_MASK;
	do {
		tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
					  eq) & IPROC_MSI_EQ_MASK;

		/*
		 * Figure out total number of events (MSI data) to be
		 * processed.
		 */
		nr_events = (tail < head) ?
			(EQ_LEN - (head - tail)) : (tail - head);
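		/*
		 * Wrap-around example (illustrative numbers): with
		 * EQ_LEN = 64, head = 60 and tail = 4, the ring has
		 * wrapped, so nr_events = 64 - (60 - 4) = 8, covering
		 * slots 60..63 and 0..3.
		 */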
		if (!nr_events)
			break;

		/* process all outstanding events */
		while (nr_events--) {
			hwirq = decode_msi_hwirq(msi, eq, head);
			generic_handle_domain_irq(msi->inner_domain, hwirq);

			head++;
			head %= EQ_LEN;
		}

		/*
		 * Now all outstanding events have been processed. Update the
		 * head pointer.
		 */
		iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

		/*
		 * Now go read the tail pointer again to see if there are new
		 * outstanding events that came in during the above window.
		 */
	} while (true);

	chained_irq_exit(chip, desc);
}
static void iproc_msi_enable(struct iproc_msi *msi)
{
	int i, eq;
	u32 val;

	/* Program memory region for each event queue */
	for (i = 0; i < msi->nr_eq_region; i++) {
		dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
				    lower_32_bits(addr));
		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
				    upper_32_bits(addr));
	}

	/* Program address region for MSI posted writes */
	for (i = 0; i < msi->nr_msi_region; i++) {
		phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

		iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
				    lower_32_bits(addr));
		iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
				    upper_32_bits(addr));
	}

	for (eq = 0; eq < msi->nr_irqs; eq++) {
		/* Enable MSI event queue */
		val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
		      IPROC_MSI_EQ_EN;
		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

		/*
		 * Some legacy platforms require the MSI interrupt enable
		 * register to be set explicitly.
		 */
		if (msi->has_inten_reg) {
			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
			val |= BIT(eq);
			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
		}
	}
}
static void iproc_msi_disable(struct iproc_msi *msi)
{
	u32 eq, val;

	for (eq = 0; eq < msi->nr_irqs; eq++) {
		if (msi->has_inten_reg) {
			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
			val &= ~BIT(eq);
			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
		}

		val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
		val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
			 IPROC_MSI_EQ_EN);
		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
	}
}
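/*
 * Two-level IRQ domain hierarchy: the inner domain hands out hwirqs from
 * the MSI vector bitmap and handles affinity/message composition via
 * iproc_msi_bottom_irq_chip; the PCI MSI domain stacked on top of it is
 * what the PCI core's MSI allocation paths (e.g. pci_alloc_irq_vectors())
 * end up using.
 */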
static int iproc_msi_alloc_domains(struct device_node *node,
				   struct iproc_msi *msi)
{
	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain)
		return -ENOMEM;

	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
						    &iproc_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}
static void iproc_msi_free_domains(struct iproc_msi *msi)
{
	if (msi->msi_domain)
		irq_domain_remove(msi->msi_domain);

	if (msi->inner_domain)
		irq_domain_remove(msi->inner_domain);
}
static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
	int i;

	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
						 NULL, NULL);
	}
}
static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
	int i, ret;
	cpumask_var_t mask;
	struct iproc_pcie *pcie = msi->pcie;

	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
						 iproc_msi_handler,
						 &msi->grps[i]);
		/* Dedicate GIC interrupt to each CPU core */
		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
			if (ret)
				dev_err(pcie->dev,
					"failed to set affinity for IRQ%d\n",
					msi->grps[i].gic_irq);
			free_cpumask_var(mask);
		} else {
			dev_err(pcie->dev, "failed to alloc CPU mask\n");
			ret = -EINVAL;
		}

		if (ret) {
			/* Free all configured/unconfigured IRQs */
			iproc_msi_irq_free(msi, cpu);
			return ret;
		}
	}

	return 0;
}
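/*
 * Example device tree fragment consumed by iproc_msi_init() (a sketch;
 * the label, unit address and interrupt specifiers are made-up values.
 * Only the "brcm,iproc-msi", "msi-controller" and "brcm,pcie-msi-inten"
 * properties are taken from the code below):
 *
 *	msi0: msi@18012000 {
 *		compatible = "brcm,iproc-msi";
 *		msi-controller;
 *		interrupt-parent = <&gic>;
 *		interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
 *			     <GIC_SPI 97 IRQ_TYPE_NONE>;
 *		brcm,pcie-msi-inten;
 *	};
 */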
int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
	struct iproc_msi *msi;
	int i, ret;
	unsigned int cpu;

	if (!of_device_is_compatible(node, "brcm,iproc-msi"))
		return -ENODEV;

	if (!of_property_read_bool(node, "msi-controller"))
		return -ENODEV;

	if (pcie->msi)
		return -EBUSY;

	msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	msi->pcie = pcie;
	pcie->msi = msi;
	msi->msi_addr = pcie->base_addr;
	mutex_init(&msi->bitmap_lock);
	msi->nr_cpus = num_possible_cpus();

	if (msi->nr_cpus == 1)
		iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;

	msi->nr_irqs = of_irq_count(node);
	if (!msi->nr_irqs) {
		dev_err(pcie->dev, "found no MSI GIC interrupt\n");
		return -ENODEV;
	}

	if (msi->nr_irqs > NR_HW_IRQS) {
		dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
			 msi->nr_irqs);
		msi->nr_irqs = NR_HW_IRQS;
	}

	if (msi->nr_irqs < msi->nr_cpus) {
		dev_err(pcie->dev,
			"not enough GIC interrupts for MSI affinity\n");
		return -EINVAL;
	}

	if (msi->nr_irqs % msi->nr_cpus != 0) {
		msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
		dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
			 msi->nr_irqs);
	}

	switch (pcie->type) {
	case IPROC_PCIE_PAXB_BCMA:
	case IPROC_PCIE_PAXB:
		msi->reg_offsets = iproc_msi_reg_paxb;
		msi->nr_eq_region = 1;
		msi->nr_msi_region = 1;
		break;
	case IPROC_PCIE_PAXC:
		msi->reg_offsets = iproc_msi_reg_paxc;
		msi->nr_eq_region = msi->nr_irqs;
		msi->nr_msi_region = msi->nr_irqs;
		break;
	default:
		dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
		return -EINVAL;
	}

	msi->has_inten_reg = of_property_read_bool(node, "brcm,pcie-msi-inten");

	msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
	msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs,
					 GFP_KERNEL);
	if (!msi->bitmap)
		return -ENOMEM;

	msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
				 GFP_KERNEL);
	if (!msi->grps)
		return -ENOMEM;

	for (i = 0; i < msi->nr_irqs; i++) {
		unsigned int irq = irq_of_parse_and_map(node, i);

		if (!irq) {
			dev_err(pcie->dev, "unable to parse/map interrupt\n");
			ret = -ENODEV;
			goto free_irqs;
		}
		msi->grps[i].gic_irq = irq;
		msi->grps[i].msi = msi;
		msi->grps[i].eq = i;
	}

	/* Reserve memory for event queue and make sure memories are zeroed */
	msi->eq_cpu = dma_alloc_coherent(pcie->dev,
					 msi->nr_eq_region * EQ_MEM_REGION_SIZE,
					 &msi->eq_dma, GFP_KERNEL);
	if (!msi->eq_cpu) {
		ret = -ENOMEM;
		goto free_irqs;
	}

	ret = iproc_msi_alloc_domains(node, msi);
	if (ret) {
		dev_err(pcie->dev, "failed to create MSI domains\n");
		goto free_eq_dma;
	}

	for_each_online_cpu(cpu) {
		ret = iproc_msi_irq_setup(msi, cpu);
		if (ret)
			goto free_msi_irq;
	}

	iproc_msi_enable(msi);

	return 0;

free_msi_irq:
	for_each_online_cpu(cpu)
		iproc_msi_irq_free(msi, cpu);
	iproc_msi_free_domains(msi);

free_eq_dma:
	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
			  msi->eq_cpu, msi->eq_dma);

free_irqs:
	for (i = 0; i < msi->nr_irqs; i++) {
		if (msi->grps[i].gic_irq)
			irq_dispose_mapping(msi->grps[i].gic_irq);
	}

	return ret;
}
EXPORT_SYMBOL(iproc_msi_init);
void iproc_msi_exit(struct iproc_pcie *pcie)
{
	struct iproc_msi *msi = pcie->msi;
	unsigned int i, cpu;

	if (!msi)
		return;

	iproc_msi_disable(msi);

	for_each_online_cpu(cpu)
		iproc_msi_irq_free(msi, cpu);

	iproc_msi_free_domains(msi);

	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
			  msi->eq_cpu, msi->eq_dma);

	for (i = 0; i < msi->nr_irqs; i++) {
		if (msi->grps[i].gic_irq)
			irq_dispose_mapping(msi->grps[i].gic_irq);
	}
}
EXPORT_SYMBOL(iproc_msi_exit);