drivers/pci/controller/pcie-iproc-msi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Broadcom Corporation
 */

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"

#define IPROC_MSI_INTR_EN_SHIFT        11
#define IPROC_MSI_INTR_EN              BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT    1
#define IPROC_MSI_INT_N_EVENT          BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT          0
#define IPROC_MSI_EQ_EN                BIT(IPROC_MSI_EQ_EN_SHIFT)

#define IPROC_MSI_EQ_MASK              0x3f

/* Max number of GIC interrupts */
#define NR_HW_IRQS                     6

/* Number of entries in each event queue */
#define EQ_LEN                         64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE             SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE            SZ_4K

enum iproc_msi_reg {
        IPROC_MSI_EQ_PAGE = 0,
        IPROC_MSI_EQ_PAGE_UPPER,
        IPROC_MSI_PAGE,
        IPROC_MSI_PAGE_UPPER,
        IPROC_MSI_CTRL,
        IPROC_MSI_EQ_HEAD,
        IPROC_MSI_EQ_TAIL,
        IPROC_MSI_INTS_EN,
        IPROC_MSI_REG_SIZE,
};

struct iproc_msi;

/**
 * iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: Event queue number
 */
struct iproc_msi_grp {
        struct iproc_msi *msi;
        int gic_irq;
        unsigned int eq;
};

/**
 * iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: number of total interrupts connected to GIC
 * @nr_cpus: number of total CPUs
 * @has_inten_reg: indicates the MSI interrupt enable register needs to be
 * set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @nr_eq_region: required number of 4K aligned memory region for MSI event
 * queues
 * @nr_msi_region: required number of 4K aligned address region for MSI posted
 * writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
        struct iproc_pcie *pcie;
        const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
        struct iproc_msi_grp *grps;
        int nr_irqs;
        int nr_cpus;
        bool has_inten_reg;
        unsigned long *bitmap;
        struct mutex bitmap_lock;
        unsigned int nr_msi_vecs;
        struct irq_domain *inner_domain;
        struct irq_domain *msi_domain;
        unsigned int nr_eq_region;
        unsigned int nr_msi_region;
        void *eq_cpu;
        dma_addr_t eq_dma;
        phys_addr_t msi_addr;
};

static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};

static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
        { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
        { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
        { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};

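/*
 * The tables above are indexed as reg_offsets[eq][reg], with reg drawn from
 * enum iproc_msi_reg. Note that in the PAXB layout the EQ/MSI page registers
 * (the first four columns) are shared across all event queues, whereas PAXC
 * provides a separate register bank per event queue; this matches the
 * single-region vs. per-queue-region setup done in iproc_msi_init().
 */
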
static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
                                     enum iproc_msi_reg reg,
                                     unsigned int eq)
{
        struct iproc_pcie *pcie = msi->pcie;

        return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}

static inline void iproc_msi_write_reg(struct iproc_msi *msi,
                                       enum iproc_msi_reg reg,
                                       int eq, u32 val)
{
        struct iproc_pcie *pcie = msi->pcie;

        writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}

static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_irqs);
}

static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
                                                 unsigned long hwirq)
{
        if (msi->nr_msi_region > 1)
                return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
        else
                return hwirq_to_group(msi, hwirq) * sizeof(u32);
}

static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
        if (msi->nr_eq_region > 1)
                return eq * EQ_MEM_REGION_SIZE;
        else
                return eq * EQ_LEN * sizeof(u32);
}

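/*
 * To illustrate the two helpers above: with a single region (PAXB), the
 * event queues are packed back to back, each occupying EQ_LEN * sizeof(u32)
 * = 256 bytes, and each MSI group's posted-write address differs by only
 * sizeof(u32) = 4 bytes. With per-queue regions (PAXC), event queues and
 * MSI addresses are each spaced a full 4K region apart.
 */
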
static struct irq_chip iproc_msi_irq_chip = {
        .name = "iProc-MSI",
};

static struct msi_domain_info iproc_msi_domain_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
        .chip = &iproc_msi_irq_chip,
};

/*
 * In the iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
 * dedicated event queue. Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs. The total
 * number of CPU cores also varies. To support MSI IRQ affinity, we
 * distribute GIC interrupts across all available CPUs. An MSI vector is moved
 * from one GIC interrupt to another to steer it to the target CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */

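/*
 * For example, on an SoC with M = 6 event queues and N = 2 CPU cores there
 * are 6 * 64 = 384 raw vectors, but since each allocation reserves one
 * vector per CPU (see iproc_msi_irq_domain_alloc() below), only
 * 384 / 2 = 192 MSI vectors are usable by devices.
 */
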
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_cpus);
}

static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
                                                     unsigned long hwirq)
{
        return (hwirq - hwirq_to_cpu(msi, hwirq));
}

static int iproc_msi_irq_set_affinity(struct irq_data *data,
                                      const struct cpumask *mask, bool force)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        int target_cpu = cpumask_first(mask);
        int curr_cpu;

        curr_cpu = hwirq_to_cpu(msi, data->hwirq);
        if (curr_cpu == target_cpu)
                return IRQ_SET_MASK_OK_DONE;

        /* steer MSI to the target CPU */
        data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;

        return IRQ_SET_MASK_OK;
}

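/*
 * Note on the steering above: rewriting data->hwirq changes which MSI group
 * (and therefore which event queue and GIC interrupt) the vector maps to.
 * Because IRQ_SET_MASK_OK (rather than IRQ_SET_MASK_OK_DONE) is returned,
 * the MSI core re-composes and re-programs the message via
 * iproc_msi_irq_compose_msi_msg(), so the new address/data pair takes
 * effect immediately.
 */
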
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
                                          struct msi_msg *msg)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        dma_addr_t addr;

        addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
        msg->address_lo = lower_32_bits(addr);
        msg->address_hi = upper_32_bits(addr);
        msg->data = data->hwirq << 5;
}

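/*
 * Worked example of the encoding above (illustrative values): for hwirq 6,
 * msg->data = 6 << 5 = 0xc0. decode_msi_hwirq() below reverses this with
 * (0xc0 >> 5) + (0xc0 & 0x1f) = 6, then folds any CPU offset away via
 * hwirq_to_canonical_hwirq().
 */
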
static struct irq_chip iproc_msi_bottom_irq_chip = {
        .name = "MSI",
        .irq_set_affinity = iproc_msi_irq_set_affinity,
        .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};

static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs,
                                      void *args)
{
        struct iproc_msi *msi = domain->host_data;
        int hwirq, i;

        mutex_lock(&msi->bitmap_lock);

        /* Allocate 'nr_cpus' number of MSI vectors each time */
        hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
                                           msi->nr_cpus, 0);
        if (hwirq < msi->nr_msi_vecs) {
                bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
        } else {
                mutex_unlock(&msi->bitmap_lock);
                return -ENOSPC;
        }

        mutex_unlock(&msi->bitmap_lock);

        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &iproc_msi_bottom_irq_chip,
                                    domain->host_data, handle_simple_irq,
                                    NULL, NULL);
        }

        return hwirq;
}

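/*
 * Reserving nr_cpus consecutive hwirqs per allocation means an affinity
 * change only has to shift the hwirq within its block (canonical hwirq +
 * target CPU offset); no reallocation is needed when steering a vector to
 * another CPU.
 */
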
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        unsigned int hwirq;

        mutex_lock(&msi->bitmap_lock);

        hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
        bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);

        mutex_unlock(&msi->bitmap_lock);

        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
        .alloc = iproc_msi_irq_domain_alloc,
        .free = iproc_msi_irq_domain_free,
};

static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
        u32 __iomem *msg;
        u32 hwirq;
        unsigned int offs;

        offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
        msg = (u32 __iomem *)(msi->eq_cpu + offs);
        hwirq = readl(msg);
        hwirq = (hwirq >> 5) + (hwirq & 0x1f);

        /*
         * Since we have multiple hwirqs mapped to a single MSI vector,
         * we now need to derive the hwirq at CPU0. It can then be used
         * to map back to the virq.
         */
        return hwirq_to_canonical_hwirq(msi, hwirq);
}

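/*
 * For example (illustrative, with nr_cpus = 2): a vector steered to CPU1
 * arrives carrying hwirq 7; hwirq_to_canonical_hwirq() strips the CPU
 * offset (7 % 2 = 1) to recover hwirq 6, the canonical hwirq that
 * irq_find_mapping() knows about.
 */
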
static void iproc_msi_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct iproc_msi_grp *grp;
        struct iproc_msi *msi;
        u32 eq, head, tail, nr_events;
        unsigned long hwirq;
        int virq;

        chained_irq_enter(chip, desc);

        grp = irq_desc_get_handler_data(desc);
        msi = grp->msi;
        eq = grp->eq;

        /*
         * iProc MSI event queue is tracked by head and tail pointers. Head
         * pointer indicates the next entry (MSI data) to be consumed by SW in
         * the queue and needs to be updated by SW. iProc MSI core uses the
         * tail pointer as the next data insertion point.
         *
         * Entries between head and tail pointers contain valid MSI data. MSI
         * data is guaranteed to be in the event queue memory before the tail
         * pointer is updated by the iProc MSI core.
         */
        head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
                                  eq) & IPROC_MSI_EQ_MASK;
        do {
                tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
                                          eq) & IPROC_MSI_EQ_MASK;

                /*
                 * Figure out total number of events (MSI data) to be
                 * processed.
                 */
                nr_events = (tail < head) ?
                        (EQ_LEN - (head - tail)) : (tail - head);
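                /*
                 * Wraparound example for the computation above
                 * (illustrative): with EQ_LEN = 64, head = 60 and
                 * tail = 2 give nr_events = 64 - (60 - 2) = 6 pending
                 * entries (60, 61, 62, 63, 0, 1).
                 */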
                if (!nr_events)
                        break;

                /* process all outstanding events */
                while (nr_events--) {
                        hwirq = decode_msi_hwirq(msi, eq, head);
                        virq = irq_find_mapping(msi->inner_domain, hwirq);
                        generic_handle_irq(virq);

                        head++;
                        head %= EQ_LEN;
                }

                /*
                 * Now all outstanding events have been processed. Update the
                 * head pointer.
                 */
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

                /*
                 * Now go read the tail pointer again to see if there are new
                 * outstanding events that came in during the above window.
                 */
        } while (true);

        chained_irq_exit(chip, desc);
}

static void iproc_msi_enable(struct iproc_msi *msi)
{
        int i, eq;
        u32 val;

        /* Program memory region for each event queue */
        for (i = 0; i < msi->nr_eq_region; i++) {
                dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        /* Program address region for MSI posted writes */
        for (i = 0; i < msi->nr_msi_region; i++) {
                phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                /* Enable MSI event queue */
                val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                      IPROC_MSI_EQ_EN;
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

                /*
                 * Some legacy platforms require the MSI interrupt enable
                 * register to be set explicitly.
                 */
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val |= BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }
        }
}

static void iproc_msi_disable(struct iproc_msi *msi)
{
        u32 eq, val;

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val &= ~BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }

                val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
                val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                         IPROC_MSI_EQ_EN);
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
        }
}

static int iproc_msi_alloc_domains(struct device_node *node,
                                   struct iproc_msi *msi)
{
        msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
                                                  &msi_domain_ops, msi);
        if (!msi->inner_domain)
                return -ENOMEM;

        msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
                                                    &iproc_msi_domain_info,
                                                    msi->inner_domain);
        if (!msi->msi_domain) {
                irq_domain_remove(msi->inner_domain);
                return -ENOMEM;
        }

        return 0;
}

static void iproc_msi_free_domains(struct iproc_msi *msi)
{
        if (msi->msi_domain)
                irq_domain_remove(msi->msi_domain);

        if (msi->inner_domain)
                irq_domain_remove(msi->inner_domain);
}

static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
        int i;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 NULL, NULL);
        }
}

static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
        int i, ret;
        cpumask_var_t mask;
        struct iproc_pcie *pcie = msi->pcie;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 iproc_msi_handler,
                                                 &msi->grps[i]);
                /* Dedicate GIC interrupt to each CPU core */
                if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
                        cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
                        if (ret)
                                dev_err(pcie->dev,
                                        "failed to set affinity for IRQ%d\n",
                                        msi->grps[i].gic_irq);
                        free_cpumask_var(mask);
                } else {
                        dev_err(pcie->dev, "failed to alloc CPU mask\n");
                        ret = -EINVAL;
                }

                if (ret) {
                        /* Free all configured/unconfigured IRQs */
                        iproc_msi_irq_free(msi, cpu);
                        return ret;
                }
        }

        return 0;
}

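/*
 * iproc_msi_init() below expects an MSI device tree node roughly like the
 * sketch that follows (illustrative only; the property values are
 * placeholders, and the brcm,iproc-pcie binding document is authoritative):
 *
 *     msi0: msi-controller {
 *             compatible = "brcm,iproc-msi";
 *             msi-controller;
 *             interrupt-parent = <&gic>;
 *             interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
 *                          <GIC_SPI 97 IRQ_TYPE_NONE>;
 *             brcm,pcie-msi-inten;
 *     };
 *
 * The "brcm,iproc-msi" compatible and the "msi-controller" property are
 * checked below; the number of "interrupts" entries becomes nr_irqs, and
 * the optional "brcm,pcie-msi-inten" property (needed only on some legacy
 * platforms) sets has_inten_reg.
 */
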
int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
        struct iproc_msi *msi;
        int i, ret;
        unsigned int cpu;

        if (!of_device_is_compatible(node, "brcm,iproc-msi"))
                return -ENODEV;

        if (!of_find_property(node, "msi-controller", NULL))
                return -ENODEV;

        if (pcie->msi)
                return -EBUSY;

        msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
        if (!msi)
                return -ENOMEM;

        msi->pcie = pcie;
        pcie->msi = msi;
        msi->msi_addr = pcie->base_addr;
        mutex_init(&msi->bitmap_lock);
        msi->nr_cpus = num_possible_cpus();

        msi->nr_irqs = of_irq_count(node);
        if (!msi->nr_irqs) {
                dev_err(pcie->dev, "found no MSI GIC interrupt\n");
                return -ENODEV;
        }

        if (msi->nr_irqs > NR_HW_IRQS) {
                dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
                         msi->nr_irqs);
                msi->nr_irqs = NR_HW_IRQS;
        }

        if (msi->nr_irqs < msi->nr_cpus) {
                dev_err(pcie->dev,
                        "not enough GIC interrupts for MSI affinity\n");
                return -EINVAL;
        }

        if (msi->nr_irqs % msi->nr_cpus != 0) {
                msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
                dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
                         msi->nr_irqs);
        }

        switch (pcie->type) {
        case IPROC_PCIE_PAXB_BCMA:
        case IPROC_PCIE_PAXB:
                msi->reg_offsets = iproc_msi_reg_paxb;
                msi->nr_eq_region = 1;
                msi->nr_msi_region = 1;
                break;
        case IPROC_PCIE_PAXC:
                msi->reg_offsets = iproc_msi_reg_paxc;
                msi->nr_eq_region = msi->nr_irqs;
                msi->nr_msi_region = msi->nr_irqs;
                break;
        default:
                dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
                return -EINVAL;
        }

        if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
                msi->has_inten_reg = true;

        msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
        msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
                                   sizeof(*msi->bitmap), GFP_KERNEL);
        if (!msi->bitmap)
                return -ENOMEM;

        msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
                                 GFP_KERNEL);
        if (!msi->grps)
                return -ENOMEM;

        for (i = 0; i < msi->nr_irqs; i++) {
                unsigned int irq = irq_of_parse_and_map(node, i);

                if (!irq) {
                        dev_err(pcie->dev, "unable to parse/map interrupt\n");
                        ret = -ENODEV;
                        goto free_irqs;
                }
                msi->grps[i].gic_irq = irq;
                msi->grps[i].msi = msi;
                msi->grps[i].eq = i;
        }

        /* Reserve memory for event queue and make sure memories are zeroed */
        msi->eq_cpu = dma_alloc_coherent(pcie->dev,
                                         msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                                         &msi->eq_dma, GFP_KERNEL);
        if (!msi->eq_cpu) {
                ret = -ENOMEM;
                goto free_irqs;
        }

        ret = iproc_msi_alloc_domains(node, msi);
        if (ret) {
                dev_err(pcie->dev, "failed to create MSI domains\n");
                goto free_eq_dma;
        }

        for_each_online_cpu(cpu) {
                ret = iproc_msi_irq_setup(msi, cpu);
                if (ret)
                        goto free_msi_irq;
        }

        iproc_msi_enable(msi);

        return 0;

free_msi_irq:
        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);
        iproc_msi_free_domains(msi);

free_eq_dma:
        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

free_irqs:
        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
        pcie->msi = NULL;
        return ret;
}
EXPORT_SYMBOL(iproc_msi_init);

void iproc_msi_exit(struct iproc_pcie *pcie)
{
        struct iproc_msi *msi = pcie->msi;
        unsigned int i, cpu;

        if (!msi)
                return;

        iproc_msi_disable(msi);

        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);

        iproc_msi_free_domains(msi);

        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
}
EXPORT_SYMBOL(iproc_msi_exit);