spi-topcliff-pch: add recovery processing in case wait-event timeout
[zen-stable.git] / arch / powerpc / platforms / wsp / ics.c
blob97fe82ee863334664eb2864f18f0b2a7fbd1dc56
1 /*
2 * Copyright 2008-2011 IBM Corporation.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
10 #include <linux/cpu.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/kernel.h>
15 #include <linux/msi.h>
16 #include <linux/of.h>
17 #include <linux/slab.h>
18 #include <linux/smp.h>
19 #include <linux/spinlock.h>
20 #include <linux/types.h>
22 #include <asm/io.h>
23 #include <asm/irq.h>
24 #include <asm/xics.h>
26 #include "wsp.h"
27 #include "ics.h"
30 /* WSP ICS */
32 struct wsp_ics {
33 struct ics ics;
34 struct device_node *dn;
35 void __iomem *regs;
36 spinlock_t lock;
37 unsigned long *bitmap;
38 u32 chip_id;
39 u32 lsi_base;
40 u32 lsi_count;
41 u64 hwirq_start;
42 u64 count;
43 #ifdef CONFIG_SMP
44 int *hwirq_cpu_map;
45 #endif
#define to_wsp_ics(ics)	container_of(ics, struct wsp_ics, ics)

/* Register offsets within an ICS register block */
#define INT_SRC_LAYER_BUID_REG(base)	((base) + 0x00)
#define IODA_TBL_ADDR_REG(base)		((base) + 0x18)
#define IODA_TBL_DATA_REG(base)		((base) + 0x20)
#define XIVE_UPDATE_REG(base)		((base) + 0x28)
#define ICS_INT_CAPS_REG(base)		((base) + 0x30)

/* IODA indirect table access controls */
#define TBL_AUTO_INCREMENT	((1UL << 63) | (1UL << 15))
#define TBL_SELECT_XIST		(1UL << 48)
#define TBL_SELECT_XIVT		(1UL << 49)

#define IODA_IRQ(irq)		((irq) & (0x7FFULL))	/* HRM 5.1.3.4 */

/* XIST state bits */
#define XIST_REQUIRED		0x8
#define XIST_REJECTED		0x4
#define XIST_PRESENTED		0x2
#define XIST_PENDING		0x1

/* Field layout of a XIVE entry */
#define XIVE_SERVER_SHIFT	42
#define XIVE_SERVER_MASK	0xFFFFULL
#define XIVE_PRIORITY_MASK	0xFFULL
#define XIVE_PRIORITY_SHIFT	32
#define XIVE_WRITE_ENABLE	(1ULL << 63)

/*
 * The docs refer to a 6 bit field called ChipID, which consists of a
 * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
 * so we ignore it, and every where we use "chip id" in this code we
 * mean the NodeID.
 */
#define WSP_ICS_CHIP_SHIFT		17

/* All controllers found at boot, in discovery order */
static struct wsp_ics *ics_list;
static int num_ics;

/* ICS Source controller accessors */
87 static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
89 unsigned long flags;
90 u64 xive;
92 spin_lock_irqsave(&ics->lock, flags);
93 out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
94 xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
95 spin_unlock_irqrestore(&ics->lock, flags);
97 return xive;
100 static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
102 xive &= ~XIVE_ADDR_MASK;
103 xive |= (irq & XIVE_ADDR_MASK);
104 xive |= XIVE_WRITE_ENABLE;
106 out_be64(XIVE_UPDATE_REG(ics->regs), xive);
109 static u64 xive_set_server(u64 xive, unsigned int server)
111 u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
113 xive &= mask;
114 xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
116 return xive;
119 static u64 xive_set_priority(u64 xive, unsigned int priority)
121 u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
123 xive &= mask;
124 xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
126 return xive;
130 #ifdef CONFIG_SMP
131 /* Find logical CPUs within mask on a given chip and store result in ret */
132 void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
134 int cpu, chip;
135 struct device_node *cpu_dn, *dn;
136 const u32 *prop;
138 cpumask_clear(ret);
139 for_each_cpu(cpu, mask) {
140 cpu_dn = of_get_cpu_node(cpu, NULL);
141 if (!cpu_dn)
142 continue;
144 prop = of_get_property(cpu_dn, "at-node", NULL);
145 if (!prop) {
146 of_node_put(cpu_dn);
147 continue;
150 dn = of_find_node_by_phandle(*prop);
151 of_node_put(cpu_dn);
153 chip = wsp_get_chip_id(dn);
154 if (chip == chip_id)
155 cpumask_set_cpu(cpu, ret);
157 of_node_put(dn);
161 /* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
162 static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
163 const cpumask_t *affinity)
165 cpumask_var_t avail, newmask;
166 int ret = -ENOMEM, cpu, cpu_rover = 0, target;
167 int index = hwirq - ics->hwirq_start;
168 unsigned int nodeid;
170 BUG_ON(index < 0 || index >= ics->count);
172 if (!ics->hwirq_cpu_map)
173 return -ENOMEM;
175 if (!distribute_irqs) {
176 ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server;
177 return 0;
180 /* Allocate needed CPU masks */
181 if (!alloc_cpumask_var(&avail, GFP_KERNEL))
182 goto ret;
183 if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
184 goto freeavail;
186 /* Find PBus attached to the source of this IRQ */
187 nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */
189 /* Find CPUs that could handle this IRQ */
190 if (affinity)
191 cpumask_and(avail, cpu_online_mask, affinity);
192 else
193 cpumask_copy(avail, cpu_online_mask);
195 /* Narrow selection down to logical CPUs on the same chip */
196 cpus_on_chip(nodeid, avail, newmask);
198 /* Ensure we haven't narrowed it down to 0 */
199 if (unlikely(cpumask_empty(newmask))) {
200 if (unlikely(cpumask_empty(avail))) {
201 ret = -1;
202 goto out;
204 cpumask_copy(newmask, avail);
207 /* Choose a CPU out of those we narrowed it down to in round robin */
208 target = hwirq % cpumask_weight(newmask);
209 for_each_cpu(cpu, newmask) {
210 if (cpu_rover++ >= target) {
211 ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
212 ret = 0;
213 goto out;
217 /* Shouldn't happen */
218 WARN_ON(1);
220 out:
221 free_cpumask_var(newmask);
222 freeavail:
223 free_cpumask_var(avail);
224 ret:
225 if (ret < 0) {
226 ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
227 pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
228 hwirq, ics->hwirq_cpu_map[index]);
230 return ret;
233 static void alloc_irq_map(struct wsp_ics *ics)
235 int i;
237 ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
238 if (!ics->hwirq_cpu_map) {
239 pr_warning("Allocate hwirq_cpu_map failed, "
240 "IRQ balancing disabled\n");
241 return;
244 for (i=0; i < ics->count; i++)
245 ics->hwirq_cpu_map[i] = xics_default_server;
248 static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
250 int index = hwirq - ics->hwirq_start;
252 BUG_ON(index < 0 || index >= ics->count);
254 if (!ics->hwirq_cpu_map)
255 return xics_default_server;
257 return ics->hwirq_cpu_map[index];
259 #else /* !CONFIG_SMP */
260 static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
261 const cpumask_t *affinity)
263 return 0;
266 static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
268 return xics_default_server;
271 static void alloc_irq_map(struct wsp_ics *ics) { }
272 #endif
274 static void wsp_chip_unmask_irq(struct irq_data *d)
276 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
277 struct wsp_ics *ics;
278 int server;
279 u64 xive;
281 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
282 return;
284 ics = d->chip_data;
285 if (WARN_ON(!ics))
286 return;
288 server = get_irq_server(ics, hw_irq);
290 xive = wsp_ics_get_xive(ics, hw_irq);
291 xive = xive_set_server(xive, server);
292 xive = xive_set_priority(xive, DEFAULT_PRIORITY);
293 wsp_ics_set_xive(ics, hw_irq, xive);
/* Startup is just an unmask for this chip. */
static unsigned int wsp_chip_startup(struct irq_data *d)
{
	/* unmask it */
	wsp_chip_unmask_irq(d);
	return 0;
}
303 static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
305 u64 xive;
307 if (hw_irq == XICS_IPI)
308 return;
310 if (WARN_ON(!ics))
311 return;
312 xive = wsp_ics_get_xive(ics, hw_irq);
313 xive = xive_set_server(xive, xics_default_server);
314 xive = xive_set_priority(xive, LOWEST_PRIORITY);
315 wsp_ics_set_xive(ics, hw_irq, xive);
318 static void wsp_chip_mask_irq(struct irq_data *d)
320 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
321 struct wsp_ics *ics = d->chip_data;
323 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
324 return;
326 wsp_mask_real_irq(hw_irq, ics);
329 static int wsp_chip_set_affinity(struct irq_data *d,
330 const struct cpumask *cpumask, bool force)
332 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
333 struct wsp_ics *ics;
334 int ret;
335 u64 xive;
337 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
338 return -1;
340 ics = d->chip_data;
341 if (WARN_ON(!ics))
342 return -1;
343 xive = wsp_ics_get_xive(ics, hw_irq);
346 * For the moment only implement delivery to all cpus or one cpu.
347 * Get current irq_server for the given irq
349 ret = cache_hwirq_map(ics, hw_irq, cpumask);
350 if (ret == -1) {
351 char cpulist[128];
352 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
353 pr_warning("%s: No online cpus in the mask %s for irq %d\n",
354 __func__, cpulist, d->irq);
355 return -1;
356 } else if (ret == -ENOMEM) {
357 pr_warning("%s: Out of memory\n", __func__);
358 return -1;
361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
362 wsp_ics_set_xive(ics, hw_irq, xive);
364 return 0;
367 static struct irq_chip wsp_irq_chip = {
368 .name = "WSP ICS",
369 .irq_startup = wsp_chip_startup,
370 .irq_mask = wsp_chip_mask_irq,
371 .irq_unmask = wsp_chip_unmask_irq,
372 .irq_set_affinity = wsp_chip_set_affinity
static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
{
	/* All ICSs in the system implement a global irq number space,
	 * so match against them all. */
	return of_device_is_compatible(dn, "ibm,ppc-xics");
}
382 static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
384 if (hwirq >= wsp_ics->hwirq_start &&
385 hwirq < wsp_ics->hwirq_start + wsp_ics->count)
386 return 1;
388 return 0;
391 static int wsp_ics_map(struct ics *ics, unsigned int virq)
393 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
394 unsigned int hw_irq = virq_to_hw(virq);
395 unsigned long flags;
397 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
398 return -ENOENT;
400 irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);
402 irq_set_chip_data(virq, wsp_ics);
404 spin_lock_irqsave(&wsp_ics->lock, flags);
405 bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
406 spin_unlock_irqrestore(&wsp_ics->lock, flags);
408 return 0;
/* ics.mask_unknown hook: shut up a source nobody claimed. */
static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
{
	struct wsp_ics *wsp_ics = to_wsp_ics(ics);

	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
		return;

	pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
	wsp_mask_real_irq(hw_irq, wsp_ics);
}
422 static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
424 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
426 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
427 return -ENOENT;
429 return get_irq_server(wsp_ics, hw_irq);
432 /* HW Number allocation API */
434 static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
436 struct device_node *iparent;
437 int i;
439 iparent = of_irq_find_parent(dn);
440 if (!iparent) {
441 pr_err("wsp_ics: Failed to find interrupt parent!\n");
442 return NULL;
445 for(i = 0; i < num_ics; i++) {
446 if(ics_list[i].dn == iparent)
447 break;
450 if (i >= num_ics) {
451 pr_err("wsp_ics: Unable to find parent bitmap!\n");
452 return NULL;
455 return &ics_list[i];
458 int wsp_ics_alloc_irq(struct device_node *dn, int num)
460 struct wsp_ics *ics;
461 int order, offset;
463 ics = wsp_ics_find_dn_ics(dn);
464 if (!ics)
465 return -ENODEV;
467 /* Fast, but overly strict if num isn't a power of two */
468 order = get_count_order(num);
470 spin_lock_irq(&ics->lock);
471 offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
472 spin_unlock_irq(&ics->lock);
474 if (offset < 0)
475 return offset;
477 return offset + ics->hwirq_start;
480 void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
482 struct wsp_ics *ics;
484 ics = wsp_ics_find_dn_ics(dn);
485 if (WARN_ON(!ics))
486 return;
488 spin_lock_irq(&ics->lock);
489 bitmap_release_region(ics->bitmap, irq, 0);
490 spin_unlock_irq(&ics->lock);
493 /* Initialisation */
495 static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
496 struct device_node *dn)
498 int len, i, j, size;
499 u32 start, count;
500 const u32 *p;
502 size = BITS_TO_LONGS(ics->count) * sizeof(long);
503 ics->bitmap = kzalloc(size, GFP_KERNEL);
504 if (!ics->bitmap) {
505 pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
506 return -ENOMEM;
509 spin_lock_init(&ics->lock);
511 p = of_get_property(dn, "available-ranges", &len);
512 if (!p || !len) {
513 /* FIXME this should be a WARN() once mambo is updated */
514 pr_err("wsp_ics: No available-ranges defined for %s\n",
515 dn->full_name);
516 return 0;
519 if (len % (2 * sizeof(u32)) != 0) {
520 /* FIXME this should be a WARN() once mambo is updated */
521 pr_err("wsp_ics: Invalid available-ranges for %s\n",
522 dn->full_name);
523 return 0;
526 bitmap_fill(ics->bitmap, ics->count);
528 for (i = 0; i < len / sizeof(u32); i += 2) {
529 start = of_read_number(p + i, 1);
530 count = of_read_number(p + i + 1, 1);
532 pr_devel("%s: start: %d count: %d\n", __func__, start, count);
534 if ((start + count) > (ics->hwirq_start + ics->count) ||
535 start < ics->hwirq_start) {
536 pr_err("wsp_ics: Invalid range! -> %d to %d\n",
537 start, start + count);
538 break;
541 for (j = 0; j < count; j++)
542 bitmap_release_region(ics->bitmap,
543 (start + j) - ics->hwirq_start, 0);
546 /* Ensure LSIs are not available for allocation */
547 bitmap_allocate_region(ics->bitmap, ics->lsi_base,
548 get_count_order(ics->lsi_count));
550 return 0;
553 static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
555 u32 lsi_buid, msi_buid, msi_base, msi_count;
556 void __iomem *regs;
557 const u32 *p;
558 int rc, len, i;
559 u64 caps, buid;
561 p = of_get_property(dn, "interrupt-ranges", &len);
562 if (!p || len < (2 * sizeof(u32))) {
563 pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
564 dn->full_name);
565 return -ENOENT;
568 if (len > (2 * sizeof(u32))) {
569 pr_err("wsp_ics: Multiple ics ranges not supported.\n");
570 return -EINVAL;
573 regs = of_iomap(dn, 0);
574 if (!regs) {
575 pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
576 return -ENXIO;
579 ics->hwirq_start = of_read_number(p, 1);
580 ics->count = of_read_number(p + 1, 1);
581 ics->regs = regs;
583 ics->chip_id = wsp_get_chip_id(dn);
584 if (WARN_ON(ics->chip_id < 0))
585 ics->chip_id = 0;
587 /* Get some informations about the critter */
588 caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
589 buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
590 ics->lsi_count = caps >> 56;
591 msi_count = (caps >> 44) & 0x7ff;
593 /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
594 * rest is mixed in the interrupt number. We store the whole
595 * thing though
597 lsi_buid = (buid >> 48) & 0x1ff;
598 ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
599 msi_buid = (buid >> 37) & 0x7;
600 msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;
602 pr_info("wsp_ics: Found %s\n", dn->full_name);
603 pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
604 ics->hwirq_start, ics->hwirq_start + ics->count - 1);
605 pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
606 ics->lsi_count, ics->lsi_base,
607 ics->lsi_base + ics->lsi_count - 1);
608 pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
609 msi_count, msi_base,
610 msi_base + msi_count - 1);
612 /* Let's check the HW config is sane */
613 if (ics->lsi_base < ics->hwirq_start ||
614 (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
615 pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
616 if (msi_base < ics->hwirq_start ||
617 (msi_base + msi_count) > (ics->hwirq_start + ics->count))
618 pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");
620 /* We don't check for overlap between LSI and MSI, which will happen
621 * if we use the same BUID, I'm not sure yet how legit that is.
624 rc = wsp_ics_bitmap_setup(ics, dn);
625 if (rc) {
626 iounmap(regs);
627 return rc;
630 ics->dn = of_node_get(dn);
631 alloc_irq_map(ics);
633 for(i = 0; i < ics->count; i++)
634 wsp_mask_real_irq(ics->hwirq_start + i, ics);
636 ics->ics.map = wsp_ics_map;
637 ics->ics.mask_unknown = wsp_ics_mask_unknown;
638 ics->ics.get_server = wsp_ics_get_server;
639 ics->ics.host_match = wsp_ics_host_match;
641 xics_register_ics(&ics->ics);
643 return 0;
646 static void __init wsp_ics_set_default_server(void)
648 struct device_node *np;
649 u32 hwid;
651 /* Find the server number for the boot cpu. */
652 np = of_get_cpu_node(boot_cpuid, NULL);
653 BUG_ON(!np);
655 hwid = get_hard_smp_processor_id(boot_cpuid);
657 pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
658 xics_default_server = hwid;
660 of_node_put(np);
663 static int __init wsp_ics_init(void)
665 struct device_node *dn;
666 struct wsp_ics *ics;
667 int rc, found;
669 wsp_ics_set_default_server();
671 found = 0;
672 for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
673 found++;
675 if (found == 0) {
676 pr_err("wsp_ics: No ICS's found!\n");
677 return -ENODEV;
680 ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
681 if (!ics_list) {
682 pr_err("wsp_ics: No memory for structs.\n");
683 return -ENOMEM;
686 num_ics = 0;
687 ics = ics_list;
688 for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
689 rc = wsp_ics_setup(ics, dn);
690 if (rc == 0) {
691 ics++;
692 num_ics++;
696 if (found != num_ics) {
697 pr_err("wsp_ics: Failed setting up %d ICS's\n",
698 found - num_ics);
699 return -1;
702 return 0;
705 void __init wsp_init_irq(void)
707 wsp_ics_init();
708 xics_init();
710 /* We need to patch our irq chip's EOI to point to the right ICP */
711 wsp_irq_chip.irq_eoi = icp_ops->eoi;
714 #ifdef CONFIG_PCI_MSI
/* MSI unmask: unmask at the ICS level, then at the MSI level. */
static void wsp_ics_msi_unmask_irq(struct irq_data *d)
{
	wsp_chip_unmask_irq(d);
	unmask_msi_irq(d);
}
/* MSI startup is just an unmask. */
static unsigned int wsp_ics_msi_startup(struct irq_data *d)
{
	wsp_ics_msi_unmask_irq(d);
	return 0;
}
/* MSI mask: mask at the MSI level first, then at the ICS level
 * (reverse order of unmask). */
static void wsp_ics_msi_mask_irq(struct irq_data *d)
{
	mask_msi_irq(d);
	wsp_chip_mask_irq(d);
}
734 * we do it this way because we reassinge default EOI handling in
735 * irq_init() above
737 static void wsp_ics_eoi(struct irq_data *data)
739 wsp_irq_chip.irq_eoi(data);
742 static struct irq_chip wsp_ics_msi = {
743 .name = "WSP ICS MSI",
744 .irq_startup = wsp_ics_msi_startup,
745 .irq_mask = wsp_ics_msi_mask_irq,
746 .irq_unmask = wsp_ics_msi_unmask_irq,
747 .irq_eoi = wsp_ics_eoi,
748 .irq_set_affinity = wsp_chip_set_affinity
751 void wsp_ics_set_msi_chip(unsigned int irq)
753 irq_set_chip(irq, &wsp_ics_msi);
756 void wsp_ics_set_std_chip(unsigned int irq)
758 irq_set_chip(irq, &wsp_irq_chip);
760 #endif /* CONFIG_PCI_MSI */