drivers/bcma/driver_pci_host.c
/*
 * Broadcom specific AMBA
 * PCI Core in hostmode
 *
 * Copyright 2005 - 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */
#include "bcma_private.h"
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>
/* Probe a 32bit value on the bus and catch bus exceptions.
 * Returns nonzero on a bus exception.
 * This is MIPS specific */
#define mips_busprobe32(val, addr)      get_dbe((val), ((u32 *)(addr)))

/* Assume one-hot slot wiring */
#define BCMA_PCI_SLOT_MAX       16
#define PCI_CONFIG_SPACE_SIZE   256
bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
        struct bcma_bus *bus = pc->core->bus;
        u16 chipid_top;
        u32 tmp;

        chipid_top = (bus->chipinfo.id & 0xFF00);
        if (chipid_top != 0x4700 &&
            chipid_top != 0x5300)
                return false;

        bcma_core_enable(pc->core, 0);

        return !mips_busprobe32(tmp, pc->core->io_addr);
}
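/* Indirect access to the PCIe core's own (extended) config space: the target
 * register is latched in BCMA_CORE_PCI_CONFIG_ADDR and the data is then
 * transferred through BCMA_CORE_PCI_CONFIG_DATA. The read-back of the address
 * register in between appears to be there to make sure the address write has
 * reached the core before the data access is issued.
 */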
static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
        pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
        pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
        return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}
static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
                                   u32 data)
{
        pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
        pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
        pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}
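/* Compute the backplane address through which the config space of an external
 * device (slot "dev", function "func") is reached: the host controller's
 * config window base with the slot and function encoded in the address bits
 * and the register offset rounded down to a 32-bit boundary. Returns 0 when
 * no device is reachable (link down or dev >= 2).
 */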
static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
                                  unsigned int func, unsigned int off)
{
        u32 addr = 0;

        /* Issue config commands only when the data link is up (at least
         * one external PCIe device is present).
         */
        if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
                        & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
                goto out;

        /* Type 0 transaction */
        /* Slide the PCI window to the appropriate slot */
        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
        /* Calculate the address */
        addr = pc->host_controller->host_cfg_addr;
        addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
        addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
        addr |= (off & ~3);

out:
        return addr;
}
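/* Read "len" bytes (1, 2 or 4) at config space offset "off" of the given
 * device/function into "buf". Device 0 is the PCIe core itself and is
 * accessed through its own config registers (indirectly for offsets >= 256);
 * any other device is read through the config window, with a MIPS bus-error
 * probe so that an absent device results in an error instead of a bus
 * exception.
 */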
static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
                                   unsigned int func, unsigned int off,
                                   void *buf, int len)
{
        int err = -EINVAL;
        u32 addr, val;
        void __iomem *mmio = 0;

        WARN_ON(!pc->hostmode);
        if (unlikely(len != 1 && len != 2 && len != 4))
                goto out;
        if (dev == 0) {
                /* we support only two functions on device 0 */
                if (func > 1)
                        goto out;

                /* accesses to config registers with offsets >= 256
                 * require indirect access.
                 */
                if (off >= PCI_CONFIG_SPACE_SIZE) {
                        addr = (func << 12);
                        addr |= (off & 0x0FFC);
                        val = bcma_pcie_read_config(pc, addr);
                } else {
                        addr = BCMA_CORE_PCI_PCICFG0;
                        addr |= (func << 8);
                        addr |= (off & 0xFC);
                        val = pcicore_read32(pc, addr);
                }
        } else {
                addr = bcma_get_cfgspace_addr(pc, dev, func, off);
                if (unlikely(!addr))
                        goto out;
                err = -ENOMEM;
                mmio = ioremap_nocache(addr, sizeof(val));
                if (!mmio)
                        goto out;

                if (mips_busprobe32(val, mmio)) {
                        val = 0xFFFFFFFF;
                        goto unmap;
                }
        }
        val >>= (8 * (off & 3));

        switch (len) {
        case 1:
                *((u8 *)buf) = (u8)val;
                break;
        case 2:
                *((u16 *)buf) = (u16)val;
                break;
        case 4:
                *((u32 *)buf) = (u32)val;
                break;
        }
        err = 0;
unmap:
        if (mmio)
                iounmap(mmio);
out:
        return err;
}
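/* Write "len" bytes (1, 2 or 4) from "buf" to config space offset "off" of
 * the given device/function. Sub-dword writes are done as a read-modify-write
 * of the containing 32-bit register. On BCM4716/BCM4748 the value is read
 * back through the window, presumably to flush the posted write.
 */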
static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
                                    unsigned int func, unsigned int off,
                                    const void *buf, int len)
{
        int err = -EINVAL;
        u32 addr, val;
        void __iomem *mmio = 0;
        u16 chipid = pc->core->bus->chipinfo.id;

        WARN_ON(!pc->hostmode);
        if (unlikely(len != 1 && len != 2 && len != 4))
                goto out;
        if (dev == 0) {
                /* we support only two functions on device 0 */
                if (func > 1)
                        goto out;

                /* accesses to config registers with offsets >= 256
                 * require indirect access.
                 */
                if (off >= PCI_CONFIG_SPACE_SIZE) {
                        addr = (func << 12);
                        addr |= (off & 0x0FFC);
                        val = bcma_pcie_read_config(pc, addr);
                } else {
                        addr = BCMA_CORE_PCI_PCICFG0;
                        addr |= (func << 8);
                        addr |= (off & 0xFC);
                        val = pcicore_read32(pc, addr);
                }
        } else {
                addr = bcma_get_cfgspace_addr(pc, dev, func, off);
                if (unlikely(!addr))
                        goto out;
                err = -ENOMEM;
                mmio = ioremap_nocache(addr, sizeof(val));
                if (!mmio)
                        goto out;

                if (mips_busprobe32(val, mmio)) {
                        val = 0xFFFFFFFF;
                        goto unmap;
                }
        }

        switch (len) {
        case 1:
                val &= ~(0xFF << (8 * (off & 3)));
                val |= *((const u8 *)buf) << (8 * (off & 3));
                break;
        case 2:
                val &= ~(0xFFFF << (8 * (off & 3)));
                val |= *((const u16 *)buf) << (8 * (off & 3));
                break;
        case 4:
                val = *((const u32 *)buf);
                break;
        }
        if (dev == 0) {
                /* accesses to config registers with offsets >= 256
                 * require indirect access.
                 */
                if (off >= PCI_CONFIG_SPACE_SIZE)
                        bcma_pcie_write_config(pc, addr, val);
                else
                        pcicore_write32(pc, addr, val);
        } else {
                writel(val, mmio);

                if (chipid == BCMA_CHIP_ID_BCM4716 ||
                    chipid == BCMA_CHIP_ID_BCM4748)
                        readl(mmio);
        }

        err = 0;
unmap:
        if (mmio)
                iounmap(mmio);
out:
        return err;
}
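/* pci_ops callbacks used by the generic PCI code. They recover the host
 * controller from the bus ops pointer, serialize config space accesses with
 * cfgspace_lock and translate the internal error codes into PCIBIOS ones.
 */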
static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
                                              unsigned int devfn,
                                              int reg, int size, u32 *val)
{
        unsigned long flags;
        int err;
        struct bcma_drv_pci *pc;
        struct bcma_drv_pci_host *pc_host;

        pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
        pc = pc_host->pdev;

        spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
        err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
                                      PCI_FUNC(devfn), reg, val, size);
        spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

        return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
                                               unsigned int devfn,
                                               int reg, int size, u32 val)
{
        unsigned long flags;
        int err;
        struct bcma_drv_pci *pc;
        struct bcma_drv_pci_host *pc_host;

        pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
        pc = pc_host->pdev;

        spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
        err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
                                       PCI_FUNC(devfn), reg, &val, size);
        spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

        return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
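/* Standard PCI capability list walker for a device behind the bridge;
 * bcma_core_pci_enable_crs() below uses it to locate the PCI Express
 * capability (PCI_CAP_ID_EXP) of the root port.
 */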
/* return cap_offset if requested capability exists in the PCI config space */
static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
                                   unsigned int func, u8 req_cap_id,
                                   unsigned char *buf, u32 *buflen)
{
        u8 cap_id;
        u8 cap_ptr = 0;
        u32 bufsize;
        u8 byte_val;

        /* check for Header type 0 */
        bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
                                sizeof(u8));
        if ((byte_val & 0x7F) != PCI_HEADER_TYPE_NORMAL)
                return cap_ptr;

        /* check if the capability pointer field exists */
        bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
                                sizeof(u8));
        if (!(byte_val & PCI_STATUS_CAP_LIST))
                return cap_ptr;

        /* check if the capability pointer is 0x00 */
        bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
                                sizeof(u8));
        if (cap_ptr == 0x00)
                return cap_ptr;

        /* loop through the capability list and see if the requested
         * capability exists */
        bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
        while (cap_id != req_cap_id) {
                bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
                                        sizeof(u8));
                if (cap_ptr == 0x00)
                        return cap_ptr;
                bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
                                        sizeof(u8));
        }

        /* found the caller requested capability */
        if ((buf != NULL) && (buflen != NULL)) {
                u8 cap_data;

                bufsize = *buflen;
                if (!bufsize)
                        return cap_ptr;

                *buflen = 0;

                /* copy the capability data excluding cap ID and next ptr */
                cap_data = cap_ptr + 2;
                if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
                        bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
                *buflen = bufsize;
                while (bufsize--) {
                        bcma_extpci_read_config(pc, dev, func, cap_data, buf,
                                                sizeof(u8));
                        cap_data++;
                        buf++;
                }
        }

        return cap_ptr;
}
/* If the root port is capable of returning Config Request
 * Retry Status (CRS) Completion Status to software then
 * enable the feature.
 */
static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
        struct bcma_bus *bus = pc->core->bus;
        u8 cap_ptr, root_ctrl, root_cap, dev;
        u16 val16;
        int i;

        cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
                                           NULL);
        root_cap = cap_ptr + PCI_EXP_RTCAP;
        bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
        if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
                /* Enable CRS software visibility */
                root_ctrl = cap_ptr + PCI_EXP_RTCTL;
                val16 = PCI_EXP_RTCTL_CRSSVE;
                bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
                                        sizeof(u16));

                /* Initiate a configuration request to read the vendor id
                 * field of the device function's config space header after
                 * 100 ms wait time from the end of Reset. If the device is
                 * not done with its internal initialization, it must at
                 * least return a completion TLP, with a completion status
                 * of "Configuration Request Retry Status (CRS)". The root
                 * complex must complete the request to the host by returning
                 * a read-data value of 0001h for the Vendor ID field and
                 * all 1s for any additional bytes included in the request.
                 * Poll using the config reads for max wait time of 1 sec or
                 * until we receive the successful completion status. Repeat
                 * the procedure for all the devices.
                 */
                for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
                        for (i = 0; i < 100000; i++) {
                                bcma_extpci_read_config(pc, dev, 0,
                                                        PCI_VENDOR_ID, &val16,
                                                        sizeof(val16));
                                if (val16 != 0x1)
                                        break;
                                udelay(10);
                        }
                        if (val16 == 0x1)
                                bcma_err(bus, "PCI: Broken device in slot %d\n",
                                         dev);
                }
        }
}
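/* Set up the PCIe core as a host controller: allocate and populate the
 * bcma_drv_pci_host structure, program the SBTOPCI translation windows,
 * reset the root complex and wait the spec-mandated 100 ms, enable CRS
 * visibility, bump MPS/MRRS on BCM4706/BCM4716, enable the bridge's memory
 * and bus-master bits plus PCI interrupts, and finally register the
 * controller with the MIPS PCI code.
 */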
void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
        struct bcma_bus *bus = pc->core->bus;
        struct bcma_drv_pci_host *pc_host;
        u32 tmp;
        u32 pci_membase_1G;
        unsigned long io_map_base;

        bcma_info(bus, "PCIEcore in host mode found\n");

        if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
                bcma_info(bus, "This PCIE core is disabled and not working\n");
                return;
        }

        pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
        if (!pc_host) {
                bcma_err(bus, "can not allocate memory");
                return;
        }

        spin_lock_init(&pc_host->cfgspace_lock);

        pc->host_controller = pc_host;
        pc_host->pci_controller.io_resource = &pc_host->io_resource;
        pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
        pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
        pc_host->pdev = pc;

        pci_membase_1G = BCMA_SOC_PCI_DMA;
        pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;

        pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
        pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;

        pc_host->mem_resource.name = "BCMA PCIcore external memory";
        pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
        pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
        pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;

        pc_host->io_resource.name = "BCMA PCIcore external I/O";
        pc_host->io_resource.start = 0x100;
        pc_host->io_resource.end = 0x7FF;
        pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;

        /* Reset RC */
        usleep_range(3000, 5000);
        pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
        msleep(50);
        pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
                        BCMA_CORE_PCI_CTL_RST_OE);

        /* 64 MB I/O access window. On 4716, use
         * sbtopcie0 to access the device registers. We
         * can't use address match 2 (1 GB window) region
         * as MIPS can't generate a 64-bit address on the
         * backplane.
         */
        if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
            bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
                pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
                pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
                                            BCMA_SOC_PCI_MEM_SZ - 1;
                pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
                                BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
        } else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
                tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
                tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
                tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
                if (pc->core->core_unit == 0) {
                        pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
                        pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
                                                    BCMA_SOC_PCI_MEM_SZ - 1;
                        pc_host->io_resource.start = 0x100;
                        pc_host->io_resource.end = 0x47F;
                        pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
                        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
                                        tmp | BCMA_SOC_PCI_MEM);
                } else if (pc->core->core_unit == 1) {
                        pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
                        pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
                                                    BCMA_SOC_PCI_MEM_SZ - 1;
                        pc_host->io_resource.start = 0x480;
                        pc_host->io_resource.end = 0x7FF;
                        pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
                        pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
                        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
                                        tmp | BCMA_SOC_PCI1_MEM);
                }
        } else
                pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
                                BCMA_CORE_PCI_SBTOPCI_IO);

        /* 64 MB configuration access window */
        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);

        /* 1 GB memory access window */
        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
                        BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);
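        /* At this point the three backplane-to-PCI translation windows are
         * programmed: SBTOPCI0 maps either device memory or I/O space (chip
         * dependent, see above), SBTOPCI1 carries type 0 configuration
         * cycles, and SBTOPCI2 provides the 1 GB memory window based at
         * pci_membase_1G.
         */
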
        /* As per PCI Express Base Spec 1.1 we need to wait for
         * at least 100 ms from the end of a reset (cold/warm/hot)
         * before issuing configuration requests to PCI Express
         * devices.
         */
        msleep(100);

        bcma_core_pci_enable_crs(pc);

        if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
            bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
                u16 val16;
                bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
                                        &val16, sizeof(val16));
                val16 |= (2 << 5);      /* Max payload size of 512 */
                val16 |= (2 << 12);     /* MRRS 512 */
                bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
                                         &val16, sizeof(val16));
        }

        /* Enable PCI bridge BAR0 memory & master access */
        tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
        bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));

        /* Enable PCI interrupts */
        pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);

        /* Ok, ready to run, register it to the system.
         * The following needs to change if we want to port hostmode
         * to a non-MIPS platform. */
        io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
                                                     resource_size(&pc_host->mem_resource));
        pc_host->pci_controller.io_map_base = io_map_base;
        set_io_port_base(pc_host->pci_controller.io_map_base);
        /* Give some time to the PCI controller to configure itself with the new
         * values. Not waiting at this point causes crashes of the machine. */
        usleep_range(10000, 15000);
        register_pci_controller(&pc_host->pci_controller);
        return;
}
/* Early PCI fixup for a device on the PCI-core bridge. */
static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
{
        if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
                /* This is not a device on the PCI-core bridge. */
                return;
        }
        if (PCI_SLOT(dev->devfn) != 0)
                return;

        pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));

        /* Enable PCI bridge bus mastering and memory space */
        pci_set_master(dev);
        if (pcibios_enable_device(dev, ~0) < 0) {
                pr_err("PCI: BCMA bridge enable failed\n");
                return;
        }

        /* Enable PCI bridge BAR1 prefetch and burst */
        pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);
/* Early PCI fixup for all PCI-cores to set the correct memory address. */
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
        struct resource *res;
        int pos, err;

        if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
                /* This is not a device on the PCI-core bridge. */
                return;
        }
        if (PCI_SLOT(dev->devfn) == 0)
                return;

        pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));

        for (pos = 0; pos < 6; pos++) {
                res = &dev->resource[pos];
                if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) {
                        err = pci_assign_resource(dev, pos);
                        if (err)
                                pr_err("PCI: Problem fixing up the addresses on %s\n",
                                       pci_name(dev));
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
/* This function is called when doing a pci_enable_device().
 * We must first check if the device is a device on the PCI-core bridge. */
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
        struct bcma_drv_pci_host *pc_host;
        int readrq;

        if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
                /* This is not a device on the PCI-core bridge. */
                return -ENODEV;
        }
        pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
                               pci_ops);

        pr_info("PCI: Fixing up device %s\n", pci_name(dev));

        /* Fix up interrupt lines */
        dev->irq = bcma_core_irq(pc_host->pdev->core, 0);
        pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);

        readrq = pcie_get_readrq(dev);
        if (readrq > 128) {
                pr_info("change PCIe max read request size from %i to 128\n", readrq);
                pcie_set_readrq(dev, 128);
        }
        return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);
/* PCI device IRQ mapping. */
int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
{
        struct bcma_drv_pci_host *pc_host;

        if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
                /* This is not a device on the PCI-core bridge. */
                return -ENODEV;
        }

        pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
                               pci_ops);
        return bcma_core_irq(pc_host->pdev->core, 0);
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);