arch/x86/pci/common.c

/*
 *      Low-Level PCI Support for PC
 *
 *      (c) 1999--2000 Martin Mares <mj@ucw.cz>
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <asm/acpi.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci_x86.h>
#include <asm/setup.h>

unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
                                PCI_PROBE_MMCONF;

static int pci_bf_sort;
int pci_routeirq;
int noioapicquirk;
#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
int noioapicreroute = 0;
#else
int noioapicreroute = 1;
#endif
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
const struct pci_raw_ops *__read_mostly raw_pci_ops;
const struct pci_raw_ops *__read_mostly raw_pci_ext_ops;

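/*
 * Dispatch a raw config-space access: the legacy ops (conf1/conf2/BIOS)
 * cover domain 0 and the first 256 bytes of config space; the extended ops
 * (ECAM/mmconfig) handle other domains and offsets above 0xFF when present.
 */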
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
                                                int reg, int len, u32 *val)
{
        if (domain == 0 && reg < 256 && raw_pci_ops)
                return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
        if (raw_pci_ext_ops)
                return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
        return -EINVAL;
}

int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
                                                int reg, int len, u32 val)
{
        if (domain == 0 && reg < 256 && raw_pci_ops)
                return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
        if (raw_pci_ext_ops)
                return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
        return -EINVAL;
}

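/* Thin wrappers exposing the raw accessors to the PCI core via pci_root_ops. */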
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
        return raw_pci_read(pci_domain_nr(bus), bus->number,
                                devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
        return raw_pci_write(pci_domain_nr(bus), bus->number,
                                devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};

/*
 * This interrupt-safe spinlock protects all accesses to PCI configuration
 * space, except for the mmconfig (ECAM) based operations.
 */
DEFINE_RAW_SPINLOCK(pci_config_lock);

static int __init can_skip_ioresource_align(const struct dmi_system_id *d)
{
        pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
        printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
        return 0;
}

static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __initconst = {
/*
 * Systems where PCI IO resource ISA alignment can be skipped
 * when the ISA enable bit in the bridge control is not set
 */
        {
                .callback = can_skip_ioresource_align,
                .ident = "IBM System x3800",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
                },
        },
        {
                .callback = can_skip_ioresource_align,
                .ident = "IBM System x3850",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3850"),
                },
        },
        {
                .callback = can_skip_ioresource_align,
                .ident = "IBM System x3950",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3950"),
                },
        },
        {}
};

void __init dmi_check_skip_isa_align(void)
{
        dmi_check_system(can_skip_pciprobe_dmi_table);
}

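/* Honour the pci=nobar and pci=norom options for a freshly probed device. */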
static void pcibios_fixup_device_resources(struct pci_dev *dev)
{
        struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];
        struct resource *bar_r;
        int bar;

        if (pci_probe & PCI_NOASSIGN_BARS) {
                /*
                 * If the BIOS did not assign the BAR, zero out the
                 * resource so the kernel doesn't attempt to assign
                 * it later on in pci_assign_unassigned_resources
                 */
                for (bar = 0; bar <= PCI_STD_RESOURCE_END; bar++) {
                        bar_r = &dev->resource[bar];
                        if (bar_r->start == 0 && bar_r->end != 0) {
                                bar_r->flags = 0;
                                bar_r->end = 0;
                        }
                }
        }

        if (pci_probe & PCI_NOASSIGN_ROMS) {
                if (rom_r->parent)
                        return;
                if (rom_r->start) {
                        /* we deal with BIOS assigned ROM later */
                        return;
                }
                rom_r->start = rom_r->end = rom_r->flags = 0;
        }
}

/*
 * Called after each bus is probed, but before its children
 * are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
        struct pci_dev *dev;

        pci_read_bridge_bases(b);
        list_for_each_entry(dev, &b->devices, bus_list)
                pcibios_fixup_device_resources(dev);
}

void pcibios_add_bus(struct pci_bus *bus)
{
        acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
        acpi_pci_remove_bus(bus);
}

/*
 * Only use DMI information to set this if nothing was passed
 * on the kernel command line (which was parsed earlier).
 */
static int __init set_bf_sort(const struct dmi_system_id *d)
{
        if (pci_bf_sort == pci_bf_sort_default) {
                pci_bf_sort = pci_dmi_bf;
                printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
        }
        return 0;
}

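/*
 * Dell systems describe the preferred probe order in an OEM-specific DMI
 * record (type 0xB1); if its flag field requests breadth-first ordering,
 * turn on pci=bfsort.
 */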
static void __init read_dmi_type_b1(const struct dmi_header *dm,
                                    void *private_data)
{
        u8 *data = (u8 *)dm + 4;

        if (dm->type != 0xB1)
                return;
        if ((((*(u32 *)data) >> 9) & 0x03) == 0x01)
                set_bf_sort((const struct dmi_system_id *)private_data);
}

static int __init find_sort_method(const struct dmi_system_id *d)
{
        dmi_walk(read_dmi_type_b1, (void *)d);
        return 0;
}

/*
 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
 */
#ifdef __i386__
static int __init assign_all_busses(const struct dmi_system_id *d)
{
        pci_probe |= PCI_ASSIGN_ALL_BUSSES;
        printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
                        " (pci=assign-busses)\n", d->ident);
        return 0;
}
#endif

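/*
 * Some machines (the Stratus/NEC ftServer entries below) place devices at
 * non-zero device numbers on PCIe buses, so every device number has to be
 * scanned rather than only device 0.
 */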
static int __init set_scan_all(const struct dmi_system_id *d)
{
        printk(KERN_INFO "PCI: %s detected, enabling pci=pcie_scan_all\n",
               d->ident);
        pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
        return 0;
}

static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
#ifdef __i386__
/*
 * Laptops which need pci=assign-busses to see Cardbus cards
 */
        {
                .callback = assign_all_busses,
                .ident = "Samsung X20 Laptop",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
                },
        },
#endif          /* __i386__ */
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge 1950",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge 1955",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge 2900",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge 2950",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge R900",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
                },
        },
        {
                .callback = find_sort_method,
                .ident = "Dell System",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL20p G3",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G3"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL20p G4",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G4"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL30p G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL30p G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL25p G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL25p G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL35p G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL35p G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL45p G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL45p G2",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G2"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL460c G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL460c G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL465c G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL465c G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL480c G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL480c G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL685c G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant DL360",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant DL380",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
                },
        },
#ifdef __i386__
        {
                .callback = assign_all_busses,
                .ident = "Compaq EVO N800c",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "EVO N800c"),
                },
        },
#endif
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant DL385 G2",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant DL585 G2",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
                },
        },
        {
                .callback = set_scan_all,
                .ident = "Stratus/NEC ftServer",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Stratus"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
                },
        },
        {
                .callback = set_scan_all,
                .ident = "Stratus/NEC ftServer",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
                },
        },
        {
                .callback = set_scan_all,
                .ident = "Stratus/NEC ftServer",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
                },
        },
        {}
};

void __init dmi_check_pciprobe(void)
{
        dmi_check_system(pciprobe_dmi_table);
}

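/*
 * Scan a root bus that was not enumerated via ACPI: allocate its sysdata,
 * collect the root bus resources, scan it and register the devices found.
 */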
void pcibios_scan_root(int busnum)
{
        struct pci_bus *bus;
        struct pci_sysdata *sd;
        LIST_HEAD(resources);

        sd = kzalloc(sizeof(*sd), GFP_KERNEL);
        if (!sd) {
                printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busnum);
                return;
        }
        sd->node = x86_pci_root_bus_node(busnum);
        x86_pci_root_bus_resources(busnum, &resources);
        printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
        bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, sd, &resources);
        if (!bus) {
                pci_free_resource_list(&resources);
                kfree(sd);
                return;
        }
        pci_bus_add_devices(bus);
}

void __init pcibios_set_cache_line_size(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        /*
         * Set PCI cacheline size to that of the CPU if the CPU has reported it.
         * (For older CPUs that don't support cpuid, we set it to 32 bytes.
         * That is also fine for 386/486s, which actually have 16,
         * as quite a few PCI devices do not support smaller values.)
         */
        if (c->x86_clflush_size > 0) {
                pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
                printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
                        pci_dfl_cache_line_size << 2);
        } else {
                pci_dfl_cache_line_size = 32 >> 2;
                printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
        }
}

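/*
 * Late PCI initialisation: bail out if no config access mechanism was found,
 * then survey resources and optionally re-sort devices breadth-first.
 */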
int __init pcibios_init(void)
{
        if (!raw_pci_ops && !raw_pci_ext_ops) {
                printk(KERN_WARNING "PCI: System does not support PCI\n");
                return 0;
        }

        pcibios_set_cache_line_size();
        pcibios_resource_survey();

        if (pci_bf_sort >= pci_force_bf)
                pci_sort_breadthfirst();
        return 0;
}

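/*
 * Parse the x86-specific "pci=" command-line options, e.g. "pci=nocrs,bfsort"
 * or "pci=lastbus=0x3f".  Returns NULL when an option has been consumed here,
 * or the original string so the generic PCI code can have a look at it.
 */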
char *__init pcibios_setup(char *str)
{
        if (!strcmp(str, "off")) {
                pci_probe = 0;
                return NULL;
        } else if (!strcmp(str, "bfsort")) {
                pci_bf_sort = pci_force_bf;
                return NULL;
        } else if (!strcmp(str, "nobfsort")) {
                pci_bf_sort = pci_force_nobf;
                return NULL;
        }
#ifdef CONFIG_PCI_BIOS
        else if (!strcmp(str, "bios")) {
                pci_probe = PCI_PROBE_BIOS;
                return NULL;
        } else if (!strcmp(str, "nobios")) {
                pci_probe &= ~PCI_PROBE_BIOS;
                return NULL;
        } else if (!strcmp(str, "biosirq")) {
                pci_probe |= PCI_BIOS_IRQ_SCAN;
                return NULL;
        } else if (!strncmp(str, "pirqaddr=", 9)) {
                pirq_table_addr = simple_strtoul(str+9, NULL, 0);
                return NULL;
        }
#endif
#ifdef CONFIG_PCI_DIRECT
        else if (!strcmp(str, "conf1")) {
                pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
                return NULL;
        }
        else if (!strcmp(str, "conf2")) {
                pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
                return NULL;
        }
#endif
#ifdef CONFIG_PCI_MMCONFIG
        else if (!strcmp(str, "nommconf")) {
                pci_probe &= ~PCI_PROBE_MMCONF;
                return NULL;
        }
        else if (!strcmp(str, "check_enable_amd_mmconf")) {
                pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
                return NULL;
        }
#endif
561 else if (!strcmp(str, "noacpi")) {
562 acpi_noirq_set();
563 return NULL;
565 else if (!strcmp(str, "noearly")) {
566 pci_probe |= PCI_PROBE_NOEARLY;
567 return NULL;
569 else if (!strcmp(str, "usepirqmask")) {
570 pci_probe |= PCI_USE_PIRQ_MASK;
571 return NULL;
572 } else if (!strncmp(str, "irqmask=", 8)) {
573 pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
574 return NULL;
575 } else if (!strncmp(str, "lastbus=", 8)) {
576 pcibios_last_bus = simple_strtol(str+8, NULL, 0);
577 return NULL;
578 } else if (!strcmp(str, "rom")) {
579 pci_probe |= PCI_ASSIGN_ROMS;
580 return NULL;
581 } else if (!strcmp(str, "norom")) {
582 pci_probe |= PCI_NOASSIGN_ROMS;
583 return NULL;
584 } else if (!strcmp(str, "nobar")) {
585 pci_probe |= PCI_NOASSIGN_BARS;
586 return NULL;
587 } else if (!strcmp(str, "assign-busses")) {
588 pci_probe |= PCI_ASSIGN_ALL_BUSSES;
589 return NULL;
590 } else if (!strcmp(str, "use_crs")) {
591 pci_probe |= PCI_USE__CRS;
592 return NULL;
593 } else if (!strcmp(str, "nocrs")) {
594 pci_probe |= PCI_ROOT_NO_CRS;
595 return NULL;
596 #ifdef CONFIG_PHYS_ADDR_T_64BIT
597 } else if (!strcmp(str, "big_root_window")) {
598 pci_probe |= PCI_BIG_ROOT_WINDOW;
599 return NULL;
600 #endif
601 } else if (!strcmp(str, "routeirq")) {
602 pci_routeirq = 1;
603 return NULL;
604 } else if (!strcmp(str, "skip_isa_align")) {
605 pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
606 return NULL;
607 } else if (!strcmp(str, "noioapicquirk")) {
608 noioapicquirk = 1;
609 return NULL;
610 } else if (!strcmp(str, "ioapicreroute")) {
611 if (noioapicreroute != -1)
612 noioapicreroute = 0;
613 return NULL;
614 } else if (!strcmp(str, "noioapicreroute")) {
615 if (noioapicreroute != -1)
616 noioapicreroute = 1;
617 return NULL;
619 return str;
unsigned int pcibios_assign_all_busses(void)
{
        return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}

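/*
 * Registry of per-domain DMA operations, so a host bridge driver (VMD, for
 * example) can install its own dma_ops for every device in its PCI domain.
 */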
#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
static LIST_HEAD(dma_domain_list);
static DEFINE_SPINLOCK(dma_domain_list_lock);

void add_dma_domain(struct dma_domain *domain)
{
        spin_lock(&dma_domain_list_lock);
        list_add(&domain->node, &dma_domain_list);
        spin_unlock(&dma_domain_list_lock);
}
EXPORT_SYMBOL_GPL(add_dma_domain);

void del_dma_domain(struct dma_domain *domain)
{
        spin_lock(&dma_domain_list_lock);
        list_del(&domain->node);
        spin_unlock(&dma_domain_list_lock);
}
EXPORT_SYMBOL_GPL(del_dma_domain);

static void set_dma_domain_ops(struct pci_dev *pdev)
{
        struct dma_domain *domain;

        spin_lock(&dma_domain_list_lock);
        list_for_each_entry(domain, &dma_domain_list, node) {
                if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
                        pdev->dev.dma_ops = domain->dma_ops;
                        break;
                }
        }
        spin_unlock(&dma_domain_list_lock);
}
#else
static void set_dma_domain_ops(struct pci_dev *pdev) {}
#endif

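/*
 * Devices behind a VMD host bridge have no platform-controlled hotplug
 * indicators, so leave them to user space.
 */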
static void set_dev_domain_options(struct pci_dev *pdev)
{
        if (is_vmd(pdev->bus))
                pdev->hotplug_user_indicators = 1;
}

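/*
 * Per-device add hook: walk the boot_params setup_data list for a SETUP_PCI
 * entry carrying a firmware-provided option ROM for this device, then apply
 * the DMA-domain and device-option hooks above.
 */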
int pcibios_add_device(struct pci_dev *dev)
{
        struct setup_data *data;
        struct pci_setup_rom *rom;
        u64 pa_data;

        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = memremap(pa_data, sizeof(*rom), MEMREMAP_WB);
                if (!data)
                        return -ENOMEM;

                if (data->type == SETUP_PCI) {
                        rom = (struct pci_setup_rom *)data;

                        if ((pci_domain_nr(dev->bus) == rom->segment) &&
                            (dev->bus->number == rom->bus) &&
                            (PCI_SLOT(dev->devfn) == rom->device) &&
                            (PCI_FUNC(dev->devfn) == rom->function) &&
                            (dev->vendor == rom->vendor) &&
                            (dev->device == rom->devid)) {
                                dev->rom = pa_data +
                                      offsetof(struct pci_setup_rom, romdata);
                                dev->romlen = rom->pcilen;
                        }
                }
                pa_data = data->next;
                memunmap(data);
        }
        set_dma_domain_ops(dev);
        set_dev_domain_options(dev);
        return 0;
}

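/* Enable the device's resources and, unless MSI is in use, route its IRQ. */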
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        int err;

        if ((err = pci_enable_resources(dev, mask)) < 0)
                return err;

        if (!pci_dev_msi_enabled(dev))
                return pcibios_enable_irq(dev);
        return 0;
}

void pcibios_disable_device (struct pci_dev *dev)
{
        if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
                pcibios_disable_irq(dev);
}

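/*
 * Release hook: drop the enable count of a device that is going away and
 * disable it if it was still enabled.
 */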
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
void pcibios_release_device(struct pci_dev *dev)
{
        if (atomic_dec_return(&dev->enable_cnt) >= 0)
                pcibios_disable_device(dev);
}
#endif

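/* Report whether extended (offset >= 256) config space can be reached. */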
int pci_ext_cfg_avail(void)
{
        if (raw_pci_ext_ops)
                return 1;
        else
                return 0;
}