/*
 *	Low-Level PCI Support for PC
 *
 *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/acpi.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci_x86.h>

unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
                                PCI_PROBE_MMCONF;

unsigned int pci_early_dump_regs;
static int pci_bf_sort;
int pci_routeirq;
int noioapicquirk;
#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
int noioapicreroute = 0;
#else
int noioapicreroute = 1;
#endif
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
struct pci_bus *pci_root_bus;
struct pci_raw_ops *raw_pci_ops;
struct pci_raw_ops *raw_pci_ext_ops;
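
/*
 * Low-level config space accessors.  Accesses to the first 256 bytes of
 * config space in domain 0 go through raw_pci_ops when it is set;
 * everything else (other domains, or extended offsets >= 256) falls back
 * to raw_pci_ext_ops (e.g. MMCONFIG) when that is available.
 */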
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
                                                int reg, int len, u32 *val)
{
        if (domain == 0 && reg < 256 && raw_pci_ops)
                return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
        if (raw_pci_ext_ops)
                return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
        return -EINVAL;
}

int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
                                                int reg, int len, u32 val)
{
        if (domain == 0 && reg < 256 && raw_pci_ops)
                return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
        if (raw_pci_ext_ops)
                return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
        return -EINVAL;
}
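
/*
 * pci_read()/pci_write() below wrap the raw accessors into the
 * struct pci_ops (pci_root_ops) that is attached to every root bus
 * scanned from this file; see pcibios_scan_root() and
 * pci_scan_bus_with_sysdata().
 */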
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
        return raw_pci_read(pci_domain_nr(bus), bus->number,
                                devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
        return raw_pci_write(pci_domain_nr(bus), bus->number,
                                devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};

/*
 * legacy, numa, and acpi all want to call pcibios_scan_root
 * from their initcalls. This flag prevents that.
 */
int pcibios_scanned;

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_SPINLOCK(pci_config_lock);

static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
{
        pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
        printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
        return 0;
}

static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitconst = {
/*
 * Systems where PCI IO resource ISA alignment can be skipped
 * when the ISA enable bit in the bridge control is not set
 */
        {
                .callback = can_skip_ioresource_align,
                .ident = "IBM System x3800",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
                },
        },
        {
                .callback = can_skip_ioresource_align,
                .ident = "IBM System x3850",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3850"),
                },
        },
        {
                .callback = can_skip_ioresource_align,
                .ident = "IBM System x3950",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3950"),
                },
        },
        {}
};

void __init dmi_check_skip_isa_align(void)
{
        dmi_check_system(can_skip_pciprobe_dmi_table);
}
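
/*
 * With "pci=norom" (PCI_NOASSIGN_ROMS), ROM resources that were neither
 * inserted into the resource tree nor assigned by the BIOS are cleared so
 * the kernel will not assign them; BIOS-assigned ROMs are dealt with later.
 */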
static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
        struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];

        if (pci_probe & PCI_NOASSIGN_ROMS) {
                if (rom_r->parent)
                        return;
                if (rom_r->start) {
                        /* we deal with BIOS assigned ROM later */
                        return;
                }
                rom_r->start = rom_r->end = rom_r->flags = 0;
        }
}

/*
 *  Called after each bus is probed, but before its children
 *  are examined.
 */

void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
        struct pci_dev *dev;

        /* root bus? */
        if (!b->parent)
                x86_pci_root_bus_res_quirks(b);
        pci_read_bridge_bases(b);
        list_for_each_entry(dev, &b->devices, bus_list)
                pcibios_fixup_device_resources(dev);
}

/*
 * Only use DMI information to set this if nothing was passed
 * on the kernel command line (which was parsed earlier).
 */

static int __devinit set_bf_sort(const struct dmi_system_id *d)
{
        if (pci_bf_sort == pci_bf_sort_default) {
                pci_bf_sort = pci_dmi_bf;
                printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
        }
        return 0;
}

/*
 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
 */
#ifdef __i386__
static int __devinit assign_all_busses(const struct dmi_system_id *d)
{
        pci_probe |= PCI_ASSIGN_ALL_BUSSES;
        printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
                        " (pci=assign-busses)\n", d->ident);
        return 0;
}
#endif
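
/*
 * DMI quirk table: laptops that need "pci=assign-busses" to see Cardbus
 * cards, plus Dell/HP servers where "pci=bfsort" (breadth-first sorting of
 * PCI devices, see pci_sort_breadthfirst() in pcibios_init()) is turned on
 * by default unless something else was given on the command line.
 */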
static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
#ifdef __i386__
/*
 * Laptops which need pci=assign-busses to see Cardbus cards
 */
        {
                .callback = assign_all_busses,
                .ident = "Samsung X20 Laptop",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
                },
        },
#endif  /* __i386__ */
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge 1950",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge 1955",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge 2900",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge 2950",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge R900",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL20p G3",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G3"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL20p G4",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G4"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL30p G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL30p G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL25p G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL25p G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL35p G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL35p G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL45p G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL45p G2",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G2"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL460c G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL460c G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL465c G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL465c G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL480c G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL480c G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant BL685c G1",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant DL360",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant DL380",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
                },
        },
#ifdef __i386__
        {
                .callback = assign_all_busses,
                .ident = "Compaq EVO N800c",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "EVO N800c"),
                },
        },
#endif
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant DL385 G2",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
                },
        },
        {
                .callback = set_bf_sort,
                .ident = "HP ProLiant DL585 G2",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
                },
        },
        {}
};

void __init dmi_check_pciprobe(void)
{
        dmi_check_system(pciprobe_dmi_table);
}

struct pci_bus * __devinit pcibios_scan_root(int busnum)
{
        struct pci_bus *bus = NULL;
        struct pci_sysdata *sd;

        while ((bus = pci_find_next_bus(bus)) != NULL) {
                if (bus->number == busnum) {
                        /* Already scanned */
                        return bus;
                }
        }

        /* Allocate per-root-bus (not per bus) arch-specific data.
         * TODO: leak; this memory is never freed.
         * It's arguable whether it's worth the trouble to care.
         */
        sd = kzalloc(sizeof(*sd), GFP_KERNEL);
        if (!sd) {
                printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
                return NULL;
        }

        sd->node = get_mp_bus_to_node(busnum);

        printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
        bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
        if (!bus)
                kfree(sd);

        return bus;
}

int __init pcibios_init(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        if (!raw_pci_ops) {
                printk(KERN_WARNING "PCI: System does not support PCI\n");
                return 0;
        }

        /*
         * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
         * and P4. It's also good for 386/486s (which actually have 16)
         * as quite a few PCI devices do not support smaller values.
         */
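        /* pci_dfl_cache_line_size is in units of 32-bit words, hence ">> 2". */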
        pci_dfl_cache_line_size = 32 >> 2;
        if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
                pci_dfl_cache_line_size = 64 >> 2;      /* K7 & K8 */
        else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
                pci_dfl_cache_line_size = 128 >> 2;     /* P4 */

        pcibios_resource_survey();

        if (pci_bf_sort >= pci_force_bf)
                pci_sort_breadthfirst();
        return 0;
}
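
/*
 * Parse "pci=" kernel command line options.  Options handled here return
 * NULL so they are not propagated further; an unrecognized option is
 * returned unchanged in str for the caller to deal with.
 */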
char * __devinit pcibios_setup(char *str)
{
        if (!strcmp(str, "off")) {
                pci_probe = 0;
                return NULL;
        } else if (!strcmp(str, "bfsort")) {
                pci_bf_sort = pci_force_bf;
                return NULL;
        } else if (!strcmp(str, "nobfsort")) {
                pci_bf_sort = pci_force_nobf;
                return NULL;
        }
#ifdef CONFIG_PCI_BIOS
        else if (!strcmp(str, "bios")) {
                pci_probe = PCI_PROBE_BIOS;
                return NULL;
        } else if (!strcmp(str, "nobios")) {
                pci_probe &= ~PCI_PROBE_BIOS;
                return NULL;
        } else if (!strcmp(str, "biosirq")) {
                pci_probe |= PCI_BIOS_IRQ_SCAN;
                return NULL;
        } else if (!strncmp(str, "pirqaddr=", 9)) {
                pirq_table_addr = simple_strtoul(str+9, NULL, 0);
                return NULL;
        }
#endif
#ifdef CONFIG_PCI_DIRECT
        else if (!strcmp(str, "conf1")) {
                pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
                return NULL;
        }
        else if (!strcmp(str, "conf2")) {
                pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
                return NULL;
        }
#endif
#ifdef CONFIG_PCI_MMCONFIG
        else if (!strcmp(str, "nommconf")) {
                pci_probe &= ~PCI_PROBE_MMCONF;
                return NULL;
        }
        else if (!strcmp(str, "check_enable_amd_mmconf")) {
                pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
                return NULL;
        }
#endif
        else if (!strcmp(str, "noacpi")) {
                acpi_noirq_set();
                return NULL;
        }
        else if (!strcmp(str, "noearly")) {
                pci_probe |= PCI_PROBE_NOEARLY;
                return NULL;
        }
#ifndef CONFIG_X86_VISWS
        else if (!strcmp(str, "usepirqmask")) {
                pci_probe |= PCI_USE_PIRQ_MASK;
                return NULL;
        } else if (!strncmp(str, "irqmask=", 8)) {
                pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
                return NULL;
        } else if (!strncmp(str, "lastbus=", 8)) {
                pcibios_last_bus = simple_strtol(str+8, NULL, 0);
                return NULL;
        }
#endif
        else if (!strcmp(str, "rom")) {
                pci_probe |= PCI_ASSIGN_ROMS;
                return NULL;
        } else if (!strcmp(str, "norom")) {
                pci_probe |= PCI_NOASSIGN_ROMS;
                return NULL;
        } else if (!strcmp(str, "assign-busses")) {
                pci_probe |= PCI_ASSIGN_ALL_BUSSES;
                return NULL;
        } else if (!strcmp(str, "use_crs")) {
                pci_probe |= PCI_USE__CRS;
                return NULL;
        } else if (!strcmp(str, "earlydump")) {
                pci_early_dump_regs = 1;
                return NULL;
        } else if (!strcmp(str, "routeirq")) {
                pci_routeirq = 1;
                return NULL;
        } else if (!strcmp(str, "skip_isa_align")) {
                pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
                return NULL;
        } else if (!strcmp(str, "noioapicquirk")) {
                noioapicquirk = 1;
                return NULL;
        } else if (!strcmp(str, "ioapicreroute")) {
                if (noioapicreroute != -1)
                        noioapicreroute = 0;
                return NULL;
        } else if (!strcmp(str, "noioapicreroute")) {
                if (noioapicreroute != -1)
                        noioapicreroute = 1;
                return NULL;
        }
        return str;
}

unsigned int pcibios_assign_all_busses(void)
{
        return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        int err;

        if ((err = pci_enable_resources(dev, mask)) < 0)
                return err;

        if (!pci_dev_msi_enabled(dev))
                return pcibios_enable_irq(dev);
        return 0;
}

void pcibios_disable_device (struct pci_dev *dev)
{
        if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
                pcibios_disable_irq(dev);
}

int pci_ext_cfg_avail(struct pci_dev *dev)
{
        if (raw_pci_ext_ops)
                return 1;
        else
                return 0;
}

struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
{
        struct pci_bus *bus = NULL;
        struct pci_sysdata *sd;

        /*
         * Allocate per-root-bus (not per bus) arch-specific data.
         * TODO: leak; this memory is never freed.
         * It's arguable whether it's worth the trouble to care.
         */
        sd = kzalloc(sizeof(*sd), GFP_KERNEL);
        if (!sd) {
                printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busno);
                return NULL;
        }
        sd->node = node;
        bus = pci_scan_bus(busno, ops, sd);
        if (!bus)
                kfree(sd);

        return bus;
}

struct pci_bus * __devinit pci_scan_bus_with_sysdata(int busno)
{
        return pci_scan_bus_on_node(busno, &pci_root_ops, -1);
}

/*
 * NUMA info for PCI busses
 *
 * Early arch code is responsible for filling in reasonable values here.
 * A node id of "-1" means "use current node".  In other words, if a bus
 * has a -1 node id, it's not tightly coupled to any particular chunk
 * of memory (as is the case on some Nehalem systems).
 */
#ifdef CONFIG_NUMA

#define BUS_NR 256

#ifdef CONFIG_X86_64

static int mp_bus_to_node[BUS_NR] = {
        [0 ... BUS_NR - 1] = -1
};

void set_mp_bus_to_node(int busnum, int node)
{
        if (busnum >= 0 && busnum < BUS_NR)
                mp_bus_to_node[busnum] = node;
}

int get_mp_bus_to_node(int busnum)
{
        int node = -1;

        if (busnum < 0 || busnum > (BUS_NR - 1))
                return node;

        node = mp_bus_to_node[busnum];

        /*
         * let numa_node_id() decide it later in dma_alloc_pages()
         * if there is no RAM on that node
         */
        if (node != -1 && !node_online(node))
                node = -1;

        return node;
}

#else /* CONFIG_X86_32 */

static int mp_bus_to_node[BUS_NR] = {
        [0 ... BUS_NR - 1] = -1
};

void set_mp_bus_to_node(int busnum, int node)
{
        if (busnum >= 0 && busnum < BUS_NR)
                mp_bus_to_node[busnum] = (unsigned char) node;
}

int get_mp_bus_to_node(int busnum)
{
        int node;

        if (busnum < 0 || busnum > (BUS_NR - 1))
                return 0;
        node = mp_bus_to_node[busnum];
        return node;
}

#endif /* CONFIG_X86_32 */

#endif /* CONFIG_NUMA */