/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);
struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}
static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know whether PCI has been initialized.
 * Basically, we consider PCI not to be initialized when there is no
 * device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */

	if (!size)
		return 0;

	/*
	 * Get the lowest of them to find the decode size, and from that
	 * the extent.
	 */
	size = (size & ~(size-1)) - 1;

	/*
	 * base == maxbase can be valid only if the BAR has already been
	 * programmed with all 1s.
	 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
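/*
 * Worked example (illustrative sketch, not from the upstream source): for a
 * 32-bit memory BAR whose sizing read returns 0xfffff000 after the all-1s
 * write, pci_size() returns the decoded span minus one, so the caller can
 * later compute res->end = base + size:
 *
 *	u64 sz = pci_size(0xe0000000, 0xfffff000,
 *			  (u32)PCI_BASE_ADDRESS_MEM_MASK);
 *	// sz == 0xfff, i.e. the BAR decodes 4 KB
 */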
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;

fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
			     IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
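/*
 * Sketch of how the resources filled in above are typically consumed later
 * (illustrative only; the BAR index is an assumption, not from this file):
 *
 *	resource_size_t start = pci_resource_start(pdev, 0);
 *	resource_size_t len   = pci_resource_len(pdev, 0);
 *
 * Drivers read BARs through these accessors rather than touching config
 * space themselves.
 */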
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
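/*
 * Worked example (illustrative): with PCI_MEMORY_BASE = 0xe000 and
 * PCI_MEMORY_LIMIT = 0xe1f0 the window above decodes as
 *
 *	base = 0xe000 << 16 = 0xe0000000
 *	end  = (0xe1f0 << 16) + 0xfffff = 0xe1ffffff
 *
 * i.e. a 32 MB non-prefetchable window [mem 0xe0000000-0xe1ffffff].
 */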
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}
static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->bus = b;
	return bridge;
}
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
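/*
 * Example (illustrative): a Link Status value of 0x1043 has
 * (linksta & PCI_EXP_LNKSTA_CLS) == 3, so the table above reports
 * PCIE_SPEED_8_0GT; the negotiated link width lives in other bits of the
 * same register and is not touched here.
 */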
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->parent)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/*
	 * Initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);
static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}
/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/*
	 * Disable MasterAbortMode during probing to avoid reporting
	 * of bus errors (in some architectures).
	 */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;

		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge.  The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);

		/* Subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {

		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)

				/*
				 * Temporarily disable forwarding of the
				 * configuration cycles on all bridges in
				 * this bus segment to avoid possible
				 * conflicts in the second pass between two
				 * bridges programmed with overlapping bus
				 * ranges.
				 */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/*
		 * Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus.
		 */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)        <<  0)
		      | ((unsigned int)(child->busn_res.start) <<  8)
		      | ((unsigned int)(child->busn_res.end)   << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus
					 * bridges -- try to leave one
					 * valid bus number for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}

		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}
EXPORT_SYMBOL(pci_scan_bridge);
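/*
 * Usage sketch (illustrative): pci_scan_child_bus() below drives the two
 * passes described above roughly like this:
 *
 *	for (pass = 0; pass < 2; pass++)
 *		list_for_each_entry(dev, &bus->devices, bus_list)
 *			if (pci_is_bridge(dev))
 *				max = pci_scan_bridge(bus, dev, max, pass);
 */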
/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}
void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
	 * of a Link.  No PCIe component has two Links.  Two Links are
	 * connected by a Switch that has a Port on each Link and internal
	 * logic to connect the two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}
/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transactions and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}
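/*
 * Example (illustrative): a PCIe endpoint whose dword at offset 0x100 reads
 * back as a valid extended capability header ends up with a 4096-byte config
 * space here, while a conventional PCI device, or one whose extended space
 * is aliased or unreadable, falls back to 256 bytes:
 *
 *	dev->cfg_size = pci_cfg_space_size(dev);
 */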
#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}
/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);

	/*
	 * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	 * set this higher, assuming the system even supports it.
	 */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		/* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* Need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);

	/* Device class may be changed after fixup */
	class = dev->class >> 8;

	if (dev->non_compliant_bars && !dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	switch (dev->hdr_type) {		/* header type */
	case PCI_HEADER_TYPE_NORMAL:		/* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code.  Legacy mode ATA controllers have fixed
		 * addresses.  These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;

			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		/* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;

		/*
		 * The PCI-to-PCI bridge spec requires that subtractive
		 * decoding (i.e. transparent) bridges have a programming
		 * interface code of 0x01.
		 */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		/* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				/* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
static void pci_configure_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);
	int mps, p_mps, rc;

	if (!pci_is_pcie(dev))
		return;

	/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
	if (dev->is_virtfn)
		return;

	/*
	 * For Root Complex Integrated Endpoints, program the maximum
	 * supported value unless limited by the PCIE_BUS_PEER2PEER case.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
		if (pcie_bus_config == PCIE_BUS_PEER2PEER)
			mps = 128;
		else
			mps = 128 << dev->pcie_mpss;
		rc = pcie_set_mps(dev, mps);
		if (rc)
			pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
				 mps);
		return;
	}

	if (!bridge || !pci_is_pcie(bridge))
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps == p_mps)
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
		return;
	}

	/*
	 * Fancier MPS configuration is done later by
	 * pcie_bus_configure_settings().
	 */
	if (pcie_bus_config != PCIE_BUS_DEFAULT)
		return;

	rc = pcie_set_mps(dev, p_mps);
	if (rc) {
		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 p_mps);
		return;
	}

	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
		 p_mps, mps, 128 << dev->pcie_mpss);
}
static struct hpp_type0 pci_default_type0 = {
	.revision	= 1,
	.cache_line_size = 8,
	.latency_timer	= 0x40,
	.enable_serr	= 0,
	.enable_perr	= 0,
};

static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}
static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
	int pos;

	if (!hpp)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	dev_warn(&dev->dev, "PCI-X settings not supported\n");
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
static void pci_configure_device(struct pci_dev *dev)
{
	struct hotplug_params hpp;
	int ret;

	pci_configure_mps(dev);

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	if (ret)
		return;

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
/**
 * pci_release_dev - free a pci device structure when all users of it are
 *		     finished
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device
 * are done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	kfree(pci_dev);
}
*pci_alloc_dev(struct pci_bus
*bus
)
1620 struct pci_dev
*dev
;
1622 dev
= kzalloc(sizeof(struct pci_dev
), GFP_KERNEL
);
1626 INIT_LIST_HEAD(&dev
->bus_list
);
1627 dev
->dev
.type
= &pci_dev_type
;
1628 dev
->bus
= pci_bus_get(bus
);
1632 EXPORT_SYMBOL(pci_alloc_dev
);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* Some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/*
	 * Configuration Request Retry Status.  Some root ports return the
	 * actual device ID instead of the synthetic ID (0xFFFF) required
	 * by the PCIe spec.  Ignore the device ID and only check for
	 * (vendor id == 1).
	 */
	while ((*l & 0xffff) == 0x0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
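/*
 * Usage sketch (illustrative): callers pass a CRS timeout in milliseconds,
 * e.g. pci_scan_device() below waits up to 60 seconds for a device that
 * keeps answering with Configuration Request Retry Status:
 *
 *	u32 l;
 *	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
 *		return NULL;
 */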
/*
 * Read the config data for a PCI device, sanity-check it,
 * and fill in the dev structure.
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		pci_bus_put(dev->bus);
		kfree(dev);
		return NULL;
	}

	return dev;
}
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
/*
 * This is the equivalent of pci_host_bridge_msi_domain() that acts on
 * devices.  Firmware interfaces that can select the MSI domain on a
 * per-device basis should be called from here.
 */
static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
{
	struct irq_domain *d;

	/*
	 * If a domain has been set through the pcibios_add_device()
	 * callback, then this is the one (platform code knows best).
	 */
	d = dev_get_msi_domain(&dev->dev);
	if (d)
		return d;

	/*
	 * Let's see if we have a firmware interface able to provide
	 * the domain.
	 */
	d = pci_msi_get_device_domain(dev);
	if (d)
		return d;

	return NULL;
}

static void pci_set_msi_domain(struct pci_dev *dev)
{
	struct irq_domain *d;

	/*
	 * If the platform or firmware interfaces cannot supply a
	 * device-specific MSI domain, then inherit the default domain
	 * from the host bridge itself.
	 */
	d = pci_dev_msi_domain(dev);
	if (!d)
		d = dev_get_msi_domain(&dev->bus->dev);

	dev_set_msi_domain(&dev->dev, d);
}
/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to pci_dev struct of the PCI device
 *
 * Function to update PCI devices's DMA configuration using the same
 * info from the OF node or ACPI node of host bridge's parent (if any).
 */
static void pci_dma_configure(struct pci_dev *dev)
{
	struct device *bridge = pci_get_host_bridge_device(dev);

	if (IS_ENABLED(CONFIG_OF) &&
		bridge->parent && bridge->parent->of_node) {
		of_dma_configure(&dev->dev, bridge->parent->of_node);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
		enum dev_dma_attr attr = acpi_get_dma_attr(adev);

		if (attr == DEV_DMA_NOT_SUPPORTED)
			dev_warn(&dev->dev, "DMA not supported.\n");
		else
			arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
					   attr == DEV_DMA_COHERENT);
	}

	pci_put_host_bridge_device(bridge);
}
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;
	pci_dma_configure(dev);

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* Moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
*pci_scan_single_device(struct pci_bus
*bus
, int devfn
)
1865 struct pci_dev
*dev
;
1867 dev
= pci_get_slot(bus
, devfn
);
1873 dev
= pci_scan_device(bus
, devfn
);
1877 pci_device_add(dev
, bus
);
1881 EXPORT_SYMBOL(pci_scan_single_device
);
static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}

static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;

	/*
	 * PCIe downstream ports are bridges that normally lead to only a
	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
	 * possible devices, not just device 0.  See PCIe spec r3.0,
	 * sec 7.3.1.
	 */
	if (parent->has_secondary_link &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;

	return 0;
}
/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* Only one slot has PCIe device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
EXPORT_SYMBOL(pci_scan_slot);
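/*
 * Example (illustrative): devfn packs the device and function numbers, so
 * scanning slot 3 probes functions 03.0 through 03.7 as needed:
 *
 *	nr = pci_scan_slot(bus, PCI_DEVFN(3, 0));	// devfn 0x18
 */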
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)

			/*
			 * For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the
			 * case, walk from the top down and set the MPS of the
			 * child to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to
			 * be properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}
static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/*
	 * In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/*
	 * For max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps().
	 */
	mrrs = pcie_get_mps(dev);

	/*
	 * MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
}
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}
/*
 * pcie_bus_configure_settings() requires that pci_walk_bus() work in a
 * top-down, parents-then-children fashion.  If this changes, then this code
 * will not work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss = 0;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/*
	 * FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
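/*
 * Usage sketch (illustrative, assuming a typical host bridge driver): after
 * scanning, MPS/MRRS settings are commonly applied per child bus:
 *
 *	pci_scan_child_bus(bus);
 *	list_for_each_entry(child, &bus->children, node)
 *		pcie_bus_configure_settings(child);
 */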
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * Make sure a hotplug bridge has at least the minimum requested
	 * number of buses, but do not extend past the parent's range.
	 */
	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
			max = bus->busn_res.start + pci_hotplug_bus_size - 1;

		/* Do not allocate more buses than we have room left */
		if (max > bus->busn_res.end)
			max = bus->busn_res.end;
	}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}

void __weak pcibios_add_bus(struct pci_bus *bus)
{
}

void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct resource_entry *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus(NULL);
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	b->domain_nr = pci_bus_find_domain_nr(b, parent);
#endif
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);
	pci_set_bus_msi_domain(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, resources) {
		list_move_tail(&window->node, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);

		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else {
			bus_addr[0] = '\0';
		}

		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = request_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}
int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}
void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR %s released\n",
		   res, ret ? "can not be" : "is");
}
struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata,
		struct list_head *resources, struct msi_controller *msi)
{
	struct resource_entry *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	resource_list_for_each_entry(window, resources)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	b->msi = msi;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	return b;
}

struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
				     NULL);
}
EXPORT_SYMBOL(pci_scan_root_bus);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		pci_scan_child_bus(b);
	} else {
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);
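/*
 * Usage sketch (illustrative; "my_pci_ops" and "sysdata" are placeholders,
 * not names from this file): legacy callers scan bus 0 with their config
 * accessors and then register the discovered devices themselves:
 *
 *	struct pci_bus *b = pci_scan_bus(0, &my_pci_ops, sysdata);
 *	if (b)
 *		pci_bus_add_devices(b);
 */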
/**
 * pci_rescan_bus_bridge_resize - Scan a PCI bus for devices
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resources if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}

/**
 * pci_rescan_bus - Scan a PCI bus for devices
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);

void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);

void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
static int __init pci_sort_bf_cmp(const struct device *d_a,
				  const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}