/*
 *	drivers/pci/setup-bus.c
 *
 * Extruded from code written by
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup, sorted resource allocation.
 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     Converted to allocation in 3 passes, which gives
 *	     tighter packing. Prefetchable range support.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include "pci.h"
struct resource_list_x {
	struct resource_list_x *next;
	struct resource *res;
	struct pci_dev *dev;
	resource_size_t start;
	resource_size_t end;
	resource_size_t add_size;
	unsigned long flags;
};
#define free_list(type, head) do {			\
	struct type *list, *tmp;			\
	for (list = (head)->next; list;) {		\
		tmp = list;				\
		list = list->next;			\
		kfree(tmp);				\
	}						\
	(head)->next = NULL;				\
} while (0)
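/*
 * Reallocation of resources that failed to assign is off by default;
 * pci_realloc() switches it on.  It is expected to be called from early
 * boot-parameter handling (e.g. a "pci=realloc" command-line option).
 */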
int pci_realloc_enable = 0;
#define pci_realloc_enabled() pci_realloc_enable
void pci_realloc(void)
{
	pci_realloc_enable = 1;
}
/**
 * add_to_list() - add a new resource tracker to the list
 * @head:	Head of the list
 * @dev:	device to which the resource belongs
 * @res:	The resource to be tracked
 * @add_size:	additional size to be optionally added to the resource
 */
static void add_to_list(struct resource_list_x *head,
			struct pci_dev *dev, struct resource *res,
			resource_size_t add_size)
{
	struct resource_list_x *list = head;
	struct resource_list_x *ln = list->next;
	struct resource_list_x *tmp;

	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		pr_warning("add_to_list: kmalloc() failed!\n");
		return;
	}

	tmp->next = ln;
	tmp->res = res;
	tmp->dev = dev;
	tmp->start = res->start;
	tmp->end = res->end;
	tmp->flags = res->flags;
	tmp->add_size = add_size;
	list->next = tmp;
}
static void add_to_failed_list(struct resource_list_x *head,
			       struct pci_dev *dev, struct resource *res)
{
	add_to_list(head, dev, res, 0);
}
static void __dev_sort_resources(struct pci_dev *dev,
				 struct resource_list *head)
{
	u16 class = dev->class >> 8;

	/* Don't touch classless devices or host bridges or ioapics. */
	if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
		return;

	/* Don't touch ioapic devices already enabled by firmware */
	if (class == PCI_CLASS_SYSTEM_PIC) {
		u16 command;
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
			return;
	}

	pdev_sort_resources(dev, head);
}
static inline void reset_resource(struct resource *res)
{
	res->start = 0;
	res->end = 0;
	res->flags = 0;
}
/**
 * adjust_resources_sorted() - satisfy any additional resource requests
 *
 * @add_head : head of the list tracking requests requiring additional
 *             resources
 * @head     : head of the list tracking requests with allocated
 *             resources
 *
 * Walk through each element of the add_head and try to procure
 * additional resources for the element, provided the element
 * is in the head list.
 */
static void adjust_resources_sorted(struct resource_list_x *add_head,
		struct resource_list *head)
{
	struct resource *res;
	struct resource_list_x *list, *tmp, *prev;
	struct resource_list *hlist;
	resource_size_t add_size;
	int idx;

	prev = add_head;
	for (list = add_head->next; list;) {
		res = list->res;
		/* skip resource that has been reset */
		if (!res->flags)
			goto out;

		/* skip this resource if not found in head list */
		for (hlist = head->next; hlist && hlist->res != res;
				hlist = hlist->next);
		if (!hlist) { /* just skip */
			prev = list;
			list = list->next;
			continue;
		}

		idx = res - &list->dev->resource[0];
		add_size = list->add_size;
		if (!resource_size(res) && add_size) {
			res->end = res->start + add_size - 1;
			if (pci_assign_resource(list->dev, idx))
				reset_resource(res);
		} else if (add_size) {
			adjust_resource(res, res->start,
					resource_size(res) + add_size);
		}
out:
		tmp = list;
		prev->next = list = list->next;
		kfree(tmp);
	}
}
/**
 * assign_requested_resources_sorted() - satisfy resource requests
 *
 * @head : head of the list tracking requests for resources
 * @fail_head : head of the list tracking requests that could
 *		not be satisfied
 *
 * Satisfy resource requests of each element in the list. Add
 * requests that could not be satisfied to the fail_head list.
 */
static void assign_requested_resources_sorted(struct resource_list *head,
				 struct resource_list_x *fail_head)
{
	struct resource *res;
	struct resource_list *list;
	int idx;

	for (list = head->next; list; list = list->next) {
		res = list->res;
		idx = res - &list->dev->resource[0];
		if (resource_size(res) && pci_assign_resource(list->dev, idx)) {
			if (fail_head && !pci_is_root_bus(list->dev->bus)) {
				/*
				 * if the failed res is for ROM BAR, and it will
				 * be enabled later, don't add it to the list
				 */
				if (!((idx == PCI_ROM_RESOURCE) &&
				      (!(res->flags & IORESOURCE_ROM_ENABLE))))
					add_to_failed_list(fail_head, list->dev, res);
			}
			reset_resource(res);
		}
	}
}
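/*
 * Assignment is done in two steps: first hand out everything the devices
 * strictly require (assign_requested_resources_sorted), recording failures
 * on fail_head; only then try to grow resources by the optional add_size
 * amounts tracked on add_head (adjust_resources_sorted).
 */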
static void __assign_resources_sorted(struct resource_list *head,
				      struct resource_list_x *add_head,
				      struct resource_list_x *fail_head)
{
	/* Satisfy the must-have resource requests */
	assign_requested_resources_sorted(head, fail_head);

	/* Try to satisfy any additional nice-to-have resource
		requests */
	if (add_head)
		adjust_resources_sorted(add_head, head);
	free_list(resource_list, head);
}
static void pdev_assign_resources_sorted(struct pci_dev *dev,
					 struct resource_list_x *fail_head)
{
	struct resource_list head;

	head.next = NULL;
	__dev_sort_resources(dev, &head);
	__assign_resources_sorted(&head, NULL, fail_head);
}
static void pbus_assign_resources_sorted(const struct pci_bus *bus,
					 struct resource_list_x *add_head,
					 struct resource_list_x *fail_head)
{
	struct pci_dev *dev;
	struct resource_list head;

	head.next = NULL;
	list_for_each_entry(dev, &bus->devices, bus_list)
		__dev_sort_resources(dev, &head);

	__assign_resources_sorted(&head, add_head, fail_head);
}
void pci_setup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;

	dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		/*
		 * The IO resource is allocated a range twice as large as it
		 * would normally need.  This allows us to set both IO regs.
		 */
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
					region.end);
	}

	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
					region.end);
	}

	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
					region.end);
	}

	res = bus->resource[3];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
					region.end);
	}
}
EXPORT_SYMBOL(pci_setup_cardbus);
/* Initialize bridges with base/limit values we have collected.
   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there are no I/O ports or memory behind the
   bridge, the corresponding range must be turned off by writing a
   base value greater than the limit to the bridge's base/limit
   registers.

   Note: care must be taken when updating I/O base/limit registers
   of bridges which support 32-bit I/O. This update requires two
   config space writes, so it's quite possible that an I/O window of
   the bridge will have some undesirable address (e.g. 0) after the
   first write. Ditto 64-bit prefetchable MMIO. */
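/*
 * Register layout assumed below (per the P2P bridge spec): the I/O Base
 * and I/O Limit registers are one byte each and share a dword at
 * PCI_IO_BASE with the Secondary Status register.  Bits 7:4 of base and
 * limit hold address bits 15:12, giving 4K granularity; the low 12 bits
 * of the base are implied 0x000 and of the limit 0xfff.  Example: a
 * window of [io 0x4000-0x7fff] is programmed as base 0x40, limit 0x70,
 * with the upper 16 address bits going into the
 * PCI_IO_BASE_UPPER16/PCI_IO_LIMIT_UPPER16 pair.
 */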
static void pci_setup_bridge_io(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, io_upper16;

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		pci_read_config_dword(bridge, PCI_IO_BASE, &l);
		l &= 0xffff0000;
		l |= (region.start >> 8) & 0x00f0;
		l |= region.end & 0xf000;
		/* Set up upper 16 bits of I/O base/limit. */
		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		/* Clear upper 16 bits of I/O base/limit. */
		io_upper16 = 0;
		l = 0x00f0;
		dev_info(&bridge->dev, "  bridge window [io disabled]\n");
	}
	/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
	/* Update lower 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE, l);
	/* Update upper 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
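/*
 * The (non-prefetchable) Memory Base/Limit registers are 16 bits wide;
 * bits 15:4 hold address bits 31:20, so the window has 1MB granularity
 * and must lie below 4GB.  Example: [mem 0xe0000000-0xe00fffff] is
 * programmed as base 0xe000, limit 0xe000.
 */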
static void pci_setup_bridge_mmio(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l;

	/* Set up the top and bottom of the PCI Memory segment for this bus. */
	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
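/*
 * The Prefetchable Memory Base/Limit registers use the same 1MB
 * granularity encoding as the non-prefetchable window, but bits 3:0
 * indicate whether the bridge decodes 64-bit addresses; if so, the upper
 * 32 bits go into PCI_PREF_BASE_UPPER32/PCI_PREF_LIMIT_UPPER32.
 */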
static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, bu, lu;

	/* Clear out the upper 32 bits of PREF limit.
	   If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
	   disables PREF range, which is ok. */
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

	/* Set up PREF base/limit. */
	bu = lu = 0;
	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_PREFETCH) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		if (res->flags & IORESOURCE_MEM_64) {
			bu = upper_32_bits(region.start);
			lu = upper_32_bits(region.end);
		}
		dev_info(&bridge->dev, "  bridge window %pR\n", res);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
	}
	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

	/* Set the upper 32 bits of PREF base & limit. */
	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_dev *bridge = bus->self;

	dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	if (type & IORESOURCE_IO)
		pci_setup_bridge_io(bus);

	if (type & IORESOURCE_MEM)
		pci_setup_bridge_mmio(bus);

	if (type & IORESOURCE_PREFETCH)
		pci_setup_bridge_mmio_pref(bus);

	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
static void pci_setup_bridge(struct pci_bus *bus)
{
	unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	__pci_setup_bridge(bus, type);
}
/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges. If not, the respective
   base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
	u16 io;
	u32 pmem;
	struct pci_dev *bridge = bus->self;
	struct resource *b_res;

	b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	b_res[1].flags |= IORESOURCE_MEM;

	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
	}
	if (io)
		b_res[0].flags |= IORESOURCE_IO;
	/* DECchip 21050 pass 2 errata: the bridge may miss an address
	   disconnect boundary by one PCI data phase.
	   Workaround: do not use prefetching on this device. */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;
	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
					       0xfff0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (pmem) {
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
		    PCI_PREF_RANGE_TYPE_64) {
			b_res[2].flags |= IORESOURCE_MEM_64;
			b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
		}
	}

	/* double check if bridge does support 64 bit pref */
	if (b_res[2].flags & IORESOURCE_MEM_64) {
		u32 mem_base_hi, tmp;
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					&mem_base_hi);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					       0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		if (!tmp)
			b_res[2].flags &= ~IORESOURCE_MEM_64;
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       mem_base_hi);
	}
}
/* Helper function for sizing routines: find first available
   bus resource of a given type. Note: we intentionally skip
   the bus resources which have already been assigned (that is,
   have non-NULL parent resource). */
static struct resource *find_free_bus_resource(struct pci_bus *bus,
						unsigned long type)
{
	int i;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	pci_bus_for_each_resource(bus, r, i) {
		if (r == &ioport_resource || r == &iomem_resource)
			continue;
		if (r && (r->flags & type_mask) == type && !r->parent)
			return r;
	}
	return NULL;
}
static resource_size_t calculate_iosize(resource_size_t size,
		resource_size_t min_size,
		resource_size_t size1,
		resource_size_t old_size,
		resource_size_t align)
{
	if (size < min_size)
		size = min_size;
	if (old_size == 1)
		old_size = 0;
	/* To be fixed in 2.5: we should have sort of HAVE_ISA
	   flag in the struct pci_bus. */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
	size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
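	/*
	 * With ISA on the bus, only the low 256 bytes of each 1K block of
	 * I/O space are safe to use (the rest is shadowed by ISA aliases),
	 * so the expression above keeps the sub-256-byte remainder and
	 * multiplies the rest by 4.  Example: 0x300 bytes of requests
	 * become a 0xc00-byte window.
	 */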
	size = ALIGN(size + size1, align);
	if (size < old_size)
		size = old_size;
	return size;
}
static resource_size_t calculate_memsize(resource_size_t size,
		resource_size_t min_size,
		resource_size_t size1,
		resource_size_t old_size,
		resource_size_t align)
{
	if (size < min_size)
		size = min_size;
	if (old_size == 1)
		old_size = 0;
	if (size < old_size)
		size = old_size;
	size = ALIGN(size + size1, align);
	return size;
}
/**
 * pbus_size_io() - size the io window of a given bus
 *
 * @bus : the bus
 * @min_size : the minimum io window that must be allocated
 * @add_size : additional optional io window
 * @add_head : track the additional io window on this list
 *
 * Sizing the IO windows of the PCI-PCI bridge is trivial,
 * since these windows have 4K granularity and the IO ranges
 * of non-bridge PCI devices are limited to 256 bytes.
 * We must be careful with the ISA aliasing though.
 */
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
		resource_size_t add_size, struct resource_list_x *add_head)
{
	struct pci_dev *dev;
	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
	unsigned long size = 0, size0 = 0, size1 = 0;

	if (!b_res)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			unsigned long r_size;

			if (r->parent || !(r->flags & IORESOURCE_IO))
				continue;

			r_size = resource_size(r);

			if (r_size < 0x400)
				/* Might be re-aligned for ISA */
				size += r_size;
			else
				size1 += r_size;
		}
	}
	size0 = calculate_iosize(size, min_size, size1,
			resource_size(b_res), 4096);
	size1 = (!add_head || (add_head && !add_size)) ? size0 :
		calculate_iosize(size, min_size + add_size, size1,
			resource_size(b_res), 4096);
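	/*
	 * size0 is the window needed for the mandatory requests only;
	 * size1 additionally folds in the optional add_size headroom.
	 * The bridge window is programmed for size0 below, and any extra
	 * (size1 - size0) is recorded on add_head so it can be handed out
	 * later if space allows.
	 */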
	if (!size0 && !size1) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return;
	}
	/* Alignment of the IO window is always 4K */
	b_res->start = 4096;
	b_res->end = b_res->start + size0 - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
	if (size1 > size0 && add_head)
		add_to_list(add_head, bus->self, b_res, size1 - size0);
}
/**
 * pbus_size_mem() - size the memory window of a given bus
 *
 * @bus : the bus
 * @min_size : the minimum memory window that must be allocated
 * @add_size : additional optional memory window
 * @add_head : track the additional memory window on this list
 *
 * Calculate the size of the bus and minimal alignment which
 * guarantees that all child resources fit in this size.
 */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
			 unsigned long type, resource_size_t min_size,
			 resource_size_t add_size,
			 struct resource_list_x *add_head)
{
	struct pci_dev *dev;
	resource_size_t min_align, align, size, size0, size1;
	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
	int order, max_order;
	struct resource *b_res = find_free_bus_resource(bus, type);
	unsigned int mem64_mask = 0;

	if (!b_res)
		return 0;

	memset(aligns, 0, sizeof(aligns));
	max_order = 0;
	size = 0;

	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
	b_res->flags &= ~IORESOURCE_MEM_64;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			resource_size_t r_size;

			if (r->parent || (r->flags & mask) != type)
				continue;
			r_size = resource_size(r);
			/* For bridges size != alignment */
			align = pci_resource_alignment(dev, r);
			order = __ffs(align) - 20;
->dev
, "disabling BAR %d: %pR "
656 "(bad alignment %#llx)\n", i
, r
,
657 (unsigned long long) align
);
664 /* Exclude ranges with size > align from
665 calculation of the alignment. */
667 aligns
[order
] += align
;
668 if (order
> max_order
)
670 mem64_mask
&= r
->flags
& IORESOURCE_MEM_64
;
675 for (order
= 0; order
<= max_order
; order
++) {
676 resource_size_t align1
= 1;
678 align1
<<= (order
+ 20);
682 else if (ALIGN(align
+ min_align
, min_align
) < align1
)
683 min_align
= align1
>> 1;
684 align
+= aligns
[order
];
686 size0
= calculate_memsize(size
, min_size
, 0, resource_size(b_res
), min_align
);
687 size1
= (!add_head
|| (add_head
&& !add_size
)) ? size0
:
688 calculate_memsize(size
, min_size
+add_size
, 0,
689 resource_size(b_res
), min_align
);
690 if (!size0
&& !size1
) {
691 if (b_res
->start
|| b_res
->end
)
692 dev_info(&bus
->self
->dev
, "disabling bridge window "
693 "%pR to [bus %02x-%02x] (unused)\n", b_res
,
694 bus
->secondary
, bus
->subordinate
);
698 b_res
->start
= min_align
;
699 b_res
->end
= size0
+ min_align
- 1;
700 b_res
->flags
|= IORESOURCE_STARTALIGN
| mem64_mask
;
701 if (size1
> size0
&& add_head
)
702 add_to_list(add_head
, bus
->self
, b_res
, size1
-size0
);
static void pci_bus_size_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	u16 ctrl;

	/*
	 * Reserve some resources for CardBus.  We reserve
	 * a fixed amount of bus space for CardBus bridges.
	 */
	b_res[0].start = 0;
	b_res[0].end = pci_cardbus_io_size - 1;
	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	b_res[1].start = 0;
	b_res[1].end = pci_cardbus_io_size - 1;
	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	/*
	 * Check whether prefetchable memory is supported
	 * by this bridge.
	 */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/*
	 * If we have prefetchable memory support, allocate
	 * two regions.  Otherwise, allocate one region of
	 * twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		b_res[2].start = 0;
		b_res[2].end = pci_cardbus_mem_size - 1;
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;

		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	} else {
		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size * 2 - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	}
}
void __ref __pci_bus_size_bridges(struct pci_bus *bus,
			struct resource_list_x *add_head)
{
	struct pci_dev *dev;
	unsigned long mask, prefmask;
	resource_size_t additional_mem_size = 0, additional_io_size = 0;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_bus_size_cardbus(b);
			break;

		case PCI_CLASS_BRIDGE_PCI:
		default:
			__pci_bus_size_bridges(b, add_head);
			break;
		}
	}

	/* The root bus? */
	if (!bus->self)
		return;

	switch (bus->self->class >> 8) {
	case PCI_CLASS_BRIDGE_CARDBUS:
		/* don't size cardbuses yet. */
		break;

	case PCI_CLASS_BRIDGE_PCI:
		pci_bridge_check_ranges(bus);
		if (bus->self->is_hotplug_bridge) {
			additional_io_size  = pci_hotplug_io_size;
			additional_mem_size = pci_hotplug_mem_size;
		}
		/* Fall through */
	default:
		pbus_size_io(bus, 0, additional_io_size, add_head);
		/* If the bridge supports prefetchable range, size it
		   separately. If it doesn't, or its prefetchable window
		   has already been allocated by arch code, try
		   non-prefetchable range for both types of PCI memory
		   resources. */
		mask = IORESOURCE_MEM;
		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head))
			mask = prefmask; /* Success, size non-prefetch only. */
		else
			additional_mem_size += additional_mem_size;
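		/*
		 * If the prefetchable window could not be sized on its own,
		 * prefetchable and non-prefetchable devices all land in the
		 * single non-prefetchable window sized below, so the optional
		 * headroom is doubled to cover both classes of requests.
		 */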
		pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head);
		break;
	}
}
void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
	__pci_bus_size_bridges(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_size_bridges);
static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
					 struct resource_list_x *add_head,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pbus_assign_resources_sorted(bus, add_head, fail_head);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		__pci_bus_assign_resources(b, add_head, fail_head);

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_PCI:
			if (!pci_is_enabled(dev))
				pci_setup_bridge(b);
			break;

		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_setup_cardbus(b);
			break;

		default:
			dev_info(&dev->dev, "not setting up bridge for bus "
				 "%04x:%02x\n", pci_domain_nr(b), b->number);
			break;
		}
	}
}

void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
	__pci_bus_assign_resources(bus, NULL, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;

	pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);

	b = bridge->subordinate;
	if (!b)
		return;

	__pci_bus_assign_resources(b, NULL, fail_head);

	switch (bridge->class >> 8) {
	case PCI_CLASS_BRIDGE_PCI:
		pci_setup_bridge(b);
		break;

	case PCI_CLASS_BRIDGE_CARDBUS:
		pci_setup_cardbus(b);
		break;

	default:
		dev_info(&bridge->dev, "not setting up bridge for bus "
			 "%04x:%02x\n", pci_domain_nr(b), b->number);
		break;
	}
}
static void pci_bridge_release_resources(struct pci_bus *bus,
					 unsigned long type)
{
	int idx;
	bool changed = false;
	struct pci_dev *dev;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	dev = bus->self;
	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
	     idx++) {
		r = &dev->resource[idx];
		if ((r->flags & type_mask) != type)
			continue;
		if (!r->parent)
			continue;
		/*
		 * if there are children under that, we should release them
		 * all
		 */
		release_child_resources(r);
		if (!release_resource(r)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				   "resource %d %pR released\n", idx, r);
			/* keep the old size */
			r->end = resource_size(r) - 1;
			r->start = 0;
			r->flags = 0;
			changed = true;
		}
	}

	if (changed) {
		/* avoid touching the window without PREF */
		if (type & IORESOURCE_PREFETCH)
			type = IORESOURCE_PREFETCH;
		__pci_setup_bridge(bus, type);
	}
}
enum release_type {
	leaf_only,
	whole_subtree,
};

/*
 * try to release pci bridge resources from leaf bridges,
 * so we can allocate a big new one later
 */
static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
						   unsigned long type,
						   enum release_type rel_type)
{
	struct pci_dev *dev;
	bool is_leaf_bridge = true;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		is_leaf_bridge = false;

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			continue;

		if (rel_type == whole_subtree)
			pci_bus_release_bridge_resources(b, type,
							 whole_subtree);
	}

	if (pci_is_root_bus(bus))
		return;

	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	if ((rel_type == whole_subtree) || is_leaf_bridge)
		pci_bridge_release_resources(bus, type);
}
static void pci_bus_dump_res(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->end || !res->flags)
			continue;

		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
	}
}
static void pci_bus_dump_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pci_bus_dump_res(bus);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		pci_bus_dump_resources(b);
	}
}
static int __init pci_bus_get_depth(struct pci_bus *bus)
{
	int depth = 0;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int ret;
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		ret = pci_bus_get_depth(b);
		if (ret + 1 > depth)
			depth = ret + 1;
	}

	return depth;
}
static int __init pci_get_max_depth(void)
{
	int depth = 0;
	struct pci_bus *bus;

	list_for_each_entry(bus, &pci_root_buses, node) {
		int ret;

		ret = pci_bus_get_depth(bus);
		if (ret > depth)
			depth = ret;
	}

	return depth;
}
/*
 * The first try will not touch PCI bridge resources; the second and later
 * tries will also clear small leaf bridge resources.  We stop once the
 * maximum bus depth is reached if no good assignment can be found.
 */
void __init
pci_assign_unassigned_resources(void)
{
	struct pci_bus *bus;
	struct resource_list_x add_list; /* list of resources that
					want additional resources */
	int tried_times = 0;
	enum release_type rel_type = leaf_only;
	struct resource_list_x head, *list;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;
	unsigned long failed_type;
	int max_depth = pci_get_max_depth();
	int pci_try_num;

	head.next = NULL;
	add_list.next = NULL;

	pci_try_num = max_depth + 1;
	printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
	       max_depth, pci_try_num);

again:
	/* Depth first, calculate sizes and alignments of all
	   subordinate buses. */
	list_for_each_entry(bus, &pci_root_buses, node)
		__pci_bus_size_bridges(bus, &add_list);

	/* Depth last, allocate resources and update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node)
		__pci_bus_assign_resources(bus, &add_list, &head);
	BUG_ON(add_list.next);
	tried_times++;

	/* any device complain? */
	if (!head.next)
		goto enable_and_dump;

	/* don't realloc if not asked to do so */
	if (!pci_realloc_enabled()) {
		free_list(resource_list_x, &head);
		goto enable_and_dump;
	}

	failed_type = 0;
	for (list = head.next; list;) {
		failed_type |= list->flags;
		list = list->next;
	}
	/*
	 * I/O ports are tight; don't retry for those, and stop once the
	 * retry limit has been reached.
	 */
	failed_type &= type_mask;
	if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
		free_list(resource_list_x, &head);
		goto enable_and_dump;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
	       tried_times + 1);

	/* from the third try onwards, release the whole subtree, not just leaves */
	if ((tried_times + 1) > 2)
		rel_type = whole_subtree;

	/*
	 * Try to release the bridge resources that are too small for the
	 * resources of the child devices under that bridge.
	 */
	for (list = head.next; list;) {
		bus = list->dev->bus;
		pci_bus_release_bridge_resources(bus, list->flags & type_mask,
						 rel_type);
		list = list->next;
	}
	/* restore size and flags */
	for (list = head.next; list;) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
		if (list->dev->subordinate)
			res->flags = 0;

		list = list->next;
	}
	free_list(resource_list_x, &head);

	goto again;

enable_and_dump:
	/* Depth last, update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node)
		pci_enable_bridges(bus);

	/* dump the resource on buses */
	list_for_each_entry(bus, &pci_root_buses, node)
		pci_bus_dump_resources(bus);
}
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;
	int tried_times = 0;
	struct resource_list_x head, *list;
	int retval;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	head.next = NULL;

again:
	pci_bus_size_bridges(parent);
	__pci_bridge_assign_resources(bridge, &head);

	tried_times++;

	if (!head.next)
		goto enable_all;

	if (tried_times >= 2) {
		/* still fail, don't need to try more */
		free_list(resource_list_x, &head);
		goto enable_all;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
	       tried_times + 1);

	/*
	 * Try to release the bridge resources that are too small for the
	 * resources of the child devices under that bridge.
	 */
	for (list = head.next; list;) {
		struct pci_bus *bus = list->dev->bus;
		unsigned long flags = list->flags;

		pci_bus_release_bridge_resources(bus, flags & type_mask,
						 whole_subtree);
		list = list->next;
	}
	/* restore size and flags */
	for (list = head.next; list;) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
		if (list->dev->subordinate)
			res->flags = 0;

		list = list->next;
	}
	free_list(resource_list_x, &head);

	goto again;

enable_all:
	retval = pci_reenable_device(bridge);
	pci_set_master(bridge);
	pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);