/*
 *      drivers/pci/setup-bus.c
 *
 * Extruded from code written by
 *      Dave Rusling (david.rusling@reo.mts.dec.com)
 *      David Mosberger (davidm@cs.arizona.edu)
 *      David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *           PCI-PCI bridges cleanup, sorted resource allocation.
 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *           Converted to allocation in 3 passes, which gives
 *           tighter packing. Prefetchable range support.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include "pci.h"
struct resource_list_x {
        struct resource_list_x *next;
        struct resource *res;
        struct pci_dev *dev;
        resource_size_t start;
        resource_size_t end;
        unsigned long flags;
};
static void add_to_failed_list(struct resource_list_x *head,
                               struct pci_dev *dev, struct resource *res)
{
        struct resource_list_x *list = head;
        struct resource_list_x *ln = list->next;
        struct resource_list_x *tmp;

        tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
        if (!tmp) {
                pr_warning("add_to_failed_list: kmalloc() failed!\n");
                return;
        }

        tmp->next = ln;
        tmp->res = res;
        tmp->dev = dev;
        tmp->start = res->start;
        tmp->end = res->end;
        tmp->flags = res->flags;
        list->next = tmp;
}
static void free_failed_list(struct resource_list_x *head)
{
        struct resource_list_x *list, *tmp;

        for (list = head->next; list;) {
                tmp = list;
                list = list->next;
                kfree(tmp);
        }

        head->next = NULL;
}
static void __dev_sort_resources(struct pci_dev *dev,
                                 struct resource_list *head)
{
        u16 class = dev->class >> 8;

        /* Don't touch classless devices or host bridges or ioapics. */
        if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
                return;

        /* Don't touch ioapic devices already enabled by firmware */
        if (class == PCI_CLASS_SYSTEM_PIC) {
                u16 command;
                pci_read_config_word(dev, PCI_COMMAND, &command);
                if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
                        return;
        }

        pdev_sort_resources(dev, head);
}
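
/*
 * Assign the resources queued on @head (already sorted by alignment) and
 * free the list entries as we go.  A resource that cannot be assigned is
 * cleared; if @fail_head is non-NULL it is also recorded there for a later
 * retry, except for ROM BARs that are not going to be enabled anyway.
 */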
static void __assign_resources_sorted(struct resource_list *head,
                                      struct resource_list_x *fail_head)
{
        struct resource *res;
        struct resource_list *list, *tmp;
        int idx;

        for (list = head->next; list;) {
                res = list->res;
                idx = res - &list->dev->resource[0];

                if (pci_assign_resource(list->dev, idx)) {
                        if (fail_head && !pci_is_root_bus(list->dev->bus)) {
                                /*
                                 * if the failed res is for ROM BAR, and it will
                                 * be enabled later, don't add it to the list
                                 */
                                if (!((idx == PCI_ROM_RESOURCE) &&
                                      (!(res->flags & IORESOURCE_ROM_ENABLE))))
                                        add_to_failed_list(fail_head, list->dev, res);
                        }
                        res->start = 0;
                        res->end = 0;
                        res->flags = 0;
                }
                tmp = list;
                list = list->next;
                kfree(tmp);
        }
}
static void pdev_assign_resources_sorted(struct pci_dev *dev,
                                         struct resource_list_x *fail_head)
{
        struct resource_list head;

        head.next = NULL;
        __dev_sort_resources(dev, &head);
        __assign_resources_sorted(&head, fail_head);
}
static void pbus_assign_resources_sorted(const struct pci_bus *bus,
                                         struct resource_list_x *fail_head)
{
        struct pci_dev *dev;
        struct resource_list head;

        head.next = NULL;
        list_for_each_entry(dev, &bus->devices, bus_list)
                __dev_sort_resources(dev, &head);

        __assign_resources_sorted(&head, fail_head);
}
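
/*
 * Program a CardBus bridge's I/O and memory windows from the bus resources
 * that were assigned to it.
 */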
void pci_setup_cardbus(struct pci_bus *bus)
{
        struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;

        dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
                 bus->secondary, bus->subordinate);

        res = bus->resource[0];
        pcibios_resource_to_bus(bridge, &region, res);
        if (res->flags & IORESOURCE_IO) {
                /*
                 * The IO resource is allocated a range twice as large as it
                 * would normally need.  This allows us to set both IO regs.
                 */
                dev_info(&bridge->dev, "  bridge window %pR\n", res);
                pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
                                        region.start);
                pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
                                        region.end);
        }

        res = bus->resource[1];
        pcibios_resource_to_bus(bridge, &region, res);
        if (res->flags & IORESOURCE_IO) {
                dev_info(&bridge->dev, "  bridge window %pR\n", res);
                pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
                                        region.start);
                pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
                                        region.end);
        }

        res = bus->resource[2];
        pcibios_resource_to_bus(bridge, &region, res);
        if (res->flags & IORESOURCE_MEM) {
                dev_info(&bridge->dev, "  bridge window %pR\n", res);
                pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
                                        region.start);
                pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
                                        region.end);
        }

        res = bus->resource[3];
        pcibios_resource_to_bus(bridge, &region, res);
        if (res->flags & IORESOURCE_MEM) {
                dev_info(&bridge->dev, "  bridge window %pR\n", res);
                pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
                                        region.start);
                pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
                                        region.end);
        }
}
EXPORT_SYMBOL(pci_setup_cardbus);
/* Initialize bridges with base/limit values we have collected.
   The PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there are no I/O ports or memory behind the
   bridge, the corresponding range must be turned off by writing a base
   value greater than the limit to the bridge's base/limit registers.

   Note: care must be taken when updating I/O base/limit registers
   of bridges which support 32-bit I/O.  This update requires two
   config space writes, so it's quite possible that an I/O window of
   the bridge will have some undesirable address (e.g. 0) after the
   first write.  Ditto 64-bit prefetchable MMIO. */
static void pci_setup_bridge_io(struct pci_bus *bus)
{
        struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        u32 l, io_upper16;

        /* Set up the top and bottom of the PCI I/O segment for this bus. */
        res = bus->resource[0];
        pcibios_resource_to_bus(bridge, &region, res);
        if (res->flags & IORESOURCE_IO) {
                pci_read_config_dword(bridge, PCI_IO_BASE, &l);
                l &= 0xffff0000;
                l |= (region.start >> 8) & 0x00f0;
                l |= region.end & 0xf000;
                /* Set up upper 16 bits of I/O base/limit. */
                io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
                dev_info(&bridge->dev, "  bridge window %pR\n", res);
        } else {
                /* Clear upper 16 bits of I/O base/limit. */
                io_upper16 = 0;
                l = 0x00f0;
                dev_info(&bridge->dev, "  bridge window [io disabled]\n");
        }
        /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
        pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
        /* Update lower 16 bits of I/O base/limit. */
        pci_write_config_dword(bridge, PCI_IO_BASE, l);
        /* Update upper 16 bits of I/O base/limit. */
        pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
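
/* Program the non-prefetchable memory base/limit of a PCI-PCI bridge. */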
static void pci_setup_bridge_mmio(struct pci_bus *bus)
{
        struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        u32 l;

        /* Set up the top and bottom of the PCI Memory segment for this bus. */
        res = bus->resource[1];
        pcibios_resource_to_bus(bridge, &region, res);
        if (res->flags & IORESOURCE_MEM) {
                l = (region.start >> 16) & 0xfff0;
                l |= region.end & 0xfff00000;
                dev_info(&bridge->dev, "  bridge window %pR\n", res);
        } else {
                l = 0x0000fff0;
                dev_info(&bridge->dev, "  bridge window [mem disabled]\n");
        }
        pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
        struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        u32 l, bu, lu;

        /* Clear out the upper 32 bits of PREF limit.
           If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
           disables PREF range, which is ok. */
        pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

        /* Set up PREF base/limit. */
        bu = lu = 0;
        res = bus->resource[2];
        pcibios_resource_to_bus(bridge, &region, res);
        if (res->flags & IORESOURCE_PREFETCH) {
                l = (region.start >> 16) & 0xfff0;
                l |= region.end & 0xfff00000;
                if (res->flags & IORESOURCE_MEM_64) {
                        bu = upper_32_bits(region.start);
                        lu = upper_32_bits(region.end);
                }
                dev_info(&bridge->dev, "  bridge window %pR\n", res);
        } else {
                l = 0x0000fff0;
                dev_info(&bridge->dev, "  bridge window [mem pref disabled]\n");
        }
        pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

        /* Set the upper 32 bits of PREF base & limit. */
        pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
        pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
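
/*
 * Program the windows selected by @type (I/O, memory, prefetchable memory)
 * of the bridge leading to @bus, then write back the saved bridge control
 * value.
 */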
static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
        struct pci_dev *bridge = bus->self;

        dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
                 bus->secondary, bus->subordinate);

        if (type & IORESOURCE_IO)
                pci_setup_bridge_io(bus);

        if (type & IORESOURCE_MEM)
                pci_setup_bridge_mmio(bus);

        if (type & IORESOURCE_PREFETCH)
                pci_setup_bridge_mmio_pref(bus);

        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
static void pci_setup_bridge(struct pci_bus *bus)
{
        unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
                             IORESOURCE_PREFETCH;

        __pci_setup_bridge(bus, type);
}
/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges.  If not, the respective
   base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
        u16 io;
        u32 pmem;
        struct pci_dev *bridge = bus->self;
        struct resource *b_res;

        b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
        b_res[1].flags |= IORESOURCE_MEM;

        pci_read_config_word(bridge, PCI_IO_BASE, &io);
        if (!io) {
                pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
                pci_read_config_word(bridge, PCI_IO_BASE, &io);
                pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
        }
        if (io)
                b_res[0].flags |= IORESOURCE_IO;
        /* DECchip 21050 pass 2 errata: the bridge may miss an address
           disconnect boundary by one PCI data phase.
           Workaround: do not use prefetching on this device. */
        if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
                return;
        pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
        if (!pmem) {
                pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
                                       0xfff0fff0);
                pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
                pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
        }
        if (pmem) {
                b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
                if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
                    PCI_PREF_RANGE_TYPE_64) {
                        b_res[2].flags |= IORESOURCE_MEM_64;
                        b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
                }
        }

        /* double check if bridge does support 64 bit pref */
        if (b_res[2].flags & IORESOURCE_MEM_64) {
                u32 mem_base_hi, tmp;
                pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
                                      &mem_base_hi);
                pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
                                       0xffffffff);
                pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
                if (!tmp)
                        b_res[2].flags &= ~IORESOURCE_MEM_64;
                pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
                                       mem_base_hi);
        }
}
/* Helper function for sizing routines: find first available
   bus resource of a given type.  Note: we intentionally skip
   the bus resources which have already been assigned (that is,
   have non-NULL parent resource). */
static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
{
        int i;
        struct resource *r;
        unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
                                  IORESOURCE_PREFETCH;

        pci_bus_for_each_resource(bus, r, i) {
                if (r == &ioport_resource || r == &iomem_resource)
                        continue;
                if (r && (r->flags & type_mask) == type && !r->parent)
                        return r;
        }
        return NULL;
}
/* Sizing the IO windows of the PCI-PCI bridge is trivial,
   since these windows have 4K granularity and the IO ranges
   of non-bridge PCI devices are limited to 256 bytes.
   We must be careful with the ISA aliasing though. */
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
{
        struct pci_dev *dev;
        struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
        unsigned long size = 0, size1 = 0, old_size;

        if (!b_res)
                return;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                int i;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        struct resource *r = &dev->resource[i];
                        unsigned long r_size;

                        if (r->parent || !(r->flags & IORESOURCE_IO))
                                continue;
                        r_size = resource_size(r);

                        if (r_size < 0x400)
                                /* Might be re-aligned for ISA */
                                size += r_size;
                        else
                                size1 += r_size;
                }
        }
        if (size < min_size)
                size = min_size;
        old_size = resource_size(b_res);
        if (old_size == 1)
                old_size = 0;
        /* To be fixed in 2.5: we should have sort of HAVE_ISA
           flag in the struct pci_bus. */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
        size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
        size = ALIGN(size + size1, 4096);
        if (size < old_size)
                size = old_size;
        if (!size) {
                if (b_res->start || b_res->end)
                        dev_info(&bus->self->dev, "disabling bridge window "
                                 "%pR to [bus %02x-%02x] (unused)\n", b_res,
                                 bus->secondary, bus->subordinate);
                b_res->flags = 0;
                return;
        }
        /* Alignment of the IO window is always 4K */
        b_res->start = 4096;
        b_res->end = b_res->start + size - 1;
        b_res->flags |= IORESOURCE_STARTALIGN;
}
/* Calculate the size of the bus and minimal alignment which
   guarantees that all child resources fit in this size. */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
                         unsigned long type, resource_size_t min_size)
{
        struct pci_dev *dev;
        resource_size_t min_align, align, size, old_size;
        resource_size_t aligns[12];     /* Alignments from 1Mb to 2Gb */
        int order, max_order;
        struct resource *b_res = find_free_bus_resource(bus, type);
        unsigned int mem64_mask = 0;

        if (!b_res)
                return 0;

        memset(aligns, 0, sizeof(aligns));
        max_order = 0;
        size = 0;

        mem64_mask = b_res->flags & IORESOURCE_MEM_64;
        b_res->flags &= ~IORESOURCE_MEM_64;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                int i;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        struct resource *r = &dev->resource[i];
                        resource_size_t r_size;

                        if (r->parent || (r->flags & mask) != type)
                                continue;
                        r_size = resource_size(r);
                        /* For bridges size != alignment */
                        align = pci_resource_alignment(dev, r);
                        order = __ffs(align) - 20;
                        if (order > 11) {
                                dev_warn(&dev->dev, "disabling BAR %d: %pR "
                                         "(bad alignment %#llx)\n", i, r,
                                         (unsigned long long) align);
                                r->flags = 0;
                                continue;
                        }
                        size += r_size;
                        if (order < 0)
                                order = 0;
                        /* Exclude ranges with size > align from
                           calculation of the alignment. */
                        if (r_size == align)
                                aligns[order] += align;
                        if (order > max_order)
                                max_order = order;
                        mem64_mask &= r->flags & IORESOURCE_MEM_64;
                }
        }
        if (size < min_size)
                size = min_size;
        old_size = resource_size(b_res);
        if (old_size == 1)
                old_size = 0;
        if (size < old_size)
                size = old_size;

        align = 0;
        min_align = 0;
        for (order = 0; order <= max_order; order++) {
                resource_size_t align1 = 1;

                align1 <<= (order + 20);

                if (!align)
                        min_align = align1;
                else if (ALIGN(align + min_align, min_align) < align1)
                        min_align = align1 >> 1;
                align += aligns[order];
        }
        size = ALIGN(size, min_align);
        if (!size) {
                if (b_res->start || b_res->end)
                        dev_info(&bus->self->dev, "disabling bridge window "
                                 "%pR to [bus %02x-%02x] (unused)\n", b_res,
                                 bus->secondary, bus->subordinate);
                b_res->flags = 0;
                return 1;
        }
        b_res->start = min_align;
        b_res->end = size + min_align - 1;
        b_res->flags |= IORESOURCE_STARTALIGN;
        b_res->flags |= mem64_mask;
        return 1;
}
static void pci_bus_size_cardbus(struct pci_bus *bus)
{
        struct pci_dev *bridge = bus->self;
        struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
        u16 ctrl;

        /*
         * Reserve some resources for CardBus.  We reserve
         * a fixed amount of bus space for CardBus bridges.
         */
        b_res[0].start = 0;
        b_res[0].end = pci_cardbus_io_size - 1;
        b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

        b_res[1].start = 0;
        b_res[1].end = pci_cardbus_io_size - 1;
        b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

        /*
         * Check whether prefetchable memory is supported
         * by this bridge.
         */
        pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
        if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
                ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
                pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
                pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
        }

        /*
         * If we have prefetchable memory support, allocate
         * two regions.  Otherwise, allocate one region of
         * twice the size.
         */
        if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
                b_res[2].start = 0;
                b_res[2].end = pci_cardbus_mem_size - 1;
                b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;

                b_res[3].start = 0;
                b_res[3].end = pci_cardbus_mem_size - 1;
                b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
        } else {
                b_res[3].start = 0;
                b_res[3].end = pci_cardbus_mem_size * 2 - 1;
                b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
        }
}
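
/*
 * Size the bridge windows of @bus and, recursively, of all of its
 * subordinate buses (depth first), honouring the minimal window sizes
 * reserved for hotplug bridges.
 */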
void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
        struct pci_dev *dev;
        unsigned long mask, prefmask;
        resource_size_t min_mem_size = 0, min_io_size = 0;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                struct pci_bus *b = dev->subordinate;
                if (!b)
                        continue;

                switch (dev->class >> 8) {
                case PCI_CLASS_BRIDGE_CARDBUS:
                        pci_bus_size_cardbus(b);
                        break;

                case PCI_CLASS_BRIDGE_PCI:
                default:
                        pci_bus_size_bridges(b);
                        break;
                }
        }

        /* The root bus? */
        if (!bus->self)
                return;

        switch (bus->self->class >> 8) {
        case PCI_CLASS_BRIDGE_CARDBUS:
                /* don't size cardbuses yet. */
                break;

        case PCI_CLASS_BRIDGE_PCI:
                pci_bridge_check_ranges(bus);
                if (bus->self->is_hotplug_bridge) {
                        min_io_size = pci_hotplug_io_size;
                        min_mem_size = pci_hotplug_mem_size;
                }
        default:
                pbus_size_io(bus, min_io_size);
                /* If the bridge supports prefetchable range, size it
                   separately.  If it doesn't, or its prefetchable window
                   has already been allocated by arch code, try
                   non-prefetchable range for both types of PCI memory
                   resources. */
                mask = IORESOURCE_MEM;
                prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
                if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
                        mask = prefmask; /* Success, size non-prefetch only. */
                else
                        min_mem_size += min_mem_size;
                pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
                break;
        }
}
EXPORT_SYMBOL(pci_bus_size_bridges);
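
/*
 * Assign resources on @bus and, recursively, on all subordinate buses, then
 * program the bridges leading to them.  Failed assignments are collected on
 * @fail_head when it is non-NULL.
 */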
static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
                                             struct resource_list_x *fail_head)
{
        struct pci_bus *b;
        struct pci_dev *dev;

        pbus_assign_resources_sorted(bus, fail_head);

        list_for_each_entry(dev, &bus->devices, bus_list) {
                b = dev->subordinate;
                if (!b)
                        continue;

                __pci_bus_assign_resources(b, fail_head);

                switch (dev->class >> 8) {
                case PCI_CLASS_BRIDGE_PCI:
                        if (!pci_is_enabled(dev))
                                pci_setup_bridge(b);
                        break;

                case PCI_CLASS_BRIDGE_CARDBUS:
                        pci_setup_cardbus(b);
                        break;

                default:
                        dev_info(&dev->dev, "not setting up bridge for bus "
                                 "%04x:%02x\n", pci_domain_nr(b), b->number);
                        break;
                }
        }
}
void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
        __pci_bus_assign_resources(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
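
/*
 * Assign the bridge's own resources, then assign and set up everything
 * behind it.  Used by pci_assign_unassigned_bridge_resources() below.
 */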
static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
                                                struct resource_list_x *fail_head)
{
        struct pci_bus *b;

        pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);

        b = bridge->subordinate;
        if (!b)
                return;

        __pci_bus_assign_resources(b, fail_head);

        switch (bridge->class >> 8) {
        case PCI_CLASS_BRIDGE_PCI:
                pci_setup_bridge(b);
                break;

        case PCI_CLASS_BRIDGE_CARDBUS:
                pci_setup_cardbus(b);
                break;

        default:
                dev_info(&bridge->dev, "not setting up bridge for bus "
                         "%04x:%02x\n", pci_domain_nr(b), b->number);
                break;
        }
}
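
/*
 * Release the bridge windows of the given @type on @bus so they can be
 * re-sized, remembering the old window sizes, and reprogram the bridge for
 * any window that was actually released.
 */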
static void pci_bridge_release_resources(struct pci_bus *bus,
                                         unsigned long type)
{
        int idx;
        bool changed = false;
        struct pci_dev *dev;
        struct resource *r;
        unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
                                  IORESOURCE_PREFETCH;

        dev = bus->self;
        for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
             idx++) {
                r = &dev->resource[idx];
                if ((r->flags & type_mask) != type)
                        continue;
                if (!r->parent)
                        continue;
                /*
                 * if there are children under that, we should release them
                 * all
                 */
                release_child_resources(r);
                if (!release_resource(r)) {
                        dev_printk(KERN_DEBUG, &dev->dev,
                                   "resource %d %pR released\n", idx, r);
                        /* keep the old size */
                        r->end = resource_size(r) - 1;
                        r->start = 0;
                        r->flags = 0;
                        changed = true;
                }
        }

        if (changed) {
                /* avoid touching the windows without PREF */
                if (type & IORESOURCE_PREFETCH)
                        type = IORESOURCE_PREFETCH;
                __pci_setup_bridge(bus, type);
        }
}
enum release_type {
        leaf_only,
        whole_subtree,
};

/*
 * Try to release PCI bridge resources from leaf bridges,
 * so we can allocate a bigger new window later.
 */
static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
                                                   unsigned long type,
                                                   enum release_type rel_type)
{
        struct pci_dev *dev;
        bool is_leaf_bridge = true;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                struct pci_bus *b = dev->subordinate;
                if (!b)
                        continue;

                is_leaf_bridge = false;

                if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
                        continue;

                if (rel_type == whole_subtree)
                        pci_bus_release_bridge_resources(b, type,
                                                         whole_subtree);
        }

        if (pci_is_root_bus(bus))
                return;

        if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
                return;

        if ((rel_type == whole_subtree) || is_leaf_bridge)
                pci_bridge_release_resources(bus, type);
}
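
/* Dump the resources of a single bus at KERN_DEBUG level. */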
static void pci_bus_dump_res(struct pci_bus *bus)
{
        struct resource *res;
        int i;

        pci_bus_for_each_resource(bus, res, i) {
                if (!res || !res->end || !res->flags)
                        continue;

                dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
        }
}
static void pci_bus_dump_resources(struct pci_bus *bus)
{
        struct pci_bus *b;
        struct pci_dev *dev;

        pci_bus_dump_res(bus);

        list_for_each_entry(dev, &bus->devices, bus_list) {
                b = dev->subordinate;
                if (!b)
                        continue;

                pci_bus_dump_resources(b);
        }
}
void __init
pci_assign_unassigned_resources(void)
{
        struct pci_bus *bus;

        /* Depth first, calculate sizes and alignments of all
           subordinate buses. */
        list_for_each_entry(bus, &pci_root_buses, node) {
                pci_bus_size_bridges(bus);
        }
        /* Depth last, allocate resources and update the hardware. */
        list_for_each_entry(bus, &pci_root_buses, node) {
                pci_bus_assign_resources(bus);
                pci_enable_bridges(bus);
        }

        /* dump the resources on the buses */
        list_for_each_entry(bus, &pci_root_buses, node) {
                pci_bus_dump_resources(bus);
        }
}
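
/*
 * Size and assign everything below @bridge.  If the first pass leaves
 * unassigned resources, release the leaf bridge windows that were in the
 * way, restore the failed resources to their original size and flags, and
 * try once more before enabling the bridge.
 */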
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
        struct pci_bus *parent = bridge->subordinate;
        int tried_times = 0;
        struct resource_list_x head, *list;
        int retval;
        unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
                                  IORESOURCE_PREFETCH;

        head.next = NULL;

again:
        pci_bus_size_bridges(parent);
        __pci_bridge_assign_resources(bridge, &head);

        tried_times++;

        if (!head.next)
                goto enable_all;

        if (tried_times >= 2) {
                /* still failing, no need to try more */
                free_failed_list(&head);
                goto enable_all;
        }

        printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
               tried_times + 1);

        /*
         * Try to release the leaf bridge's resources that don't fit the
         * resources of the child devices under that bridge.
         */
        for (list = head.next; list;) {
                struct pci_bus *bus = list->dev->bus;
                unsigned long flags = list->flags;

                pci_bus_release_bridge_resources(bus, flags & type_mask,
                                                 whole_subtree);
                list = list->next;
        }
        /* restore size and flags */
        for (list = head.next; list;) {
                struct resource *res = list->res;

                res->start = list->start;
                res->end = list->end;
                res->flags = list->flags;
                if (list->dev->subordinate)
                        res->flags = 0;

                list = list->next;
        }
        free_failed_list(&head);

        goto again;

enable_all:
        retval = pci_reenable_device(bridge);
        pci_set_master(bridge);
        pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);