#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/pci_x86.h>
struct pci_root_info {
	struct acpi_device *bridge;
	char *name;
	unsigned int res_num;
	struct resource *res;
	struct pci_bus *bus;
	int busnum;
};
static bool pci_use_crs = true;
static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}
static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASUS M2V-MX SE",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
		},
	},
	{}
};
void __init pci_acpi_crs_quirks(void)
{
	int year;

	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
		pci_use_crs = false;

	dmi_check_system(pci_use_crs_table);

	/*
	 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
	 * takes precedence over anything we figured out above.
	 */
	if (pci_probe & PCI_ROOT_NO_CRS)
		pci_use_crs = false;
	else if (pci_probe & PCI_USE__CRS)
		pci_use_crs = true;

	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
	       "if necessary, use \"pci=%s\" and report a bug\n",
	       pci_use_crs ? "Using" : "Ignoring",
	       pci_use_crs ? "nocrs" : "use_crs");
}
static acpi_status
resource_to_addr(struct acpi_resource *resource,
		 struct acpi_resource_address64 *addr)
{
	acpi_status status;
	struct acpi_resource_memory24 *memory24;
	struct acpi_resource_memory32 *memory32;
	struct acpi_resource_fixed_memory32 *fixed_memory32;

	memset(addr, 0, sizeof(*addr));
	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24:
		memory24 = &resource->data.memory24;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory24->minimum;
		addr->address_length = memory24->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_MEMORY32:
		memory32 = &resource->data.memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory32->minimum;
		addr->address_length = memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
		fixed_memory32 = &resource->data.fixed_memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = fixed_memory32->address;
		addr->address_length = fixed_memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_ADDRESS16:
	case ACPI_RESOURCE_TYPE_ADDRESS32:
	case ACPI_RESOURCE_TYPE_ADDRESS64:
		status = acpi_resource_to_address64(resource, addr);
		if (ACPI_SUCCESS(status) &&
		    (addr->resource_type == ACPI_MEMORY_RANGE ||
		     addr->resource_type == ACPI_IO_RANGE) &&
		    addr->address_length > 0) {
			return AE_OK;
		}
		break;
	}
	return AE_ERROR;
}
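/*
 * acpi_walk_resources() callback: count the _CRS entries that convert to
 * usable windows, so get_current_resources() can size the info->res array.
 */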
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_addr(acpi_res, &addr);
	if (ACPI_SUCCESS(status))
		info->res_num++;
	return AE_OK;
}
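/*
 * acpi_walk_resources() callback: turn one _CRS entry into a struct
 * resource in info->res[], applying any translation_offset between the
 * PCI-side and host-side addresses.
 */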
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	u64 start, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;
	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
	} else
		return AE_OK;
	start = addr.minimum + addr.translation_offset;
	end = addr.maximum + addr.translation_offset;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	info->res_num++;
	if (addr.translation_offset)
		dev_info(&info->bridge->dev, "host bridge window %pR "
			 "(PCI address [%#llx-%#llx])\n",
			 res, res->start - addr.translation_offset,
			 res->end - addr.translation_offset);
	else
		dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

	return AE_OK;
}
static bool resource_contains(struct resource *res, resource_size_t point)
{
	if (res->start <= point && point <= res->end)
		return true;
	return false;
}
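/*
 * Merge host bridge windows of the same type that overlap: the kernel
 * resource tree does not allow overlapping entries, so overlapping windows
 * are expanded into one and the duplicate is dropped.
 */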
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
	int i, j;
	struct resource *res1, *res2;

	for (i = 0; i < info->res_num; i++) {
		res1 = &info->res[i];
		if (!(res1->flags & type))
			continue;

		for (j = i + 1; j < info->res_num; j++) {
			res2 = &info->res[j];
			if (!(res2->flags & type))
				continue;

			/*
			 * I don't like throwing away windows because then
			 * our resources no longer match the ACPI _CRS, but
			 * the kernel resource tree doesn't allow overlaps.
			 */
			if (resource_contains(res1, res2->start) ||
			    resource_contains(res1, res2->end) ||
			    resource_contains(res2, res1->start) ||
			    resource_contains(res2, res1->end)) {
				res1->start = min(res1->start, res2->start);
				res1->end = max(res1->end, res2->end);
				dev_info(&info->bridge->dev,
					 "host bridge window expanded to %pR; %pR ignored\n",
					 res1, res2);
				res2->flags = 0;
			}
		}
	}
}
static void add_resources(struct pci_root_info *info)
{
	int i;
	struct resource *res, *root, *conflict;

	coalesce_windows(info, IORESOURCE_MEM);
	coalesce_windows(info, IORESOURCE_IO);

	for (i = 0; i < info->res_num; i++) {
		res = &info->res[i];

		if (res->flags & IORESOURCE_MEM)
			root = &iomem_resource;
		else if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			continue;

		conflict = insert_resource_conflict(root, res);
		if (conflict)
			dev_info(&info->bridge->dev,
				 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
				 res, conflict->name, conflict);
		else
			pci_bus_add_resource(info->bus, res, 0);
	}
}
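/*
 * Read the host bridge's _CRS: walk it once to count the windows, allocate
 * info.res accordingly, walk it again to fill the array, then publish the
 * windows via add_resources().
 */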
static void
get_current_resources(struct acpi_device *device, int busnum,
		      int domain, struct pci_bus *bus)
{
	struct pci_root_info info;
	size_t size;

	if (pci_use_crs)
		pci_bus_remove_resources(bus);

	info.bridge = device;
	info.bus = bus;
	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
			    &info);
	if (!info.res_num)
		return;

	size = sizeof(*info.res) * info.res_num;
	info.res = kmalloc(size, GFP_KERNEL);
	if (!info.res)
		return;

	info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
	if (!info.name)
		goto name_alloc_fail;

	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
			    &info);

	add_resources(&info);

	return;

name_alloc_fail:
	kfree(info.res);
}
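/*
 * Create (or reuse) the root bus for an ACPI host bridge: pick a NUMA node,
 * set up the arch-specific sysdata, read the _CRS windows, and scan the
 * devices behind the bridge.
 */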
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * Maybe the desired pci bus has been already scanned. In such case
	 * it is unnecessary to scan the pci bus with the given domain,busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus already exists, the content of
		 * bus->sysdata will be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}

	/* After the PCI-E bus has been walked and all devices discovered,
	 * configure any settings of the fabric that might be necessary.
	 */
	if (bus) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node) {
			struct pci_dev *self = child->self;
			if (!self)
				continue;

			pcie_bus_configure_settings(child, self->pcie_mpss);
		}
	}

	if (!bus)
		kfree(sd);

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}
);
404 int __init
pci_acpi_init(void)
406 struct pci_dev
*dev
= NULL
;
411 printk(KERN_INFO
"PCI: Using ACPI for IRQ routing\n");
412 acpi_irq_penalty_init();
413 pcibios_enable_irq
= acpi_pci_irq_enable
;
414 pcibios_disable_irq
= acpi_pci_irq_disable
;
415 x86_init
.pci
.init_irq
= x86_init_noop
;
419 * PCI IRQ routing is set up by pci_enable_device(), but we
420 * also do it here in case there are still broken drivers that
421 * don't use pci_enable_device().
423 printk(KERN_INFO
"PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
424 for_each_pci_dev(dev
)
425 acpi_pci_irq_enable(dev
);