arch/x86/pci/acpi.c (linux-next, next-20110831)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

struct pci_root_info {
	struct acpi_device *bridge;
	char *name;
	unsigned int res_num;
	struct resource *res;
	struct pci_bus *bus;
	int busnum;
};
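
/*
 * Whether to use the host bridge windows reported by ACPI _CRS.  Defaults
 * to true; pci_acpi_crs_quirks() below turns it off for pre-2008 BIOSes,
 * the entries in pci_use_crs_table force it back on for machines that need
 * it, and an explicit "pci=use_crs" or "pci=nocrs" on the command line
 * overrides everything.
 */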
static bool pci_use_crs = true;

static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}

static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	{}
};

void __init pci_acpi_crs_quirks(void)
{
	int year;

	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
		pci_use_crs = false;

	dmi_check_system(pci_use_crs_table);

	/*
	 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
	 * takes precedence over anything we figured out above.
	 */
	if (pci_probe & PCI_ROOT_NO_CRS)
		pci_use_crs = false;
	else if (pci_probe & PCI_USE__CRS)
		pci_use_crs = true;

	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
	       "if necessary, use \"pci=%s\" and report a bug\n",
	       pci_use_crs ? "Using" : "Ignoring",
	       pci_use_crs ? "nocrs" : "use_crs");
}
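
/*
 * Translate an ACPI resource descriptor into a generic
 * acpi_resource_address64.  Memory24/Memory32/FixedMemory32 descriptors
 * are converted by hand; Address16/32/64 descriptors go through
 * acpi_resource_to_address64().  Anything else, or an address descriptor
 * that is not a non-empty memory or I/O range, yields AE_ERROR.
 */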
static acpi_status
resource_to_addr(struct acpi_resource *resource,
		 struct acpi_resource_address64 *addr)
{
	acpi_status status;
	struct acpi_resource_memory24 *memory24;
	struct acpi_resource_memory32 *memory32;
	struct acpi_resource_fixed_memory32 *fixed_memory32;

	memset(addr, 0, sizeof(*addr));
	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24:
		memory24 = &resource->data.memory24;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory24->minimum;
		addr->address_length = memory24->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_MEMORY32:
		memory32 = &resource->data.memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory32->minimum;
		addr->address_length = memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
		fixed_memory32 = &resource->data.fixed_memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = fixed_memory32->address;
		addr->address_length = fixed_memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_ADDRESS16:
	case ACPI_RESOURCE_TYPE_ADDRESS32:
	case ACPI_RESOURCE_TYPE_ADDRESS64:
		status = acpi_resource_to_address64(resource, addr);
		if (ACPI_SUCCESS(status) &&
		    (addr->resource_type == ACPI_MEMORY_RANGE ||
		     addr->resource_type == ACPI_IO_RANGE) &&
		    addr->address_length > 0) {
			return AE_OK;
		}
		break;
	}
	return AE_ERROR;
}
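
/*
 * acpi_walk_resources() callback: count the _CRS entries that translate
 * into usable windows so the caller can size the info->res array.
 */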
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_addr(acpi_res, &addr);
	if (ACPI_SUCCESS(status))
		info->res_num++;
	return AE_OK;
}
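
/*
 * acpi_walk_resources() callback: describe one _CRS window as a struct
 * resource in CPU address space (i.e. with the translation offset
 * applied).  The window is only recorded (res_num advanced) when
 * pci_use_crs is set; otherwise it is merely logged.
 */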
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	u64 start, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
	} else
		return AE_OK;

	start = addr.minimum + addr.translation_offset;
	end = addr.maximum + addr.translation_offset;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	info->res_num++;
	if (addr.translation_offset)
		dev_info(&info->bridge->dev, "host bridge window %pR "
			 "(PCI address [%#llx-%#llx])\n",
			 res, res->start - addr.translation_offset,
			 res->end - addr.translation_offset);
	else
		dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

	return AE_OK;
}
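
/* True if @point lies inside @res; both endpoints are inclusive. */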
static bool resource_contains(struct resource *res, resource_size_t point)
{
	if (res->start <= point && point <= res->end)
		return true;
	return false;
}
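
/*
 * Merge overlapping windows of the given type (IORESOURCE_MEM or
 * IORESOURCE_IO) into a single enlarged window, since the kernel
 * resource tree does not allow overlaps.  An absorbed window has its
 * flags cleared so add_resources() will skip it.
 */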
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
	int i, j;
	struct resource *res1, *res2;

	for (i = 0; i < info->res_num; i++) {
		res1 = &info->res[i];
		if (!(res1->flags & type))
			continue;

		for (j = i + 1; j < info->res_num; j++) {
			res2 = &info->res[j];
			if (!(res2->flags & type))
				continue;

			/*
			 * I don't like throwing away windows because then
			 * our resources no longer match the ACPI _CRS, but
			 * the kernel resource tree doesn't allow overlaps.
			 */
			if (resource_contains(res1, res2->start) ||
			    resource_contains(res1, res2->end) ||
			    resource_contains(res2, res1->start) ||
			    resource_contains(res2, res1->end)) {
				res1->start = min(res1->start, res2->start);
				res1->end = max(res1->end, res2->end);
				dev_info(&info->bridge->dev,
					 "host bridge window expanded to %pR; %pR ignored\n",
					 res1, res2);
				res2->flags = 0;
			}
		}
	}
}
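
/*
 * Insert the coalesced _CRS windows into the global iomem/ioport
 * resource trees and attach them to the root bus.  Windows that
 * conflict with existing resources are logged and dropped.
 */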
static void add_resources(struct pci_root_info *info)
{
	int i;
	struct resource *res, *root, *conflict;

	if (!pci_use_crs)
		return;

	coalesce_windows(info, IORESOURCE_MEM);
	coalesce_windows(info, IORESOURCE_IO);

	for (i = 0; i < info->res_num; i++) {
		res = &info->res[i];

		if (res->flags & IORESOURCE_MEM)
			root = &iomem_resource;
		else if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			continue;

		conflict = insert_resource_conflict(root, res);
		if (conflict)
			dev_info(&info->bridge->dev,
				 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
				 res, conflict->name, conflict);
		else
			pci_bus_add_resource(info->bus, res, 0);
	}
}
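
/*
 * Read the host bridge's _CRS and, when pci_use_crs is set, replace the
 * root bus's default resources with the windows found there: count the
 * entries, allocate info.res, fill it in via setup_resource() and hand
 * the result to add_resources().
 */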
static void
get_current_resources(struct acpi_device *device, int busnum,
		      int domain, struct pci_bus *bus)
{
	struct pci_root_info info;
	size_t size;

	if (pci_use_crs)
		pci_bus_remove_resources(bus);

	info.bridge = device;
	info.bus = bus;
	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
			    &info);
	if (!info.res_num)
		return;

	size = sizeof(*info.res) * info.res_num;
	info.res = kmalloc(size, GFP_KERNEL);
	if (!info.res)
		goto res_alloc_fail;

	info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
	if (!info.name)
		goto name_alloc_fail;

	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
			    &info);

	add_resources(&info);
	return;

name_alloc_fail:
	kfree(info.res);
res_alloc_fail:
	return;
}
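
/*
 * Create and scan the PCI root bus for an ACPI host bridge: check
 * domain support, pick a NUMA node, allocate the per-root pci_sysdata,
 * pull in the _CRS windows, scan the child buses and configure PCIe
 * MPS settings on the resulting fabric.
 */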
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * The desired pci bus might already have been scanned; if so,
	 * there is no need to rescan it with the given domain/busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exists, the content of bus->sysdata
		 * will be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}

	/* After the PCI-E bus has been walked and all devices discovered,
	 * configure any settings of the fabric that might be necessary.
	 */
	if (bus) {
		struct pci_bus *child;

		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child, child->self->pcie_mpss);
	}

	if (!bus)
		kfree(sd);

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}
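
/*
 * Switch x86 PCI interrupt handling over to ACPI: install the ACPI IRQ
 * enable/disable hooks and, when "pci=routeirq" is given, route IRQs
 * for every device up front.
 */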
int __init pci_acpi_init(void)
{
	struct pci_dev *dev = NULL;

	if (acpi_noirq)
		return -ENODEV;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
	acpi_irq_penalty_init();
	pcibios_enable_irq = acpi_pci_irq_enable;
	pcibios_disable_irq = acpi_pci_irq_disable;
	x86_init.pci.init_irq = x86_init_noop;

	if (pci_routeirq) {
		/*
		 * PCI IRQ routing is set up by pci_enable_device(), but we
		 * also do it here in case there are still broken drivers that
		 * don't use pci_enable_device().
		 */
		printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
		for_each_pci_dev(dev)
			acpi_pci_irq_enable(dev);
	}

	return 0;
}