// SPDX-License-Identifier: GPL-2.0
/*
 * From setup-res.c, by:
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *	Ivan Kokshaysky (ink@jurassic.park.msu.ru)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include "pci.h"

void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry) {
		printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
		return;
	}

	entry->offset = offset;
	resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

void pci_add_resource(struct list_head *resources, struct resource *res)
{
	pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

void pci_free_resource_list(struct list_head *resources)
{
	resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);

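/*
 * Example (illustrative sketch): how a host bridge driver might use the
 * helpers above to describe one memory window and release the list again.
 * The window addresses and the example_* names are hypothetical.
 */
static void __maybe_unused example_build_windows(void)
{
	static struct resource example_mem = {
		.name	= "example MEM window",
		.start	= 0x40000000,
		.end	= 0x4fffffff,
		.flags	= IORESOURCE_MEM,
	};
	LIST_HEAD(example_windows);

	/* CPU 0x40000000 corresponds to bus address 0x0: offset = CPU - bus */
	pci_add_resource_offset(&example_windows, &example_mem, 0x40000000);

	/* ... hand example_windows to the PCI core here ... */

	pci_free_resource_list(&example_windows);
}
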
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
			  unsigned int flags)
{
	struct pci_bus_resource *bus_res;

	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
	if (!bus_res) {
		dev_err(&bus->dev, "can't add %pR resource\n", res);
		return;
	}

	bus_res->res = res;
	bus_res->flags = flags;
	list_add_tail(&bus_res->list, &bus->resources);
}

struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
	struct pci_bus_resource *bus_res;

	if (n < PCI_BRIDGE_RESOURCE_NUM)
		return bus->resource[n];

	n -= PCI_BRIDGE_RESOURCE_NUM;
	list_for_each_entry(bus_res, &bus->resources, list) {
		if (n-- == 0)
			return bus_res->res;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);

void pci_bus_remove_resources(struct pci_bus *bus)
{
	int i;
	struct pci_bus_resource *bus_res, *tmp;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		bus->resource[i] = NULL;

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		list_del(&bus_res->list);
		kfree(bus_res);
	}
}

int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources)
{
	struct resource_entry *win;
	struct resource *parent, *res;
	int err;

	resource_list_for_each_entry(win, resources) {
		res = win->res;
		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);

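/*
 * Example (illustrative sketch): a host bridge driver can build its window
 * list and then claim every I/O and memory window against the global
 * ioport_resource/iomem_resource trees in one call.  The example_* name is
 * hypothetical; the request is devres-managed, so there is no explicit
 * release on the error path.
 */
static int __maybe_unused example_request_windows(struct device *dev,
						  struct list_head *windows)
{
	int err;

	err = devm_request_pci_bus_resources(dev, windows);
	if (err)
		dev_err(dev, "failed to request bridge windows: %d\n", err);

	return err;
}
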
static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus are entirely within @region.  This is used to control the bus
 * addresses of resources we allocate, e.g., we may need a resource that
 * can be mapped by a 32-bit BAR.
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
					struct resource *res,
					struct pci_bus_region *region)
{
	struct pci_bus_region r;

	pcibios_resource_to_bus(bus, &r, res);
	if (r.start < region->start)
		r.start = region->start;
	if (r.end > region->end)
		r.end = region->end;

	if (r.end < r.start)
		res->end = res->start - 1;	/* clipped to an empty resource */
	else
		pcibios_bus_to_resource(bus, res, &r);
}

static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data,
		struct pci_bus_region *region)
{
	int i, ret;
	struct resource *r, avail;
	resource_size_t max;

	type_mask |= IORESOURCE_TYPE_BITS;

	pci_bus_for_each_resource(bus, r, i) {
		resource_size_t min_used = min;

		if (!r)
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/*
		 * We cannot allocate a non-prefetching resource from a
		 * prefetching area.
		 */
		if ((r->flags & IORESOURCE_PREFETCH) &&
		    !(res->flags & IORESOURCE_PREFETCH))
			continue;

		avail = *r;
		pci_clip_resource_to_region(bus, &avail, region);

		/*
		 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
		 * protect badly documented motherboard resources, but if
		 * this is an already-configured bridge window, its start
		 * overrides "min".
		 */
		if (avail.start)
			min_used = avail.start;

		max = avail.end;

		/* Ok, try it out.. */
		ret = allocate_resource(r, res, size, min_used, max,
					align, alignf, alignf_data);
		if (ret == 0)
			return 0;
	}
	return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: bus we're allocating from
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum /proc/iomem address to allocate
 * @type_mask: IORESOURCE_* type flags
 * @alignf: resource alignment function
 * @alignf_data: data argument for resource alignment function
 *
 * Given the PCI bus a device resides on, the size, minimum address,
 * alignment and type, try to find an acceptable resource allocation
 * for a specific device resource.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	int rc;

	if (res->flags & IORESOURCE_MEM_64) {
		/* Prefer space above 4GB, then fall back to the full 64-bit range */
		rc = pci_bus_alloc_from_region(bus, res, size, align, min,
					       type_mask, alignf, alignf_data,
					       &pci_high);
		if (rc == 0)
			return 0;

		return pci_bus_alloc_from_region(bus, res, size, align, min,
						 type_mask, alignf, alignf_data,
						 &pci_64_bit);
	}
#endif

	return pci_bus_alloc_from_region(bus, res, size, align, min,
					 type_mask, alignf, alignf_data,
					 &pci_32_bit);
}
EXPORT_SYMBOL(pci_bus_alloc_resource);

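/*
 * Example (illustrative sketch): asking the parent bus for 1 MB of 32-bit,
 * non-prefetchable memory space for a device resource.  The size, alignment
 * and example_* name are hypothetical; alignf is NULL, so no extra alignment
 * constraint is applied beyond @align.
 */
static int __maybe_unused example_alloc_mem(struct pci_dev *dev,
					    struct resource *res)
{
	resource_size_t size = 1024 * 1024;

	res->flags = IORESOURCE_MEM;	/* 32-bit, non-prefetchable */

	/* IORESOURCE_PREFETCH in type_mask: the PREFETCH bit must match the window */
	return pci_bus_alloc_resource(dev->bus, res, size, size,
				      PCIBIOS_MIN_MEM, IORESOURCE_PREFETCH,
				      NULL, NULL);
}
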
/*
 * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
 * resource fits inside a window of an upstream bridge, do nothing.  If it
 * overlaps an upstream window but extends outside it, clip the resource so
 * it fits completely inside.
 */
bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
{
	struct pci_bus *bus = dev->bus;
	struct resource *res = &dev->resource[idx];
	struct resource orig_res = *res;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		resource_size_t start, end;

		if (!r)
			continue;

		if (resource_type(res) != resource_type(r))
			continue;

		start = max(r->start, res->start);
		end = min(r->end, res->end);

		if (start > end)
			continue;	/* no overlap */

		if (res->start == start && res->end == end)
			return false;	/* no change */

		res->start = start;
		res->end = end;
		res->flags &= ~IORESOURCE_UNSET;
		orig_res.flags &= ~IORESOURCE_UNSET;
		pci_printk(KERN_DEBUG, dev, "%pR clipped to %pR\n",
			   &orig_res, res);

		return true;
	}

	return false;
}

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * This adds sysfs entries for the device and starts its driver.
 */
void pci_bus_add_device(struct pci_dev *dev)
{
	int retval;

	/*
	 * Can not put in pci_device_add yet because resources
	 * are not assigned yet for some devices.
	 */
	pcibios_bus_add_device(dev);
	pci_fixup_device(pci_fixup_final, dev);
	pci_create_sysfs_dev_files(dev);
	pci_proc_attach_device(dev);
	pci_bridge_d3_update(dev);

	dev->match_driver = true;
	retval = device_attach(&dev->dev);
	if (retval < 0 && retval != -EPROBE_DEFER) {
		pci_warn(dev, "device attach failed (%d)\n", retval);
		pci_proc_detach_device(dev);
		pci_remove_sysfs_dev_files(dev);
		return;
	}

	pci_dev_assign_added(dev, true);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);

/**
 * pci_bus_add_devices - start driver for PCI devices
 * @bus: bus to check for new devices
 *
 * Start drivers for PCI devices and add some sysfs entries.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip already-added devices */
		if (pci_dev_is_added(dev))
			continue;
		pci_bus_add_device(dev);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip if device attach failed */
		if (!pci_dev_is_added(dev))
			continue;
		child = dev->subordinate;
		if (child)
			pci_bus_add_devices(child);
	}
}
EXPORT_SYMBOL(pci_bus_add_devices);

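/*
 * Example (illustrative sketch): a typical host controller probe scans the
 * hierarchy below its root bridge and then calls pci_bus_add_devices() so
 * the core creates sysfs/proc entries and binds drivers.  The example_*
 * name is hypothetical.
 */
static int __maybe_unused example_host_start(struct pci_host_bridge *bridge)
{
	int ret;

	ret = pci_scan_root_bus_bridge(bridge);	/* enumerate devices */
	if (ret < 0)
		return ret;

	pci_bus_add_devices(bridge->bus);	/* attach drivers to what was found */
	return 0;
}
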
/**
 * pci_walk_bus - walk devices on/under bus, calling callback.
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback
 *
 * Walk the given bus, including any bridged devices
 * on buses under this bus.  Call the provided callback
 * on each device found.
 *
 * We check the return of @cb each time.  If it returns anything
 * other than 0, we break out.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
		  void *userdata)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	struct list_head *next;
	int retval;

	bus = top;
	down_read(&pci_bus_sem);
	next = top->devices.next;
	for (;;) {
		if (next == &bus->devices) {
			/* end of this bus, go up or finish */
			if (bus == top)
				break;
			next = bus->self->bus_list.next;
			bus = bus->self->bus;
			continue;
		}
		dev = list_entry(next, struct pci_dev, bus_list);
		if (dev->subordinate) {
			/* this is a pci-pci bridge, do its devices next */
			next = dev->subordinate->devices.next;
			bus = dev->subordinate;
		} else
			next = dev->bus_list.next;

		retval = cb(dev, userdata);
		if (retval)
			break;
	}
	up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);

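/*
 * Example (illustrative sketch): counting every device at or below a bus
 * with pci_walk_bus().  The callback returns 0 so the whole hierarchy is
 * visited; the example_* names are hypothetical.
 */
static int __maybe_unused example_count_one(struct pci_dev *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* a non-zero return would stop the walk early */
}

static unsigned int __maybe_unused example_count_devices(struct pci_bus *top)
{
	unsigned int count = 0;

	pci_walk_bus(top, example_count_one, &count);
	return count;
}
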
struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
	if (bus)
		get_device(&bus->dev);
	return bus;
}
EXPORT_SYMBOL(pci_bus_get);

void pci_bus_put(struct pci_bus *bus)
{
	if (bus)
		put_device(&bus->dev);
}
EXPORT_SYMBOL(pci_bus_put);

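/*
 * Example (illustrative sketch): pci_bus_get()/pci_bus_put() bracket use of
 * a bus pointer so the underlying struct device cannot be released while it
 * is still being used.  The example_* name is hypothetical.
 */
static void __maybe_unused example_use_bus(struct pci_dev *dev)
{
	struct pci_bus *bus = pci_bus_get(dev->bus);

	if (!bus)
		return;

	/* ... safely dereference "bus" here ... */

	pci_bus_put(bus);
}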