// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2007 Novell Inc.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mempolicy.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/kexec.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include "pcie/portdrv.h"
struct pci_dynid {
	struct list_head node;
	struct pci_device_id id;
};
/**
 * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
 * @drv: target pci driver
 * @vendor: PCI vendor ID
 * @device: PCI device ID
 * @subvendor: PCI subvendor ID
 * @subdevice: PCI subdevice ID
 * @class: PCI class
 * @class_mask: PCI class mask
 * @driver_data: private driver data
 *
 * Adds a new dynamic pci device ID to this driver and causes the
 * driver to probe for all devices again.  @drv must have been
 * registered prior to calling this function.
 *
 * Does GFP_KERNEL allocation.
 *
 * Return: 0 on success, -errno on failure.
 */
int pci_add_dynid(struct pci_driver *drv,
		  unsigned int vendor, unsigned int device,
		  unsigned int subvendor, unsigned int subdevice,
		  unsigned int class, unsigned int class_mask,
		  unsigned long driver_data)
{
	struct pci_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;
	dynid->id.vendor = vendor;
	dynid->id.device = device;
	dynid->id.subvendor = subvendor;
	dynid->id.subdevice = subdevice;
	dynid->id.class = class;
	dynid->id.class_mask = class_mask;
	dynid->id.driver_data = driver_data;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
EXPORT_SYMBOL_GPL(pci_add_dynid);
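/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): a driver that has already registered itself could add a dynamic ID
 * at runtime roughly like this.  The 0x1234/0x5678 vendor/device IDs are
 * made up.
 */
static int __maybe_unused foo_add_dynamic_id_example(struct pci_driver *drv)
{
	/* Match one extra vendor/device pair, any subsystem, any class */
	return pci_add_dynid(drv, 0x1234, 0x5678,
			     PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0);
}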
static void pci_free_dynids(struct pci_driver *drv)
{
	struct pci_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}
/**
 * store_new_id - sysfs frontend to pci_add_dynid()
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 *
 * Allow PCI IDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	unsigned long driver_data = 0;
	int fields = 0;
	int retval = 0;
	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	if (fields != 7) {
		struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);

		if (!pdev)
			return -ENOMEM;

		pdev->vendor = vendor;
		pdev->device = device;
		pdev->subsystem_vendor = subvendor;
		pdev->subsystem_device = subdevice;
		pdev->class = class;

		if (pci_match_id(pdrv->id_table, pdev))
			retval = -EEXIST;

		kfree(pdev);
	}
	/* Only accept driver_data values that match an existing id_table entry */
	while (ids->vendor || ids->subvendor || ids->class_mask) {
		if (driver_data == ids->driver_data) {
			retval = 0;
			break;
		}
		ids++;
	}
	if (retval)	/* No match */
		return retval;

	retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
			       class, class_mask, driver_data);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
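/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): new_id_store() above accepts up to seven hex fields.  A user-space
 * write of "1af4 1041" would parse as vendor/device, with the remaining
 * fields keeping their defaults (PCI_ANY_ID subsystem IDs, class and
 * class_mask of 0).
 */
static int __maybe_unused example_parse_new_id(const char *buf)
{
	u32 vendor, device, subvendor = PCI_ANY_ID, subdevice = PCI_ANY_ID;
	u32 class = 0, class_mask = 0;
	unsigned long driver_data = 0;

	/* Same format string as new_id_store(); returns the field count */
	return sscanf(buf, "%x %x %x %x %x %x %lx",
		      &vendor, &device, &subvendor, &subdevice,
		      &class, &class_mask, &driver_data);
}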
/**
 * store_remove_id - remove a PCI device ID from this driver
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 *
 * Removes a dynamic pci device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct pci_dynid *dynid, *n;
	struct pci_driver *pdrv = to_pci_driver(driver);
	u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	int fields = 0;
	size_t retval = -ENODEV;
	fields = sscanf(buf, "%x %x %x %x %x %x",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask);
	spin_lock(&pdrv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
		struct pci_device_id *id = &dynid->id;

		if ((id->vendor == vendor) &&
		    (id->device == device) &&
		    (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
		    (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
		    !((id->class ^ class) & class_mask)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&pdrv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);
static struct attribute *pci_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pci_drv);
/**
 * pci_match_id - See if a pci device matches a given pci_id table
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against.
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 *
 * Deprecated, don't use this as it will not catch any dynamic ids
 * that a driver might want to check for.
 */
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
					 struct pci_dev *dev)
{
	while (ids->vendor || ids->subvendor || ids->class_mask) {
		if (pci_match_one_device(ids, dev))
			return ids;
		ids++;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_match_id);
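/*
 * Illustrative sketch only (hypothetical table and helper, not part of the
 * original file): how a driver with a static ID table might use
 * pci_match_id() to look up the entry matching a given device.  As noted
 * above, dynamic IDs added via new_id are not visible to this helper.
 */
static const struct pci_device_id example_ids[] __maybe_unused = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* made-up vendor/device */
	{ 0, }
};

static unsigned long __maybe_unused example_lookup_driver_data(struct pci_dev *pdev)
{
	const struct pci_device_id *id = pci_match_id(example_ids, pdev);

	return id ? id->driver_data : 0;
}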
static const struct pci_device_id pci_device_id_any = {
	.vendor = PCI_ANY_ID,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
};
/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @drv: the PCI driver to match against
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 */
static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;
	const struct pci_device_id *found_id = NULL;
	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (pci_match_one_device(&dynid->id, dev)) {
			found_id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (!found_id)
		found_id = pci_match_id(drv->id_table, dev);

	/* driver_override will always match, send a dummy id */
	if (!found_id && dev->driver_override)
		found_id = &pci_device_id_any;

	return found_id;
}
struct drv_dev_and_id {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};
static long local_pci_probe(void *_ddi)
{
	struct drv_dev_and_id *ddi = _ddi;
	struct pci_dev *pci_dev = ddi->dev;
	struct pci_driver *pci_drv = ddi->drv;
	struct device *dev = &pci_dev->dev;
	int rc;

	/*
	 * Unbound PCI devices are always put in D0, regardless of
	 * runtime PM status.  During probe, the device is set to
	 * active and the usage count is incremented.  If the driver
	 * supports runtime PM, it should call pm_runtime_put_noidle(),
	 * or any other runtime PM helper function decrementing the usage
	 * count, in its probe routine and pm_runtime_get_noresume() in
	 * its remove routine.  (A minimal driver-side sketch of this
	 * pairing follows pci_call_probe() below.)
	 */
	pm_runtime_get_sync(dev);
	pci_dev->driver = pci_drv;
	rc = pci_drv->probe(pci_dev, ddi->id);
	if (!rc)
		return rc;
	if (rc < 0) {
		pci_dev->driver = NULL;
		pm_runtime_put_sync(dev);
		return rc;
	}
	/*
	 * Probe function should return < 0 for failure, 0 for success
	 * Treat values > 0 as success, but warn.
	 */
	pci_warn(pci_dev, "Driver probe function unexpectedly returned %d\n",
		 rc);
	return 0;
}
static bool pci_physfn_is_probed(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	return dev->is_virtfn && dev->physfn->is_probed;
#else
	return false;
#endif
}
static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node, cpu;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/*
	 * Execute driver initialization on node where the device is
	 * attached.  This way the driver likely allocates its local memory
	 * close to the device.
	 */
	node = dev_to_node(&dev->dev);

	cpu_hotplug_disable();

	/*
	 * Prevent nesting work_on_cpu() for the case where a Virtual Function
	 * device is probed from work_on_cpu() of the Physical device.
	 */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
	    pci_physfn_is_probed(dev))
		cpu = nr_cpu_ids;
	else
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);

	if (cpu < nr_cpu_ids)
		error = work_on_cpu(cpu, local_pci_probe, &ddi);
	else
		error = local_pci_probe(&ddi);

	cpu_hotplug_enable();
	return error;
}
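/*
 * Illustrative sketch only (hypothetical driver callbacks, not part of the
 * original file): the runtime PM contract described in local_pci_probe()
 * above.  A driver that supports runtime PM drops the usage count taken by
 * the PCI core in its probe routine and takes it back in its remove routine.
 */
static int __maybe_unused foo_probe(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	/* ... normal device setup would go here ... */
	pm_runtime_put_noidle(&pdev->dev);	/* allow runtime suspend */
	return 0;
}

static void __maybe_unused foo_remove(struct pci_dev *pdev)
{
	pm_runtime_get_noresume(&pdev->dev);	/* undo the put from probe */
	/* ... normal teardown would go here ... */
}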
/**
 * __pci_device_probe - check if a driver wants to claim a specific PCI device
 * @drv: driver to call to check if it wants the PCI device
 * @pci_dev: PCI device being probed
 *
 * returns 0 on success, else error.
 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
 */
static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
	const struct pci_device_id *id;
	int error = 0;

	if (!pci_dev->driver && drv->probe) {
		error = -ENODEV;

		id = pci_match_device(drv, pci_dev);
		if (id)
			error = pci_call_probe(drv, pci_dev, id);
	}
	return error;
}
int __weak pcibios_alloc_irq(struct pci_dev *dev)
{
	return 0;
}

void __weak pcibios_free_irq(struct pci_dev *dev)
{
}
#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
		pdev->driver_override);
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
	return true;
}
#endif
static int pci_device_probe(struct device *dev)
{
	int error;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = to_pci_driver(dev->driver);

	if (!pci_device_can_probe(pci_dev))
		return -ENODEV;

	pci_assign_irq(pci_dev);

	error = pcibios_alloc_irq(pci_dev);
	if (error < 0)
		return error;

	pci_dev_get(pci_dev);
	error = __pci_device_probe(drv, pci_dev);
	if (error) {
		pcibios_free_irq(pci_dev);
		pci_dev_put(pci_dev);
	}

	return error;
}
static int pci_device_remove(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv) {
		if (drv->remove) {
			pm_runtime_get_sync(dev);
			drv->remove(pci_dev);
			pm_runtime_put_noidle(dev);
		}
		pcibios_free_irq(pci_dev);
		pci_dev->driver = NULL;
		pci_iov_remove(pci_dev);
	}

	/* Undo the runtime PM settings in local_pci_probe() */
	pm_runtime_put_sync(dev);

	/*
	 * If the device is still on, set the power state as "unknown",
	 * since it might change by the next time we load the driver.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;

	/*
	 * We would love to complain here if pci_dev->is_enabled is set, that
	 * the driver should have called pci_disable_device(), but the
	 * unfortunate fact is there are too many odd BIOS and bridge setups
	 * that don't like drivers doing that all of the time.
	 * Oh well, we can dream of sane hardware when we sleep, no matter how
	 * horrible the crap we have to deal with is when we are awake...
	 */

	pci_dev_put(pci_dev);
	return 0;
}
static void pci_device_shutdown(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pm_runtime_resume(dev);

	if (drv && drv->shutdown)
		drv->shutdown(pci_dev);

	/*
	 * If this is a kexec reboot, turn off Bus Master bit on the
	 * device to tell it to not continue to do DMA.  Don't touch
	 * devices in D3cold or unknown states.
	 * If it is not a kexec reboot, firmware will hit the PCI
	 * devices with big hammer and stop their DMA any way.
	 */
	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
		pci_clear_master(pci_dev);
}
/* Auxiliary functions used for system resume and run-time resume. */

/**
 * pci_restore_standard_config - restore standard config registers of PCI device
 * @pci_dev: PCI device to handle
 */
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
	pci_update_current_state(pci_dev, PCI_UNKNOWN);

	if (pci_dev->current_state != PCI_D0) {
		int error = pci_set_power_state(pci_dev, PCI_D0);

		if (error)
			return error;
	}

	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
	return 0;
}
static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
	pci_fixup_device(pci_fixup_resume, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
}
#ifdef CONFIG_PM_SLEEP

static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
	pci_power_up(pci_dev);
	pci_update_current_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);
	pci_pme_restore(pci_dev);
}
/*
 * Default "suspend" method for devices that have no driver provided suspend,
 * or not even a driver at all (second part).
 */
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
	/*
	 * mark its power state as "unknown", since we don't know if
	 * e.g. the BIOS will change its device state when we suspend.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;
}
/*
 * Default "resume" method for devices that have no driver provided resume,
 * or not even a driver at all (second part).
 */
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
	int retval;

	/* if the device was enabled before suspend, reenable */
	retval = pci_reenable_device(pci_dev);
	/*
	 * if the device was busmaster before the suspend, make it busmaster
	 * again
	 */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);

	return retval;
}
static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend(pci_dev, state);
		suspend_report_result(drv->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
				      "PCI PM: Device state not saved by %pS\n",
				      drv->suspend);
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}
static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	return 0;
}
static int pci_legacy_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pci_fixup_device(pci_fixup_resume, pci_dev);

	return drv && drv->resume ?
			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
}
/* Auxiliary functions used by the new power management framework */

static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
	/* Disable non-bridge devices without PM support */
	if (!pci_has_subordinate(pci_dev))
		pci_disable_enabled_device(pci_dev);
}
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
	struct pci_driver *drv = pci_dev->driver;
	bool ret = drv && (drv->suspend || drv->resume);

	/*
	 * Legacy PM support is used by default, so warn if the new framework is
	 * supported as well.  Drivers are supposed to support either the
	 * former, or the latter, but not both at the same time.
	 */
	pci_WARN(pci_dev, ret && drv->driver.pm, "device %04x:%04x\n",
		 pci_dev->vendor, pci_dev->device);

	return ret;
}
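/*
 * Illustrative sketch only (hypothetical callbacks, not part of the original
 * file): a driver using the "new" framework supplies a struct dev_pm_ops via
 * its driver.pm pointer instead of the legacy suspend()/resume() hooks
 * checked above; the two must not be mixed.
 */
static int __maybe_unused foo_suspend(struct device *dev)
{
	/* save device context, arm wakeup, etc. */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* restore device context */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);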
/* New power management framework */

static int pci_pm_prepare(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm && pm->prepare) {
		int error = pm->prepare(dev);

		if (error < 0)
			return error;

		if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
			return 0;
	}
	if (pci_dev_need_resume(pci_dev))
		return 0;

	/*
	 * The PME setting needs to be adjusted here in case the direct-complete
	 * optimization is used with respect to this device.
	 */
	pci_dev_adjust_pme(pci_dev);
	return 1;
}
static void pci_pm_complete(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_dev_complete_resume(pci_dev);
	pm_generic_complete(dev);

	/* Resume device if platform firmware has put it in reset-power-on */
	if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
		pci_power_t pre_sleep_state = pci_dev->current_state;

		pci_refresh_power_state(pci_dev);
		/*
		 * On platforms with ACPI this check may also trigger for
		 * devices sharing power resources if one of those power
		 * resources has been activated as a result of a change of the
		 * power state of another device sharing it.  However, in that
		 * case it is also better to resume the device, in general.
		 */
		if (pci_dev->current_state < pre_sleep_state)
			pm_request_resume(dev);
	}
}
#else /* !CONFIG_PM_SLEEP */

#define pci_pm_prepare	NULL
#define pci_pm_complete	NULL

#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev)
{
	/*
	 * Some BIOSes forget to clear Root PME Status bits after system
	 * wakeup, which breaks ACPI-based runtime wakeup on PCI Express.
	 * Clear those bits now just in case (shouldn't hurt).
	 */
	if (pci_is_pcie(pci_dev) &&
	    (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT ||
	     pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC))
		pcie_clear_root_pme_status(pci_dev);
}
static int pci_pm_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	pci_dev->skip_bus_pm = false;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * PCI devices suspended at run time may need to be resumed at this
	 * point, because in general it may be necessary to reconfigure them for
	 * system suspend.  Namely, if the device is expected to wake up the
	 * system from the sleep state, it may have to be reconfigured for this
	 * purpose, or if the device is not expected to wake up the system from
	 * the sleep state, it should be prevented from signaling wakeup events
	 * going forward.
	 *
	 * Also if the driver of the device does not indicate that its system
	 * suspend callbacks can cope with runtime-suspended devices, it is
	 * better to resume the device from runtime suspend here.
	 */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    pci_dev_need_resume(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	} else {
		pci_dev_adjust_pme(pci_dev);
	}

	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend(dev);
		suspend_report_result(pm->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
				      "PCI PM: State of device not saved by %pS\n",
				      pm->suspend);
		}
	}

	return 0;
}
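/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): a driver whose system suspend callbacks can cope with a
 * runtime-suspended device opts out of the forced runtime resume above by
 * setting DPM_FLAG_SMART_SUSPEND from its probe routine.
 */
static void __maybe_unused foo_opt_in_smart_suspend(struct pci_dev *pdev)
{
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND);
}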
static int pci_pm_suspend_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_suspend_late(dev);
}
static int pci_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		dev->power.may_skip_resume = true;
		return 0;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_save_state(pci_dev);
		goto Fixup;
	}

	if (pm->suspend_noirq) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend_noirq(dev);
		suspend_report_result(pm->suspend_noirq, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
				      "PCI PM: State of device not saved by %pS\n",
				      pm->suspend_noirq);
			goto Fixup;
		}
	}

	if (pci_dev->skip_bus_pm) {
		/*
		 * Either the device is a bridge with a child in D0 below it, or
		 * the function is running for the second time in a row without
		 * going through full resume, which is possible only during
		 * suspend-to-idle in a spurious wakeup case.  The device should
		 * be in D0 at this point, but if it is a bridge, it may be
		 * necessary to save its state.
		 */
		if (!pci_dev->state_saved)
			pci_save_state(pci_dev);
	} else if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		if (pci_power_manageable(pci_dev))
			pci_prepare_to_sleep(pci_dev);
	}

	pci_dbg(pci_dev, "PCI PM: Suspend power state: %s\n",
		pci_power_name(pci_dev->current_state));

	if (pci_dev->current_state == PCI_D0) {
		pci_dev->skip_bus_pm = true;
		/*
		 * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any
		 * downstream device is in D0, so avoid changing the power state
		 * of the parent bridge by setting the skip_bus_pm flag for it.
		 */
		if (pci_dev->bus->self)
			pci_dev->bus->self->skip_bus_pm = true;
	}

	if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
		pci_dbg(pci_dev, "PCI PM: Skipped\n");
		goto Fixup;
	}

	pci_pm_set_unknown_state(pci_dev);

	/*
	 * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
	 * PCI COMMAND register isn't 0, the BIOS assumes that the controller
	 * hasn't been quiesced and tries to turn it off.  If the controller
	 * is already in D3, this can hang or cause memory corruption.
	 *
	 * Since the value of the COMMAND register doesn't matter once the
	 * device has been suspended, we can safely set it to 0 here.
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

Fixup:
	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	/*
	 * If the target system sleep state is suspend-to-idle, it is sufficient
	 * to check whether or not the device's wakeup settings are good for
	 * runtime PM.  Otherwise, the pm_resume_via_firmware() check will cause
	 * pci_pm_complete() to take care of fixing up the device's state
	 * anyway, if need be.
	 */
	dev->power.may_skip_resume = device_may_wakeup(dev) ||
					!device_can_wakeup(dev);

	return 0;
}
static int pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev_state = pci_dev->current_state;
	bool skip_bus_pm = pci_dev->skip_bus_pm;

	if (dev_pm_may_skip_resume(dev))
		return 0;

	/*
	 * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
	 * during system suspend, so update their runtime PM status to "active"
	 * as they are going to be put into D0 shortly.
	 */
	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	/*
	 * In the suspend-to-idle case, devices left in D0 during suspend will
	 * stay in D0, so it is not necessary to restore or update their
	 * configuration here and attempting to put them into D0 again is
	 * pointless, so avoid doing that.
	 */
	if (!(skip_bus_pm && pm_suspend_no_platform()))
		pci_pm_default_resume_early(pci_dev);

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pcie_pme_root_status_cleanup(pci_dev);

	if (!skip_bus_pm && prev_state == PCI_D3cold)
		pci_bridge_wait_for_secondary_bus(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return 0;

	if (pm && pm->resume_noirq)
		return pm->resume_noirq(dev);

	return 0;
}
static int pci_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * This is necessary for the suspend error path in which resume is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->resume)
			return pm->resume(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return 0;
}
#else /* !CONFIG_SUSPEND */

#define pci_pm_suspend		NULL
#define pci_pm_suspend_late	NULL
#define pci_pm_suspend_noirq	NULL
#define pci_pm_resume		NULL
#define pci_pm_resume_noirq	NULL

#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS

/*
 * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
 * a hibernate transition
 */
struct dev_pm_ops __weak pcibios_pm_ops;
static int pci_pm_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_FREEZE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/*
	 * Resume all runtime-suspended devices before creating a snapshot
	 * image of system memory, because the restore kernel generally cannot
	 * be expected to always handle them consistently and they need to be
	 * put into the runtime-active metastate during system resume anyway,
	 * so it is better to ensure that the state saved in the image will be
	 * always consistent with that.
	 */
	pm_runtime_resume(dev);
	pci_dev->state_saved = false;

	if (pm->freeze) {
		int error;

		error = pm->freeze(dev);
		suspend_report_result(pm->freeze, error);
		if (error)
			return error;
	}

	return 0;
}
static int pci_pm_freeze_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

	if (pm && pm->freeze_noirq) {
		int error;

		error = pm->freeze_noirq(dev);
		suspend_report_result(pm->freeze_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	if (pcibios_pm_ops.freeze_noirq)
		return pcibios_pm_ops.freeze_noirq(dev);

	return 0;
}
static int pci_pm_thaw_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pcibios_pm_ops.thaw_noirq) {
		int error = pcibios_pm_ops.thaw_noirq(dev);

		if (error)
			return error;
	}

	/*
	 * The pm->thaw_noirq() callback assumes the device has been
	 * returned to D0 and its config state has been restored.
	 *
	 * In addition, pci_restore_state() restores MSI-X state in MMIO
	 * space, which requires the device to be in D0, so return it to D0
	 * in case the driver's "freeze" callbacks put it into a low-power
	 * state directly.
	 */
	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return 0;

	if (pm && pm->thaw_noirq)
		return pm->thaw_noirq(dev);

	return 0;
}
static int pci_pm_thaw(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	if (pm) {
		if (pm->thaw)
			error = pm->thaw(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	pci_dev->state_saved = false;

	return error;
}
static int pci_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	/* The reason to do that is the same as in pci_pm_suspend(). */
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
	    pci_dev_need_resume(pci_dev)) {
		pm_runtime_resume(dev);
		pci_dev->state_saved = false;
	} else {
		pci_dev_adjust_pme(pci_dev);
	}

	if (pm->poweroff) {
		int error;

		error = pm->poweroff(dev);
		suspend_report_result(pm->poweroff, error);
		if (error)
			return error;
	}

	return 0;
}
static int pci_pm_poweroff_late(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));

	return pm_generic_poweroff_late(dev);
}
static int pci_pm_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_fixup_device(pci_fixup_suspend_late, pci_dev);
		return 0;
	}

	if (pm->poweroff_noirq) {
		int error;

		error = pm->poweroff_noirq(dev);
		suspend_report_result(pm->poweroff_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
		pci_prepare_to_sleep(pci_dev);

	/*
	 * The reason for doing this here is the same as for the analogous code
	 * in pci_pm_suspend_noirq().
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

	pci_fixup_device(pci_fixup_suspend_late, pci_dev);

	if (pcibios_pm_ops.poweroff_noirq)
		return pcibios_pm_ops.poweroff_noirq(dev);

	return 0;
}
static int pci_pm_restore_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pcibios_pm_ops.restore_noirq) {
		int error = pcibios_pm_ops.restore_noirq(dev);

		if (error)
			return error;
	}

	pci_pm_default_resume_early(pci_dev);
	pci_fixup_device(pci_fixup_resume_early, pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return 0;

	if (pm && pm->restore_noirq)
		return pm->restore_noirq(dev);

	return 0;
}
static int pci_pm_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * This is necessary for the hibernation error path in which restore is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->restore)
			return pm->restore(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return 0;
}
#else /* !CONFIG_HIBERNATE_CALLBACKS */

#define pci_pm_freeze		NULL
#define pci_pm_freeze_noirq	NULL
#define pci_pm_thaw		NULL
#define pci_pm_thaw_noirq	NULL
#define pci_pm_poweroff		NULL
#define pci_pm_poweroff_late	NULL
#define pci_pm_poweroff_noirq	NULL
#define pci_pm_restore		NULL
#define pci_pm_restore_noirq	NULL

#endif /* !CONFIG_HIBERNATE_CALLBACKS */
static int pci_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev = pci_dev->current_state;
	int error;

	/*
	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
	 * but it may go to D3cold when the bridge above it runtime suspends.
	 * Save its config space in case that happens.
	 */
	if (!pci_dev->driver) {
		pci_save_state(pci_dev);
		return 0;
	}

	pci_dev->state_saved = false;
	if (pm && pm->runtime_suspend) {
		error = pm->runtime_suspend(dev);
		/*
		 * -EBUSY and -EAGAIN is used to request the runtime PM core
		 * to schedule a new suspend, so log the event only with debug
		 * log level.
		 */
		if (error == -EBUSY || error == -EAGAIN) {
			pci_dbg(pci_dev, "can't suspend now (%ps returned %d)\n",
				pm->runtime_suspend, error);
			return error;
		} else if (error) {
			pci_err(pci_dev, "can't suspend (%ps returned %d)\n",
				pm->runtime_suspend, error);
			return error;
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	if (pm && pm->runtime_suspend
	    && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
	    && pci_dev->current_state != PCI_UNKNOWN) {
		pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
			      "PCI PM: State of device not saved by %pS\n",
			      pm->runtime_suspend);
		return 0;
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		pci_finish_runtime_suspend(pci_dev);
	}

	return 0;
}
static int pci_pm_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev_state = pci_dev->current_state;
	int error = 0;

	/*
	 * Restoring config space is necessary even if the device is not bound
	 * to a driver because although we left it in D0, it may have gone to
	 * D3cold when the bridge above it runtime suspended.
	 */
	pci_restore_standard_config(pci_dev);

	if (!pci_dev->driver)
		return 0;

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pci_pm_default_resume(pci_dev);

	if (prev_state == PCI_D3cold)
		pci_bridge_wait_for_secondary_bus(pci_dev);

	if (pm && pm->runtime_resume)
		error = pm->runtime_resume(dev);

	pci_dev->runtime_d3cold = false;

	return error;
}
static int pci_pm_runtime_idle(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * If pci_dev->driver is not set (unbound), the device should
	 * always remain in D0 regardless of the runtime PM status
	 */
	if (!pci_dev->driver)
		return 0;

	if (!pm)
		return -ENOSYS;

	if (pm->runtime_idle)
		return pm->runtime_idle(dev);

	return 0;
}
static const struct dev_pm_ops pci_dev_pm_ops = {
	.prepare = pci_pm_prepare,
	.complete = pci_pm_complete,
	.suspend = pci_pm_suspend,
	.suspend_late = pci_pm_suspend_late,
	.resume = pci_pm_resume,
	.freeze = pci_pm_freeze,
	.thaw = pci_pm_thaw,
	.poweroff = pci_pm_poweroff,
	.poweroff_late = pci_pm_poweroff_late,
	.restore = pci_pm_restore,
	.suspend_noirq = pci_pm_suspend_noirq,
	.resume_noirq = pci_pm_resume_noirq,
	.freeze_noirq = pci_pm_freeze_noirq,
	.thaw_noirq = pci_pm_thaw_noirq,
	.poweroff_noirq = pci_pm_poweroff_noirq,
	.restore_noirq = pci_pm_restore_noirq,
	.runtime_suspend = pci_pm_runtime_suspend,
	.runtime_resume = pci_pm_runtime_resume,
	.runtime_idle = pci_pm_runtime_idle,
};

#define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)
#else /* !CONFIG_PM */

#define pci_pm_runtime_suspend	NULL
#define pci_pm_runtime_resume	NULL
#define pci_pm_runtime_idle	NULL

#define PCI_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM */
/**
 * __pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 * @owner: owner module of drv
 * @mod_name: module name string
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0.
 * If no error occurred, the driver remains registered even if
 * no device was claimed during registration.
 */
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
			  const char *mod_name)
{
	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &pci_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	drv->driver.groups = drv->groups;

	spin_lock_init(&drv->dynids.lock);
	INIT_LIST_HEAD(&drv->dynids.list);

	/* register with core */
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL(__pci_register_driver);
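/*
 * Illustrative sketch only (hypothetical driver, not part of the original
 * file): the usual way a driver uses the registration interface above.
 * pci_register_driver() is a macro wrapping __pci_register_driver() with
 * THIS_MODULE and the module name.
 */
static const struct pci_device_id foo_id_table[] __maybe_unused = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* made-up vendor/device */
	{ 0, }
};

static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.id_table	= foo_id_table,
	/* .probe and .remove callbacks omitted in this sketch */
};

static int __maybe_unused foo_register_example(void)
{
	return pci_register_driver(&foo_pci_driver);
}

static void __maybe_unused foo_unregister_example(void)
{
	pci_unregister_driver(&foo_pci_driver);
}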
/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */
void pci_unregister_driver(struct pci_driver *drv)
{
	driver_unregister(&drv->driver);
	pci_free_dynids(drv);
}
EXPORT_SYMBOL(pci_unregister_driver);
static struct pci_driver pci_compat_driver = {
	.name = "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no
 * registered driver for the device.
 */
struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;

		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_dev_driver);
/**
 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
 * @dev: the PCI device structure to match against
 * @drv: the device driver to search for matching PCI device id structures
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns 1 if a matching
 * device ID is found, 0 otherwise.
 */
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *pci_drv;
	const struct pci_device_id *found_id;

	if (!pci_dev->match_driver)
		return 0;

	pci_drv = to_pci_driver(drv);
	found_id = pci_match_device(pci_drv, pci_dev);
	if (found_id)
		return 1;

	return 0;
}
/**
 * pci_dev_get - increments the reference count of the pci device structure
 * @dev: the device being referenced
 *
 * Each live reference to a device should be refcounted.
 *
 * Drivers for PCI devices should normally record such references in
 * their probe() methods, when they bind to a device, and release
 * them by calling pci_dev_put(), in their disconnect() methods.
 *
 * A pointer to the device with the incremented reference counter is returned.
 */
struct pci_dev *pci_dev_get(struct pci_dev *dev)
{
	if (dev)
		get_device(&dev->dev);
	return dev;
}
EXPORT_SYMBOL(pci_dev_get);
/**
 * pci_dev_put - release a use of the pci device structure
 * @dev: device that's been disconnected
 *
 * Must be called when a user of a device is finished with it.  When the last
 * user of the device calls this function, the memory of the device is freed.
 */
void pci_dev_put(struct pci_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(pci_dev_put);
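/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): code that stores a pci_dev pointer beyond the current call chain
 * takes a reference with pci_dev_get() and drops it with pci_dev_put() when
 * done, as described above.
 */
static void __maybe_unused foo_use_device_reference(struct pci_dev *pdev)
{
	struct pci_dev *ref = pci_dev_get(pdev);	/* ref == pdev, refcounted */

	/* ... use the device while holding the reference ... */

	pci_dev_put(ref);
}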
static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct pci_dev *pdev;

	if (!dev)
		return -ENODEV;

	pdev = to_pci_dev(dev);

	if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
			   pdev->subsystem_device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
			   pdev->vendor, pdev->device,
			   pdev->subsystem_vendor, pdev->subsystem_device,
			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
			   (u8)(pdev->class)))
		return -ENOMEM;

	return 0;
}
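/*
 * Illustrative sketch only (hypothetical values, not part of the original
 * file): the MODALIAS string built above, for a device with vendor 0x8086,
 * device 0x1234, subsystem 0x8086:0x0001 and class 0x020000, is what user
 * space matches against module aliases.
 */
static void __maybe_unused example_build_modalias(char *buf, size_t len)
{
	/* Same format as pci_uevent() above, with made-up IDs */
	snprintf(buf, len, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
		 0x8086, 0x1234, 0x8086, 0x0001,
		 (u8)(0x020000 >> 16), (u8)(0x020000 >> 8), (u8)0x020000);
	/* -> "pci:v00008086d00001234sv00008086sd00000001bc02sc00i00" */
}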
#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
/**
 * pci_uevent_ers - emit a uevent during recovery path of PCI device
 * @pdev: PCI device undergoing error recovery
 * @err_type: type of error event
 */
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
{
	int idx = 0;
	char *envp[3];

	switch (err_type) {
	case PCI_ERS_RESULT_NONE:
	case PCI_ERS_RESULT_CAN_RECOVER:
		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	case PCI_ERS_RESULT_RECOVERED:
		envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=1";
		break;
	case PCI_ERS_RESULT_DISCONNECT:
		envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
		envp[idx++] = "DEVICE_ONLINE=0";
		break;
	default:
		break;
	}

	if (idx > 0) {
		envp[idx++] = NULL;
		kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
	}
}
#endif
static int pci_bus_num_vf(struct device *dev)
{
	return pci_num_vf(to_pci_dev(dev));
}
/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to dev structure
 *
 * Function to update PCI device's DMA configuration using the same
 * info from the OF node or ACPI node of host bridge's parent (if any).
 */
static int pci_dma_configure(struct device *dev)
{
	struct device *bridge;
	int ret = 0;

	bridge = pci_get_host_bridge_device(to_pci_dev(dev));

	if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
	    bridge->parent->of_node) {
		ret = of_dma_configure(dev, bridge->parent->of_node, true);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);

		ret = acpi_dma_configure(dev, acpi_get_dma_attr(adev));
	}

	pci_put_host_bridge_device(bridge);

	return ret;
}
struct bus_type pci_bus_type = {
	.name		= "pci",
	.match		= pci_bus_match,
	.uevent		= pci_uevent,
	.probe		= pci_device_probe,
	.remove		= pci_device_remove,
	.shutdown	= pci_device_shutdown,
	.dev_groups	= pci_dev_groups,
	.bus_groups	= pci_bus_groups,
	.drv_groups	= pci_drv_groups,
	.pm		= PCI_PM_OPS_PTR,
	.num_vf		= pci_bus_num_vf,
	.dma_configure	= pci_dma_configure,
};
EXPORT_SYMBOL(pci_bus_type);
#ifdef CONFIG_PCIEPORTBUS
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pcie_device *pciedev;
	struct pcie_port_service_driver *driver;

	if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
		return 0;

	pciedev = to_pcie_device(dev);
	driver = to_service_driver(drv);

	if (driver->service != pciedev->service)
		return 0;

	if (driver->port_type != PCIE_ANY_PORT &&
	    driver->port_type != pci_pcie_type(pciedev->port))
		return 0;

	return 1;
}
struct bus_type pcie_port_bus_type = {
	.name		= "pci_express",
	.match		= pcie_port_bus_match,
};
EXPORT_SYMBOL_GPL(pcie_port_bus_type);
#endif
static int __init pci_driver_init(void)
{
	int ret;

	ret = bus_register(&pci_bus_type);
	if (ret)
		return ret;

#ifdef CONFIG_PCIEPORTBUS
	ret = bus_register(&pcie_port_bus_type);
	if (ret)
		return ret;
#endif
	dma_debug_add_bus(&pci_bus_type);
	return 0;
}
postcore_initcall(pci_driver_init);