/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm/setup.h>
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;
static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif
#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;
/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;
/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
				     pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}
#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);
static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}
}
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
/**
 * pci_bus_find_ext_capability - find an extended capability
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_ext_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
				int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
		return 0;
	if (header == 0xffffffff || header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
			break;
	}

	return 0;
}
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}
/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}
static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}
static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
						pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}
/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
		/* Fall back to PCI_D0 if native PM is not supported */
		else if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}
/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}
/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state >= PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}
static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	/*
	 * The Base Address register should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_dbg(&dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};
/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}
/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}
/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}
static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}
/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}
/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}
/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}
/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}
/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}
/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* PCI (as opposed to PCIe) PME requires that the device have
	   its PME# line hooked up correctly. Not all hardware vendors
	   do this, so the PME never gets delivered and the device
	   remains asleep.  The easiest way around this is to
	   periodically walk the list of suspended devices and check
	   whether any have their PME flag set.  The assumption is that
	   we'll wake up often enough anyway that this won't be a huge
	   hit, and the power savings from the devices will still be a
	   win. */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}
/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		__pci_enable_wake(dev, target_state, true, false);

	return error;
}
/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_run_wake(&dev->dev))
		return true;

	if (!dev->pme_support)
		return false;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}
/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events).  If @dev supports
 * platform wakeup events, set the device flag to indicate as much.  This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	platform_pci_sleep_wake(dev, false);
}
/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}
/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}
/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 flags, ctrl;
	struct pci_dev *bridge;

	if (!pci_is_pcie(dev) || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge || !pci_is_pcie(bridge))
		return;

	pos = pci_pcie_cap(bridge);
	if (!pos)
		return;

	/* ARI is a PCIe v2 feature */
	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
		return;

	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}
/**
 * pci_enable_ido - enable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to enable
 *
 * Enable ID-based ordering on @dev.  @type can contain the bits
 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
 * which types of transactions are allowed to be re-ordered.
 */
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);
/**
 * pci_disable_ido - disable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to disable
 */
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl &= ~PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl &= ~PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);
/**
 * pci_enable_obff - enable optimized buffer flush/fill
 * @dev: PCI device
 * @type: type of signaling to use
 *
 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
 * signaling if possible, falling back to message signaling only if
 * WAKE# isn't supported.  @type should indicate whether the PCIe link
 * should be brought out of L0s or L1 to send the message.  It should be
 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
 *
 * If your device can benefit from receiving all messages, even at the
 * power cost of bringing the link back up from a low power state, use
 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
 * preferred type).
 *
 * RETURNS:
 * Zero on success, appropriate error number on failure.
 */
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
{
	int pos;
	u32 cap;
	u16 ctrl;
	int ret;

	if (!pci_is_pcie(dev))
		return -ENOTSUPP;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTSUPP;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_OBFF_MASK))
		return -ENOTSUPP; /* no OBFF support at all */

	/* Make sure the topology supports OBFF as well */
	if (dev->bus->self) {
		ret = pci_enable_obff(dev->bus->self, type);
		if (ret)
			return ret;
	}

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (cap & PCI_EXP_OBFF_WAKE)
		ctrl |= PCI_EXP_OBFF_WAKE_EN;
	else {
		switch (type) {
		case PCI_EXP_OBFF_SIGNAL_L0:
			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
				ctrl |= PCI_EXP_OBFF_MSGA_EN;
			break;
		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
			ctrl |= PCI_EXP_OBFF_MSGB_EN;
			break;
		default:
			WARN(1, "bad OBFF signal type\n");
			return -ENOTSUPP;
		}
	}
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);

	return 0;
}
EXPORT_SYMBOL(pci_enable_obff);
/**
 * pci_disable_obff - disable optimized buffer flush/fill
 * @dev: PCI device
 *
 * Disable OBFF on @dev.
 */
void pci_disable_obff(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_obff);
/**
 * pci_ltr_supported - check whether a device supports LTR
 * @dev: PCI device
 *
 * RETURNS:
 * True if @dev supports latency tolerance reporting, false otherwise.
 */
bool pci_ltr_supported(struct pci_dev *dev)
{
	int pos;
	u32 cap;

	if (!pci_is_pcie(dev))
		return false;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return false;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);

	return cap & PCI_EXP_DEVCAP2_LTR;
}
EXPORT_SYMBOL(pci_ltr_supported);
/**
 * pci_enable_ltr - enable latency tolerance reporting
 * @dev: PCI device
 *
 * Enable LTR on @dev if possible, which means enabling it first on
 * upstream ports.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int pci_enable_ltr(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;
	int ret;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTSUPP;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return -EINVAL;

	/* Enable upstream ports first */
	if (dev->bus->self) {
		ret = pci_enable_ltr(dev->bus->self);
		if (ret)
			return ret;
	}

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_LTR_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);

	return 0;
}
EXPORT_SYMBOL(pci_enable_ltr);
/**
 * pci_disable_ltr - disable latency tolerance reporting
 * @dev: PCI device
 */
void pci_disable_ltr(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	if (!pci_ltr_supported(dev))
		return;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_LTR_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ltr);
static int __pci_ltr_scale(int *val)
{
	int scale = 0;

	while (*val > 1023) {
		*val = (*val + 31) / 32;
		scale++;
	}
	return scale;
}
/**
 * pci_set_ltr - set LTR latency values
 * @dev: PCI device
 * @snoop_lat_ns: snoop latency in nanoseconds
 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
 *
 * Figure out the scale and set the LTR values accordingly.
 */
int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
{
	int pos, ret, snoop_scale, nosnoop_scale;
	u16 val;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);

	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
		return -EINVAL;

	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
		return -EINVAL;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!pos)
		return -ENOTSUPP;

	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
	if (ret)
		return ret;

	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(pci_set_ltr);
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	if (!pci_acs_enable)
		return;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	return (((pin - 1) + slot) % 4) + 1;
}
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}
/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
								int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}

/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference that _exclusive makes is that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}

static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
				 int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		dev_dbg(&dev->dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
	if (pci_is_pcie(dev))
		return;

	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat < 16)
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	else if (lat > pcibios_max_latency)
		lat = pcibios_max_latency;
	else
		return;
	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	int rc = pci_set_mwi(dev);
	return rc;
}
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable)
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	else
		new = pci_command | PCI_COMMAND_INTX_DISABLE;

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}
/**
 * pci_intx_mask_supported - probe for INTx masking support
 * @dev: the PCI device to operate on
 *
 * Check if the device dev supports INTx masking via the config space
 * command word.
 */
bool pci_intx_mask_supported(struct pci_dev *dev)
{
	bool mask_supported = false;
	u16 orig, new;

	pci_cfg_access_lock(dev);

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	pci_write_config_word(dev, PCI_COMMAND,
			      orig ^ PCI_COMMAND_INTX_DISABLE);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	/*
	 * There's no way to protect against hardware bugs or detect them
	 * reliably, but as long as we know what the value should be, let's
	 * go ahead and check it.
	 */
	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
		dev_err(&dev->dev, "Command register changed from "
			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
		mask_supported = true;
		pci_write_config_word(dev, PCI_COMMAND, orig);
	}

	pci_cfg_access_unlock(dev);
	return mask_supported;
}
EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
	struct pci_bus *bus = dev->bus;
	bool mask_updated = true;
	u32 cmd_status_dword;
	u16 origcmd, newcmd;
	unsigned long flags;
	bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	raw_spin_lock_irqsave(&pci_lock, flags);

	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
		mask_updated = false;
		goto done;
	}

	origcmd = cmd_status_dword;
	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
	if (mask)
		newcmd |= PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return mask_updated;
}
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case. False is returned if no interrupt was
 * pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);

/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true. False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}
EXPORT_SYMBOL_GPL(pci_msi_off);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);

int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u32 cap;
	u16 status, control;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for the Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
	control |= PCI_EXP_DEVCTL_BCR_FLR;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);

	msleep(100);

	return 0;
}
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for the Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	if (!probe) {
		pci_cfg_access_lock(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	if (!probe) {
		device_unlock(&dev->dev);
		pci_cfg_access_unlock(dev);
	}

	return rc;
}
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function in that it saves and restores device state
 * over the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_save_state(dev);

	/*
	 * both INTx and MSI are disabled after the Interrupt Disable bit
	 * is set and the Bus Master bit is cleared.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	rc = pci_dev_reset(dev, 0);

	pci_restore_state(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have errata
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		if (v > o && dev->bus &&
		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 *    or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	int ret, cap;
	u16 ctl;

	cap = pci_pcie_cap(dev);
	if (!cap)
		return -EINVAL;

	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (!ret)
		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	return ret;
}
EXPORT_SYMBOL(pcie_get_readrq);
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	int cap, err = -EINVAL;
	u16 ctl, v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		goto out;

	cap = pci_pcie_cap(dev);
	if (!cap)
		goto out;

	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		goto out;
	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < 0)
			return mps;
		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= v;
		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
	}

out:
	return err;
}
EXPORT_SYMBOL(pcie_set_readrq);
/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 *    or appropriate error value.
 */
int pcie_get_mps(struct pci_dev *dev)
{
	int ret, cap;
	u16 ctl;

	cap = pci_pcie_cap(dev);
	if (!cap)
		return -EINVAL;

	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (!ret)
		ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);

	return ret;
}

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	int cap, err = -EINVAL;
	u16 ctl, v;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		goto out;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		goto out;
	v <<= 5;

	cap = pci_pcie_cap(dev);
	if (!cap)
		goto out;

	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		goto out;

	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
		ctl |= v;
		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
	}
out:
	return err;
}
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;
	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
						flags);
	return 0;
}
/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
							p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero if the PCI device is a target device to reassign,
 *          zero otherwise.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return (pci_specified_resource_alignment(dev) != 0);
}

ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;
	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc();
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);