1 /* SPDX-License-Identifier: GPL-2.0-only */
#include <console/console.h>
#include <commonlib/helpers.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <device/pciexp.h>
/* Extract the capability ID (bits 15:0) from an extended capability header. */
static unsigned int ext_cap_id(unsigned int cap)
{
	return cap & 0xffff;
}
/*
 * Extract the next-capability pointer (bits 31:20) from an extended
 * capability header. The low two bits are masked off since extended
 * capability offsets are DWORD-aligned.
 */
static unsigned int ext_cap_next_offset(unsigned int cap)
{
	return cap >> 20 & 0xffc;
}
22 static unsigned int find_ext_cap_offset(const struct device
*dev
, unsigned int cap_id
,
25 unsigned int this_cap_offset
= offset
;
27 while (this_cap_offset
>= PCIE_EXT_CAP_OFFSET
) {
28 const unsigned int this_cap
= pci_read_config32(dev
, this_cap_offset
);
30 /* Bail out when this request is unsupported */
31 if (this_cap
== 0xffffffff)
34 if (ext_cap_id(this_cap
) == cap_id
)
35 return this_cap_offset
;
37 this_cap_offset
= ext_cap_next_offset(this_cap
);
44 * Search for an extended capability with the ID `cap`.
46 * Returns the offset of the first matching extended
47 * capability if found, or 0 otherwise.
49 * A new search is started with `offset == 0`.
50 * To continue a search, the prior return value
51 * should be passed as `offset`.
53 unsigned int pciexp_find_extended_cap(const struct device
*dev
, unsigned int cap
,
56 unsigned int next_cap_offset
;
59 next_cap_offset
= ext_cap_next_offset(pci_read_config32(dev
, offset
));
61 next_cap_offset
= PCIE_EXT_CAP_OFFSET
;
63 return find_ext_cap_offset(dev
, cap
, next_cap_offset
);
67 * Search for a vendor-specific extended capability,
68 * with the vendor-specific ID `cap`.
70 * Returns the offset of the vendor-specific header,
71 * i.e. the offset of the extended capability + 4,
72 * or 0 if none is found.
74 * A new search is started with `offset == 0`.
75 * To continue a search, the prior return value
76 * should be passed as `offset`.
78 unsigned int pciexp_find_ext_vendor_cap(const struct device
*dev
, unsigned int cap
,
81 /* Reconstruct capability offset from vendor-specific header offset. */
86 offset
= pciexp_find_extended_cap(dev
, PCI_EXT_CAP_ID_VNDR
, offset
);
90 const unsigned int vndr_cap
= pci_read_config32(dev
, offset
+ 4);
91 if ((vndr_cap
& 0xffff) == cap
)
97 * Find a PCIe device with a given serial number, and a given VID if applicable
99 * @param serial The serial number of the device.
100 * @param vid Vendor ID of the device, may be 0 if not applicable.
101 * @param from Pointer to the device structure, used as a starting point in
102 * the linked list of all_devices, which can be 0 to start at the
103 * head of the list (i.e. all_devices).
104 * @return Pointer to the device struct.
106 struct device
*pcie_find_dsn(const uint64_t serial
, const uint16_t vid
,
125 if (from
->path
.type
== DEVICE_PATH_PCI
) {
126 cap
= pciexp_find_extended_cap(from
, PCI_EXT_CAP_ID_DSN
, 0);
128 * For PCIe device, find extended capability for serial number.
129 * The capability header is 4 bytes, followed by lower 4 bytes
130 * of serial number, then higher 4 byes of serial number.
133 dsn
.dsn_low
= pci_read_config32(from
, cap
+ 4);
134 dsn
.dsn_high
= pci_read_config32(from
, cap
+ 8);
135 vendor_id
= pci_read_config16(from
, PCI_VENDOR_ID
);
136 if ((dsn
.dsn
== serial
) && (vid
== 0 || vendor_id
== vid
))
147 static bool pcie_is_root_port(struct device
*dev
)
149 unsigned int pcie_pos
, pcie_type
;
151 pcie_pos
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
155 pcie_type
= pci_read_config16(dev
, pcie_pos
+ PCI_EXP_FLAGS
) & PCI_EXP_FLAGS_TYPE
;
158 return (pcie_type
== PCI_EXP_TYPE_ROOT_PORT
);
161 static bool pcie_is_endpoint(struct device
*dev
)
163 unsigned int pcie_pos
, pcie_type
;
165 pcie_pos
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
169 pcie_type
= pci_read_config16(dev
, pcie_pos
+ PCI_EXP_FLAGS
) & PCI_EXP_FLAGS_TYPE
;
172 return ((pcie_type
== PCI_EXP_TYPE_ENDPOINT
) || (pcie_type
== PCI_EXP_TYPE_LEG_END
));
177 * Re-train a PCIe link
179 #define PCIE_TRAIN_RETRY 10000
180 static int pciexp_retrain_link(struct device
*dev
, unsigned int cap
)
186 * Implementation note (page 633) in PCIe Specification 3.0 suggests
187 * polling the Link Training bit in the Link Status register until the
188 * value returned is 0 before setting the Retrain Link bit to 1.
189 * This is meant to avoid a race condition when using the
190 * Retrain Link mechanism.
192 for (try = PCIE_TRAIN_RETRY
; try > 0; try--) {
193 lnk
= pci_read_config16(dev
, cap
+ PCI_EXP_LNKSTA
);
194 if (!(lnk
& PCI_EXP_LNKSTA_LT
))
199 printk(BIOS_ERR
, "%s: Link Retrain timeout\n", dev_path(dev
));
203 /* Start link retraining */
204 lnk
= pci_read_config16(dev
, cap
+ PCI_EXP_LNKCTL
);
205 lnk
|= PCI_EXP_LNKCTL_RL
;
206 pci_write_config16(dev
, cap
+ PCI_EXP_LNKCTL
, lnk
);
208 /* Wait for training to complete */
209 for (try = PCIE_TRAIN_RETRY
; try > 0; try--) {
210 lnk
= pci_read_config16(dev
, cap
+ PCI_EXP_LNKSTA
);
211 if (!(lnk
& PCI_EXP_LNKSTA_LT
))
216 printk(BIOS_ERR
, "%s: Link Retrain timeout\n", dev_path(dev
));
220 static bool pciexp_is_ccc_active(struct device
*root
, unsigned int root_cap
,
221 struct device
*endp
, unsigned int endp_cap
)
223 u16 root_ccc
, endp_ccc
;
225 root_ccc
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKCTL
) & PCI_EXP_LNKCTL_CCC
;
226 endp_ccc
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
) & PCI_EXP_LNKCTL_CCC
;
227 if (root_ccc
&& endp_ccc
) {
228 printk(BIOS_INFO
, "PCIe: Common Clock Configuration already enabled\n");
235 * Check the Slot Clock Configuration for root port and endpoint
236 * and enable Common Clock Configuration if possible. If CCC is
237 * enabled the link must be retrained.
239 static void pciexp_enable_common_clock(struct device
*root
, unsigned int root_cap
,
240 struct device
*endp
, unsigned int endp_cap
)
242 u16 root_scc
, endp_scc
, lnkctl
;
244 /* No need to enable common clock if it is already active. */
245 if (pciexp_is_ccc_active(root
, root_cap
, endp
, endp_cap
))
248 /* Get Slot Clock Configuration for root port */
249 root_scc
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKSTA
);
250 root_scc
&= PCI_EXP_LNKSTA_SLC
;
252 /* Get Slot Clock Configuration for endpoint */
253 endp_scc
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKSTA
);
254 endp_scc
&= PCI_EXP_LNKSTA_SLC
;
256 /* Enable Common Clock Configuration and retrain */
257 if (root_scc
&& endp_scc
) {
258 printk(BIOS_INFO
, "Enabling Common Clock Configuration\n");
260 /* Set in endpoint */
261 lnkctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
);
262 lnkctl
|= PCI_EXP_LNKCTL_CCC
;
263 pci_write_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
265 /* Set in root port */
266 lnkctl
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKCTL
);
267 lnkctl
|= PCI_EXP_LNKCTL_CCC
;
268 pci_write_config16(root
, root_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
270 /* Retrain link if CCC was enabled */
271 pciexp_retrain_link(root
, root_cap
);
275 static void pciexp_enable_clock_power_pm(struct device
*endp
, unsigned int endp_cap
)
277 /* check if per port clkreq is supported in device */
280 endp_ca
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_LNKCAP
);
281 if ((endp_ca
& PCI_EXP_CLK_PM
) == 0) {
282 printk(BIOS_INFO
, "PCIE CLK PM is not supported by endpoint\n");
285 lnkctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
);
286 lnkctl
= lnkctl
| PCI_EXP_EN_CLK_PM
;
287 pci_write_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
290 static bool _pciexp_ltr_supported(struct device
*dev
, unsigned int cap
)
292 return pci_read_config16(dev
, cap
+ PCI_EXP_DEVCAP2
) & PCI_EXP_DEVCAP2_LTR
;
295 static bool _pciexp_ltr_enabled(struct device
*dev
, unsigned int cap
)
297 return pci_read_config16(dev
, cap
+ PCI_EXP_DEVCTL2
) & PCI_EXP_DEV2_LTR
;
300 static bool _pciexp_enable_ltr(struct device
*parent
, unsigned int parent_cap
,
301 struct device
*dev
, unsigned int cap
)
303 if (!_pciexp_ltr_supported(dev
, cap
)) {
304 printk(BIOS_DEBUG
, "%s: No LTR support\n", dev_path(dev
));
308 if (_pciexp_ltr_enabled(dev
, cap
))
312 (!_pciexp_ltr_supported(parent
, parent_cap
) ||
313 !_pciexp_ltr_enabled(parent
, parent_cap
)))
316 pci_or_config16(dev
, cap
+ PCI_EXP_DEVCTL2
, PCI_EXP_DEV2_LTR
);
317 printk(BIOS_INFO
, "%s: Enabled LTR\n", dev_path(dev
));
321 static void pciexp_enable_ltr(struct device
*dev
)
323 const unsigned int cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
328 * If we have get_ltr_max_latencies(), treat `dev` as the root.
329 * If not, let _pciexp_enable_ltr() query the parent's state.
331 struct device
*parent
= NULL
;
332 unsigned int parent_cap
= 0;
333 if (!dev
->ops
->ops_pci
|| !dev
->ops
->ops_pci
->get_ltr_max_latencies
) {
334 parent
= dev
->upstream
->dev
;
335 if (parent
->path
.type
!= DEVICE_PATH_PCI
)
337 parent_cap
= pci_find_capability(parent
, PCI_CAP_ID_PCIE
);
342 (void)_pciexp_enable_ltr(parent
, parent_cap
, dev
, cap
);
345 bool pciexp_get_ltr_max_latencies(struct device
*dev
, u16
*max_snoop
, u16
*max_nosnoop
)
347 /* Walk the hierarchy up to find get_ltr_max_latencies(). */
349 if (dev
->ops
->ops_pci
&& dev
->ops
->ops_pci
->get_ltr_max_latencies
)
351 if (dev
->upstream
->dev
== dev
|| dev
->upstream
->dev
->path
.type
!= DEVICE_PATH_PCI
)
353 dev
= dev
->upstream
->dev
;
356 dev
->ops
->ops_pci
->get_ltr_max_latencies(max_snoop
, max_nosnoop
);
360 static void pciexp_configure_ltr(struct device
*parent
, unsigned int parent_cap
,
361 struct device
*dev
, unsigned int cap
)
363 if (!_pciexp_enable_ltr(parent
, parent_cap
, dev
, cap
))
366 const unsigned int ltr_cap
= pciexp_find_extended_cap(dev
, PCIE_EXT_CAP_LTR_ID
, 0);
370 u16 max_snoop
, max_nosnoop
;
371 if (!pciexp_get_ltr_max_latencies(dev
, &max_snoop
, &max_nosnoop
))
374 pci_write_config16(dev
, ltr_cap
+ PCI_LTR_MAX_SNOOP
, max_snoop
);
375 pci_write_config16(dev
, ltr_cap
+ PCI_LTR_MAX_NOSNOOP
, max_nosnoop
);
376 printk(BIOS_INFO
, "%s: Programmed LTR max latencies\n", dev_path(dev
));
/*
 * Merge the root port's L1 Sub-State parameters (in/out via *data) with
 * the endpoint's capability register: keep the common support bits, and
 * the larger of the power-on and common-mode-restore timings.
 *
 * Returns 1 if a common set of supported L1 sub-states exists, 0 otherwise.
 */
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
					    unsigned int *data)
{
	/* T_POWER_ON scale encodings: 2us, 10us, 100us, reserved. */
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	/* Keep the larger effective T_POWER_ON (value * scale). */
	if (power_on_value * mult[power_on_scale] <
		endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}
414 static void pciexp_L1_substate_commit(struct device
*root
, struct device
*dev
,
415 unsigned int root_cap
, unsigned int end_cap
)
417 struct device
*dev_t
;
418 unsigned char L1_ss_ok
;
419 unsigned int rp_L1_support
= pci_read_config32(root
, root_cap
+ 4);
420 unsigned int L1SubStateSupport
;
421 unsigned int comm_mode_rst_time
;
422 unsigned int power_on_scale
;
423 unsigned int endp_power_on_value
;
425 for (dev_t
= dev
; dev_t
; dev_t
= dev_t
->sibling
) {
427 * rp_L1_support is init'd above from root port.
428 * it needs coordination with endpoints to reach in common.
429 * if certain endpoint doesn't support L1 Sub-State, abort
430 * this feature enabling.
432 L1_ss_ok
= pciexp_L1_substate_cal(dev_t
, end_cap
,
438 L1SubStateSupport
= rp_L1_support
& 0xf;
439 comm_mode_rst_time
= (rp_L1_support
>> 8) & 0xff;
440 power_on_scale
= (rp_L1_support
>> 16) & 0x3;
441 endp_power_on_value
= (rp_L1_support
>> 19) & 0x1f;
443 printk(BIOS_INFO
, "L1 Sub-State supported from root port %d\n",
444 root
->path
.pci
.devfn
>> 3);
445 printk(BIOS_INFO
, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport
);
446 printk(BIOS_INFO
, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time
);
447 printk(BIOS_INFO
, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
448 endp_power_on_value
, power_on_scale
);
450 pci_update_config32(root
, root_cap
+ 0x08, ~0xff00,
451 (comm_mode_rst_time
<< 8));
453 pci_update_config32(root
, root_cap
+ 0x0c, 0xffffff04,
454 (endp_power_on_value
<< 3) | (power_on_scale
));
456 /* TODO: 0xa0, 2 are values that work on some chipsets but really
457 * should be determined dynamically by looking at downstream devices.
459 pci_update_config32(root
, root_cap
+ 0x08,
460 ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK
|
461 ASPM_LTR_L12_THRESHOLD_SCALE_MASK
),
462 (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET
) |
463 (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET
));
465 pci_update_config32(root
, root_cap
+ 0x08, ~0x1f,
468 for (dev_t
= dev
; dev_t
; dev_t
= dev_t
->sibling
) {
469 pci_update_config32(dev_t
, end_cap
+ 0x0c, 0xffffff04,
470 (endp_power_on_value
<< 3) | (power_on_scale
));
472 pci_update_config32(dev_t
, end_cap
+ 0x08,
473 ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK
|
474 ASPM_LTR_L12_THRESHOLD_SCALE_MASK
),
475 (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET
) |
476 (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET
));
478 pci_update_config32(dev_t
, end_cap
+ 0x08, ~0x1f,
483 static void pciexp_config_L1_sub_state(struct device
*root
, struct device
*dev
)
485 unsigned int root_cap
, end_cap
;
487 /* Do it for function 0 only */
488 if (dev
->path
.pci
.devfn
& 0x7)
491 root_cap
= pciexp_find_extended_cap(root
, PCIE_EXT_CAP_L1SS_ID
, 0);
495 end_cap
= pciexp_find_extended_cap(dev
, PCIE_EXT_CAP_L1SS_ID
, 0);
497 if (dev
->vendor
!= PCI_VID_INTEL
)
500 end_cap
= pciexp_find_ext_vendor_cap(dev
, 0xcafe, 0);
505 pciexp_L1_substate_commit(root
, dev
, root_cap
, end_cap
);
509 * Determine the ASPM L0s or L1 exit latency for a link
510 * by checking both root port and endpoint and returning
511 * the highest latency value.
513 static int pciexp_aspm_latency(struct device
*root
, unsigned int root_cap
,
514 struct device
*endp
, unsigned int endp_cap
,
517 int root_lat
= 0, endp_lat
= 0;
518 u32 root_lnkcap
, endp_lnkcap
;
520 root_lnkcap
= pci_read_config32(root
, root_cap
+ PCI_EXP_LNKCAP
);
521 endp_lnkcap
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_LNKCAP
);
523 /* Make sure the link supports this ASPM type by checking
524 * capability bits 11:10 with aspm_type offset by 1 */
525 if (!(root_lnkcap
& (1 << (type
+ 9))) ||
526 !(endp_lnkcap
& (1 << (type
+ 9))))
529 /* Find the one with higher latency */
532 root_lat
= (root_lnkcap
& PCI_EXP_LNKCAP_L0SEL
) >> 12;
533 endp_lat
= (endp_lnkcap
& PCI_EXP_LNKCAP_L0SEL
) >> 12;
536 root_lat
= (root_lnkcap
& PCI_EXP_LNKCAP_L1EL
) >> 15;
537 endp_lat
= (endp_lnkcap
& PCI_EXP_LNKCAP_L1EL
) >> 15;
543 return (endp_lat
> root_lat
) ? endp_lat
: root_lat
;
547 * Enable ASPM on PCIe root port and endpoint.
549 static void pciexp_enable_aspm(struct device
*root
, unsigned int root_cap
,
550 struct device
*endp
, unsigned int endp_cap
)
552 const char *aspm_type_str
[] = { "None", "L0s", "L1", "L0s and L1" };
553 enum aspm_type apmc
= PCIE_ASPM_NONE
;
554 int exit_latency
, ok_latency
;
558 if (endp
->disable_pcie_aspm
)
561 /* Get endpoint device capabilities for acceptable limits */
562 devcap
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_DEVCAP
);
564 /* Enable L0s if it is within endpoint acceptable limit */
565 ok_latency
= (devcap
& PCI_EXP_DEVCAP_L0S
) >> 6;
566 exit_latency
= pciexp_aspm_latency(root
, root_cap
, endp
, endp_cap
,
568 if (exit_latency
>= 0 && exit_latency
<= ok_latency
)
569 apmc
|= PCIE_ASPM_L0S
;
571 /* Enable L1 if it is within endpoint acceptable limit */
572 ok_latency
= (devcap
& PCI_EXP_DEVCAP_L1
) >> 9;
573 exit_latency
= pciexp_aspm_latency(root
, root_cap
, endp
, endp_cap
,
575 if (exit_latency
>= 0 && exit_latency
<= ok_latency
)
576 apmc
|= PCIE_ASPM_L1
;
578 if (apmc
!= PCIE_ASPM_NONE
) {
579 /* Set APMC in root port first */
580 lnkctl
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKCTL
);
582 pci_write_config16(root
, root_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
584 /* Set APMC in endpoint device next */
585 lnkctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
);
587 pci_write_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
590 printk(BIOS_INFO
, "ASPM: Enabled %s\n", aspm_type_str
[apmc
]);
593 static void pciexp_dev_set_max_payload_size(struct device
*dev
, unsigned int max_payload
)
596 unsigned int pcie_cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
601 devctl
= pci_read_config16(dev
, pcie_cap
+ PCI_EXP_DEVCTL
);
602 devctl
&= ~PCI_EXP_DEVCTL_PAYLOAD
;
604 * Should never overflow to higher bits, due to how max_payload is
605 * guarded in this file.
607 devctl
|= max_payload
<< 5;
608 pci_write_config16(dev
, pcie_cap
+ PCI_EXP_DEVCTL
, devctl
);
611 static unsigned int pciexp_dev_get_current_max_payload_size(struct device
*dev
)
614 unsigned int pcie_cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
619 devctl
= pci_read_config16(dev
, pcie_cap
+ PCI_EXP_DEVCTL
);
620 devctl
&= PCI_EXP_DEVCTL_PAYLOAD
;
621 return (devctl
>> 5);
624 static unsigned int pciexp_dev_get_max_payload_size_cap(struct device
*dev
)
627 unsigned int pcie_cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
632 devcap
= pci_read_config16(dev
, pcie_cap
+ PCI_EXP_DEVCAP
);
633 return (devcap
& PCI_EXP_DEVCAP_PAYLOAD
);
637 * Set max payload size of a parent based on max payload size capability of the child.
639 static void pciexp_configure_max_payload_size(struct device
*parent
, struct device
*child
)
641 unsigned int child_max_payload
, parent_max_payload
, max_payload
;
643 /* Get max payload size supported by child */
644 child_max_payload
= pciexp_dev_get_current_max_payload_size(child
);
645 /* Get max payload size configured by parent */
646 parent_max_payload
= pciexp_dev_get_current_max_payload_size(parent
);
647 /* Set max payload to smaller of the reported device capability or parent config. */
648 max_payload
= MIN(child_max_payload
, parent_max_payload
);
650 if (max_payload
> 5) {
651 /* Values 6 and 7 are reserved in PCIe 3.0 specs. */
652 printk(BIOS_ERR
, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
657 if (max_payload
!= parent_max_payload
) {
658 pciexp_dev_set_max_payload_size(parent
, max_payload
);
659 printk(BIOS_INFO
, "%s: Max_Payload_Size adjusted to %d\n", dev_path(parent
),
660 (1 << (max_payload
+ 7)));
665 * Clear Lane Error State at the end of PCIe link training.
666 * Lane error status is cleared if PCIEXP_LANE_ERR_STAT_CLEAR is set.
667 * Lane error is normal during link training, so we need to clear it.
668 * At this moment, link has been used, but for a very short duration.
670 static void clear_lane_error_status(struct device
*dev
)
675 pos
= pciexp_find_extended_cap(dev
, PCI_EXP_SEC_CAP_ID
, 0);
679 reg32
= pci_read_config32(dev
, pos
+ PCI_EXP_SEC_LANE_ERR_STATUS
);
683 printk(BIOS_DEBUG
, "%s: Clear Lane Error Status.\n", dev_path(dev
));
684 printk(BIOS_DEBUG
, "LaneErrStat:0x%x\n", reg32
);
685 pci_write_config32(dev
, pos
+ PCI_EXP_SEC_LANE_ERR_STATUS
, reg32
);
688 static void pciexp_tune_dev(struct device
*dev
)
690 struct device
*root
= dev
->upstream
->dev
;
691 unsigned int root_cap
, cap
;
693 cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
697 root_cap
= pci_find_capability(root
, PCI_CAP_ID_PCIE
);
701 /* Check for and enable Common Clock */
702 if (CONFIG(PCIEXP_COMMON_CLOCK
))
703 pciexp_enable_common_clock(root
, root_cap
, dev
, cap
);
705 /* Check if per port CLK req is supported by endpoint*/
706 if (CONFIG(PCIEXP_CLK_PM
))
707 pciexp_enable_clock_power_pm(dev
, cap
);
709 /* Enable L1 Sub-State when both root port and endpoint support */
710 if (CONFIG(PCIEXP_L1_SUB_STATE
))
711 pciexp_config_L1_sub_state(root
, dev
);
713 /* Check for and enable ASPM */
714 if (CONFIG(PCIEXP_ASPM
))
715 pciexp_enable_aspm(root
, root_cap
, dev
, cap
);
717 /* Clear PCIe Lane Error Status */
718 if (CONFIG(PCIEXP_LANE_ERR_STAT_CLEAR
))
719 clear_lane_error_status(root
);
721 /* Set the Max Payload Size to the maximum supported capability for this device */
722 if (pcie_is_endpoint(dev
))
723 pciexp_dev_set_max_payload_size(dev
, pciexp_dev_get_max_payload_size_cap(dev
));
725 /* Limit the parent's Max Payload Size if needed */
726 pciexp_configure_max_payload_size(root
, dev
);
728 pciexp_configure_ltr(root
, root_cap
, dev
, cap
);
731 static void pciexp_sync_max_payload_size(struct bus
*bus
, unsigned int max_payload
)
733 struct device
*child
;
735 /* Set the max payload for children on the bus and their children, etc. */
736 for (child
= bus
->children
; child
; child
= child
->sibling
) {
740 pciexp_dev_set_max_payload_size(child
, max_payload
);
742 if (child
->downstream
)
743 pciexp_sync_max_payload_size(child
->downstream
, max_payload
);
747 void pciexp_scan_bus(struct bus
*bus
, unsigned int min_devfn
,
748 unsigned int max_devfn
)
750 struct device
*child
;
751 unsigned int max_payload
;
753 pciexp_enable_ltr(bus
->dev
);
756 * Set the Max Payload Size to the maximum supported capability for this bridge.
757 * This value will be used in pciexp_tune_dev to limit the Max Payload size if needed.
759 max_payload
= pciexp_dev_get_max_payload_size_cap(bus
->dev
);
760 pciexp_dev_set_max_payload_size(bus
->dev
, max_payload
);
762 pci_scan_bus(bus
, min_devfn
, max_devfn
);
764 for (child
= bus
->children
; child
; child
= child
->sibling
) {
765 if (child
->path
.type
!= DEVICE_PATH_PCI
)
767 if ((child
->path
.pci
.devfn
< min_devfn
) ||
768 (child
->path
.pci
.devfn
> max_devfn
)) {
771 pciexp_tune_dev(child
);
775 * Now the root port's Max Payload Size should be set to the highest
776 * possible value supported by all devices under a given root port.
777 * Propagate that value down from root port to all devices, so the Max
778 * Payload Size is equal on all devices, as some devices may have
779 * different capabilities and the programmed value depends on the
780 * order of device population the in the subtree.
782 if (pcie_is_root_port(bus
->dev
)) {
783 max_payload
= pciexp_dev_get_current_max_payload_size(bus
->dev
);
785 printk(BIOS_INFO
, "%s: Setting Max_Payload_Size to %d for devices under this"
786 " root port\n", dev_path(bus
->dev
), 1 << (max_payload
+ 7));
788 pciexp_sync_max_payload_size(bus
, max_payload
);
792 void pciexp_scan_bridge(struct device
*dev
)
794 do_pci_scan_bridge(dev
, pciexp_scan_bus
);
797 /** Default device operations for PCI Express bridges */
798 static struct pci_operations pciexp_bus_ops_pci
= {
802 struct device_operations default_pciexp_ops_bus
= {
803 .read_resources
= pci_bus_read_resources
,
804 .set_resources
= pci_dev_set_resources
,
805 .enable_resources
= pci_bus_enable_resources
,
806 .scan_bus
= pciexp_scan_bridge
,
807 .reset_bus
= pci_bus_reset
,
808 .ops_pci
= &pciexp_bus_ops_pci
,
811 static void pciexp_hotplug_dummy_read_resources(struct device
*dev
)
813 struct resource
*resource
;
815 /* Add extra memory space */
816 resource
= new_resource(dev
, 0x10);
817 resource
->size
= CONFIG_PCIEXP_HOTPLUG_MEM
;
818 resource
->align
= 12;
820 resource
->limit
= 0xffffffff;
821 resource
->flags
|= IORESOURCE_MEM
;
823 /* Add extra prefetchable memory space */
824 resource
= new_resource(dev
, 0x14);
825 resource
->size
= CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM
;
826 resource
->align
= 12;
828 resource
->limit
= 0xffffffffffffffff;
829 resource
->flags
|= IORESOURCE_MEM
| IORESOURCE_PREFETCH
;
831 /* Set resource flag requesting allocation above 4G boundary. */
832 if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G
))
833 resource
->flags
|= IORESOURCE_ABOVE_4G
;
835 /* Add extra I/O space */
836 resource
= new_resource(dev
, 0x18);
837 resource
->size
= CONFIG_PCIEXP_HOTPLUG_IO
;
838 resource
->align
= 12;
840 resource
->limit
= 0xffff;
841 resource
->flags
|= IORESOURCE_IO
;
844 static struct device_operations pciexp_hotplug_dummy_ops
= {
845 .read_resources
= pciexp_hotplug_dummy_read_resources
,
846 .set_resources
= noop_set_resources
,
849 void pciexp_hotplug_scan_bridge(struct device
*dev
)
851 dev
->hotplug_port
= 1;
852 dev
->hotplug_buses
= CONFIG_PCIEXP_HOTPLUG_BUSES
;
854 /* Normal PCIe Scan */
855 pciexp_scan_bridge(dev
);
857 /* Add dummy slot to preserve resources, must happen after bus scan */
858 struct device
*dummy
;
859 struct device_path dummy_path
= { .type
= DEVICE_PATH_NONE
};
860 dummy
= alloc_dev(dev
->downstream
, &dummy_path
);
861 dummy
->ops
= &pciexp_hotplug_dummy_ops
;
864 struct device_operations default_pciexp_hotplug_ops_bus
= {
865 .read_resources
= pci_bus_read_resources
,
866 .set_resources
= pci_dev_set_resources
,
867 .enable_resources
= pci_bus_enable_resources
,
868 .scan_bus
= pciexp_hotplug_scan_bridge
,
869 .reset_bus
= pci_bus_reset
,
870 .ops_pci
= &pciexp_bus_ops_pci
,