1 /* SPDX-License-Identifier: GPL-2.0-only */
#include <console/console.h>
#include <commonlib/helpers.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <device/pciexp.h>
/* Extract the capability ID (bits 15:0) from an extended capability header. */
static unsigned int ext_cap_id(unsigned int cap)
{
	return cap & 0xffff;
}
/*
 * Extract the next-capability offset (bits 31:20) from an extended
 * capability header. The low two bits are masked off since capability
 * offsets are DWORD-aligned.
 */
static unsigned int ext_cap_next_offset(unsigned int cap)
{
	return cap >> 20 & 0xffc;
}
22 static unsigned int find_ext_cap_offset(const struct device
*dev
, unsigned int cap_id
,
25 unsigned int this_cap_offset
= offset
;
27 while (this_cap_offset
>= PCIE_EXT_CAP_OFFSET
) {
28 const unsigned int this_cap
= pci_read_config32(dev
, this_cap_offset
);
30 /* Bail out when this request is unsupported */
31 if (this_cap
== 0xffffffff)
34 if (ext_cap_id(this_cap
) == cap_id
)
35 return this_cap_offset
;
37 this_cap_offset
= ext_cap_next_offset(this_cap
);
44 * Search for an extended capability with the ID `cap`.
46 * Returns the offset of the first matching extended
47 * capability if found, or 0 otherwise.
49 * A new search is started with `offset == 0`.
50 * To continue a search, the prior return value
51 * should be passed as `offset`.
53 unsigned int pciexp_find_extended_cap(const struct device
*dev
, unsigned int cap
,
56 unsigned int next_cap_offset
;
59 next_cap_offset
= ext_cap_next_offset(pci_read_config32(dev
, offset
));
61 next_cap_offset
= PCIE_EXT_CAP_OFFSET
;
63 return find_ext_cap_offset(dev
, cap
, next_cap_offset
);
67 * Search for a vendor-specific extended capability,
68 * with the vendor-specific ID `cap`.
70 * Returns the offset of the vendor-specific header,
71 * i.e. the offset of the extended capability + 4,
72 * or 0 if none is found.
74 * A new search is started with `offset == 0`.
75 * To continue a search, the prior return value
76 * should be passed as `offset`.
78 unsigned int pciexp_find_ext_vendor_cap(const struct device
*dev
, unsigned int cap
,
81 /* Reconstruct capability offset from vendor-specific header offset. */
86 offset
= pciexp_find_extended_cap(dev
, PCI_EXT_CAP_ID_VNDR
, offset
);
90 const unsigned int vndr_cap
= pci_read_config32(dev
, offset
+ 4);
91 if ((vndr_cap
& 0xffff) == cap
)
97 * Find a PCIe device with a given serial number, and a given VID if applicable
99 * @param serial The serial number of the device.
100 * @param vid Vendor ID of the device, may be 0 if not applicable.
101 * @param from Pointer to the device structure, used as a starting point in
102 * the linked list of all_devices, which can be 0 to start at the
103 * head of the list (i.e. all_devices).
104 * @return Pointer to the device struct.
106 struct device
*pcie_find_dsn(const uint64_t serial
, const uint16_t vid
,
125 if (from
->path
.type
== DEVICE_PATH_PCI
) {
126 cap
= pciexp_find_extended_cap(from
, PCI_EXT_CAP_ID_DSN
, 0);
128 * For PCIe device, find extended capability for serial number.
129 * The capability header is 4 bytes, followed by lower 4 bytes
130 * of serial number, then higher 4 byes of serial number.
133 dsn
.dsn_low
= pci_read_config32(from
, cap
+ 4);
134 dsn
.dsn_high
= pci_read_config32(from
, cap
+ 8);
135 vendor_id
= pci_read_config16(from
, PCI_VENDOR_ID
);
136 if ((dsn
.dsn
== serial
) && (vid
== 0 || vendor_id
== vid
))
148 * Returns true if the device is a hot-plug capable PCIe device.
150 * @param dev Pointer to the device structure.
152 * @return True when marked hot-plug capable.
154 bool pciexp_dev_is_slot_hot_plug_cap(struct device
*dev
)
157 unsigned int pcie_cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
162 sltcap
= pci_read_config16(dev
, pcie_cap
+ PCI_EXP_SLTCAP
);
163 sltcap
&= PCI_EXP_SLTCAP_HPC
;
167 static bool pcie_is_root_port(struct device
*dev
)
169 unsigned int pcie_pos
, pcie_type
;
171 pcie_pos
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
175 pcie_type
= pci_read_config16(dev
, pcie_pos
+ PCI_EXP_FLAGS
) & PCI_EXP_FLAGS_TYPE
;
178 return (pcie_type
== PCI_EXP_TYPE_ROOT_PORT
);
181 static bool pcie_is_endpoint(struct device
*dev
)
183 unsigned int pcie_pos
, pcie_type
;
185 pcie_pos
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
189 pcie_type
= pci_read_config16(dev
, pcie_pos
+ PCI_EXP_FLAGS
) & PCI_EXP_FLAGS_TYPE
;
192 return ((pcie_type
== PCI_EXP_TYPE_ENDPOINT
) || (pcie_type
== PCI_EXP_TYPE_LEG_END
));
197 * Re-train a PCIe link
199 #define PCIE_TRAIN_RETRY 10000
200 static int pciexp_retrain_link(struct device
*dev
, unsigned int cap
)
206 * Implementation note (page 633) in PCIe Specification 3.0 suggests
207 * polling the Link Training bit in the Link Status register until the
208 * value returned is 0 before setting the Retrain Link bit to 1.
209 * This is meant to avoid a race condition when using the
210 * Retrain Link mechanism.
212 for (try = PCIE_TRAIN_RETRY
; try > 0; try--) {
213 lnk
= pci_read_config16(dev
, cap
+ PCI_EXP_LNKSTA
);
214 if (!(lnk
& PCI_EXP_LNKSTA_LT
))
219 printk(BIOS_ERR
, "%s: Link Retrain timeout\n", dev_path(dev
));
223 /* Start link retraining */
224 lnk
= pci_read_config16(dev
, cap
+ PCI_EXP_LNKCTL
);
225 lnk
|= PCI_EXP_LNKCTL_RL
;
226 pci_write_config16(dev
, cap
+ PCI_EXP_LNKCTL
, lnk
);
228 /* Wait for training to complete */
229 for (try = PCIE_TRAIN_RETRY
; try > 0; try--) {
230 lnk
= pci_read_config16(dev
, cap
+ PCI_EXP_LNKSTA
);
231 if (!(lnk
& PCI_EXP_LNKSTA_LT
))
236 printk(BIOS_ERR
, "%s: Link Retrain timeout\n", dev_path(dev
));
240 static bool pciexp_is_ccc_active(struct device
*root
, unsigned int root_cap
,
241 struct device
*endp
, unsigned int endp_cap
)
243 u16 root_ccc
, endp_ccc
;
245 root_ccc
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKCTL
) & PCI_EXP_LNKCTL_CCC
;
246 endp_ccc
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
) & PCI_EXP_LNKCTL_CCC
;
247 if (root_ccc
&& endp_ccc
) {
248 printk(BIOS_INFO
, "PCIe: Common Clock Configuration already enabled\n");
255 * Check the Slot Clock Configuration for root port and endpoint
256 * and enable Common Clock Configuration if possible. If CCC is
257 * enabled the link must be retrained.
259 static void pciexp_enable_common_clock(struct device
*root
, unsigned int root_cap
,
260 struct device
*endp
, unsigned int endp_cap
)
262 u16 root_scc
, endp_scc
, lnkctl
;
264 /* No need to enable common clock if it is already active. */
265 if (pciexp_is_ccc_active(root
, root_cap
, endp
, endp_cap
))
268 /* Get Slot Clock Configuration for root port */
269 root_scc
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKSTA
);
270 root_scc
&= PCI_EXP_LNKSTA_SLC
;
272 /* Get Slot Clock Configuration for endpoint */
273 endp_scc
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKSTA
);
274 endp_scc
&= PCI_EXP_LNKSTA_SLC
;
276 /* Enable Common Clock Configuration and retrain */
277 if (root_scc
&& endp_scc
) {
278 printk(BIOS_INFO
, "Enabling Common Clock Configuration\n");
280 /* Set in endpoint */
281 lnkctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
);
282 lnkctl
|= PCI_EXP_LNKCTL_CCC
;
283 pci_write_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
285 /* Set in root port */
286 lnkctl
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKCTL
);
287 lnkctl
|= PCI_EXP_LNKCTL_CCC
;
288 pci_write_config16(root
, root_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
290 /* Retrain link if CCC was enabled */
291 pciexp_retrain_link(root
, root_cap
);
295 static void pciexp_enable_clock_power_pm(struct device
*endp
, unsigned int endp_cap
)
297 /* check if per port clkreq is supported in device */
300 endp_ca
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_LNKCAP
);
301 if ((endp_ca
& PCI_EXP_CLK_PM
) == 0) {
302 printk(BIOS_INFO
, "PCIE CLK PM is not supported by endpoint\n");
305 lnkctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
);
306 lnkctl
= lnkctl
| PCI_EXP_EN_CLK_PM
;
307 pci_write_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
310 static bool _pciexp_ltr_supported(struct device
*dev
, unsigned int cap
)
312 return pci_read_config16(dev
, cap
+ PCI_EXP_DEVCAP2
) & PCI_EXP_DEVCAP2_LTR
;
315 static bool _pciexp_ltr_enabled(struct device
*dev
, unsigned int cap
)
317 return pci_read_config16(dev
, cap
+ PCI_EXP_DEVCTL2
) & PCI_EXP_DEV2_LTR
;
320 static bool _pciexp_enable_ltr(struct device
*parent
, unsigned int parent_cap
,
321 struct device
*dev
, unsigned int cap
)
323 if (!_pciexp_ltr_supported(dev
, cap
)) {
324 printk(BIOS_DEBUG
, "%s: No LTR support\n", dev_path(dev
));
328 if (_pciexp_ltr_enabled(dev
, cap
))
332 (!_pciexp_ltr_supported(parent
, parent_cap
) ||
333 !_pciexp_ltr_enabled(parent
, parent_cap
)))
336 pci_or_config16(dev
, cap
+ PCI_EXP_DEVCTL2
, PCI_EXP_DEV2_LTR
);
337 printk(BIOS_INFO
, "%s: Enabled LTR\n", dev_path(dev
));
341 static void pciexp_enable_ltr(struct device
*dev
)
343 const unsigned int cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
348 * If we have get_ltr_max_latencies(), treat `dev` as the root.
349 * If not, let _pciexp_enable_ltr() query the parent's state.
351 struct device
*parent
= NULL
;
352 unsigned int parent_cap
= 0;
353 if (!dev
->ops
->ops_pci
|| !dev
->ops
->ops_pci
->get_ltr_max_latencies
) {
354 parent
= dev
->upstream
->dev
;
355 if (parent
->path
.type
!= DEVICE_PATH_PCI
)
357 parent_cap
= pci_find_capability(parent
, PCI_CAP_ID_PCIE
);
362 (void)_pciexp_enable_ltr(parent
, parent_cap
, dev
, cap
);
365 bool pciexp_get_ltr_max_latencies(struct device
*dev
, u16
*max_snoop
, u16
*max_nosnoop
)
367 /* Walk the hierarchy up to find get_ltr_max_latencies(). */
369 if (dev
->ops
->ops_pci
&& dev
->ops
->ops_pci
->get_ltr_max_latencies
)
371 if (dev
->upstream
->dev
== dev
|| dev
->upstream
->dev
->path
.type
!= DEVICE_PATH_PCI
)
373 dev
= dev
->upstream
->dev
;
376 dev
->ops
->ops_pci
->get_ltr_max_latencies(max_snoop
, max_nosnoop
);
380 static void pciexp_configure_ltr(struct device
*parent
, unsigned int parent_cap
,
381 struct device
*dev
, unsigned int cap
)
383 if (!_pciexp_enable_ltr(parent
, parent_cap
, dev
, cap
))
386 const unsigned int ltr_cap
= pciexp_find_extended_cap(dev
, PCIE_EXT_CAP_LTR_ID
, 0);
390 u16 max_snoop
, max_nosnoop
;
391 if (!pciexp_get_ltr_max_latencies(dev
, &max_snoop
, &max_nosnoop
))
394 pci_write_config16(dev
, ltr_cap
+ PCI_LTR_MAX_SNOOP
, max_snoop
);
395 pci_write_config16(dev
, ltr_cap
+ PCI_LTR_MAX_NOSNOOP
, max_nosnoop
);
396 printk(BIOS_INFO
, "%s: Programmed LTR max latencies\n", dev_path(dev
));
/*
 * Merge the root port's accumulated L1 Sub-State parameters (*data) with
 * the endpoint's L1SS capability register, keeping the worst-case timing
 * values. Returns 0 (and leaves *data untouched) when the endpoint shares
 * no L1 Sub-State support with the root, 1 otherwise.
 */
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
					    unsigned int *data)
{
	/* Tpower_on scale encodings: 2 us, 10 us, 100 us; 3 is reserved. */
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	/* Keep the larger Tpower_on of the two link partners. */
	if (power_on_value * mult[power_on_scale] <
	    endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	/* Keep the larger common-mode restore time. */
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}
434 static void pciexp_L1_substate_commit(struct device
*root
, struct device
*dev
,
435 unsigned int root_cap
, unsigned int end_cap
)
437 struct device
*dev_t
;
438 unsigned char L1_ss_ok
;
439 unsigned int rp_L1_support
= pci_read_config32(root
, root_cap
+ 4);
440 unsigned int L1SubStateSupport
;
441 unsigned int comm_mode_rst_time
;
442 unsigned int power_on_scale
;
443 unsigned int endp_power_on_value
;
445 for (dev_t
= dev
; dev_t
; dev_t
= dev_t
->sibling
) {
447 * rp_L1_support is init'd above from root port.
448 * it needs coordination with endpoints to reach in common.
449 * if certain endpoint doesn't support L1 Sub-State, abort
450 * this feature enabling.
452 L1_ss_ok
= pciexp_L1_substate_cal(dev_t
, end_cap
,
458 L1SubStateSupport
= rp_L1_support
& 0xf;
459 comm_mode_rst_time
= (rp_L1_support
>> 8) & 0xff;
460 power_on_scale
= (rp_L1_support
>> 16) & 0x3;
461 endp_power_on_value
= (rp_L1_support
>> 19) & 0x1f;
463 printk(BIOS_INFO
, "L1 Sub-State supported from root port %d\n",
464 root
->path
.pci
.devfn
>> 3);
465 printk(BIOS_INFO
, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport
);
466 printk(BIOS_INFO
, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time
);
467 printk(BIOS_INFO
, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
468 endp_power_on_value
, power_on_scale
);
470 pci_update_config32(root
, root_cap
+ 0x08, ~0xff00,
471 (comm_mode_rst_time
<< 8));
473 pci_update_config32(root
, root_cap
+ 0x0c, 0xffffff04,
474 (endp_power_on_value
<< 3) | (power_on_scale
));
476 /* TODO: 0xa0, 2 are values that work on some chipsets but really
477 * should be determined dynamically by looking at downstream devices.
479 pci_update_config32(root
, root_cap
+ 0x08,
480 ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK
|
481 ASPM_LTR_L12_THRESHOLD_SCALE_MASK
),
482 (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET
) |
483 (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET
));
485 pci_update_config32(root
, root_cap
+ 0x08, ~0x1f,
488 for (dev_t
= dev
; dev_t
; dev_t
= dev_t
->sibling
) {
489 pci_update_config32(dev_t
, end_cap
+ 0x0c, 0xffffff04,
490 (endp_power_on_value
<< 3) | (power_on_scale
));
492 pci_update_config32(dev_t
, end_cap
+ 0x08,
493 ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK
|
494 ASPM_LTR_L12_THRESHOLD_SCALE_MASK
),
495 (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET
) |
496 (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET
));
498 pci_update_config32(dev_t
, end_cap
+ 0x08, ~0x1f,
503 static void pciexp_config_L1_sub_state(struct device
*root
, struct device
*dev
)
505 unsigned int root_cap
, end_cap
;
507 /* Do it for function 0 only */
508 if (dev
->path
.pci
.devfn
& 0x7)
511 root_cap
= pciexp_find_extended_cap(root
, PCIE_EXT_CAP_L1SS_ID
, 0);
515 end_cap
= pciexp_find_extended_cap(dev
, PCIE_EXT_CAP_L1SS_ID
, 0);
517 if (dev
->vendor
!= PCI_VID_INTEL
)
520 end_cap
= pciexp_find_ext_vendor_cap(dev
, 0xcafe, 0);
525 pciexp_L1_substate_commit(root
, dev
, root_cap
, end_cap
);
529 * Determine the ASPM L0s or L1 exit latency for a link
530 * by checking both root port and endpoint and returning
531 * the highest latency value.
533 static int pciexp_aspm_latency(struct device
*root
, unsigned int root_cap
,
534 struct device
*endp
, unsigned int endp_cap
,
537 int root_lat
= 0, endp_lat
= 0;
538 u32 root_lnkcap
, endp_lnkcap
;
540 root_lnkcap
= pci_read_config32(root
, root_cap
+ PCI_EXP_LNKCAP
);
541 endp_lnkcap
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_LNKCAP
);
543 /* Make sure the link supports this ASPM type by checking
544 * capability bits 11:10 with aspm_type offset by 1 */
545 if (!(root_lnkcap
& (1 << (type
+ 9))) ||
546 !(endp_lnkcap
& (1 << (type
+ 9))))
549 /* Find the one with higher latency */
552 root_lat
= (root_lnkcap
& PCI_EXP_LNKCAP_L0SEL
) >> 12;
553 endp_lat
= (endp_lnkcap
& PCI_EXP_LNKCAP_L0SEL
) >> 12;
556 root_lat
= (root_lnkcap
& PCI_EXP_LNKCAP_L1EL
) >> 15;
557 endp_lat
= (endp_lnkcap
& PCI_EXP_LNKCAP_L1EL
) >> 15;
563 return (endp_lat
> root_lat
) ? endp_lat
: root_lat
;
567 * Enable ASPM on PCIe root port and endpoint.
569 static void pciexp_enable_aspm(struct device
*root
, unsigned int root_cap
,
570 struct device
*endp
, unsigned int endp_cap
)
572 const char *aspm_type_str
[] = { "None", "L0s", "L1", "L0s and L1" };
573 enum aspm_type apmc
= PCIE_ASPM_NONE
;
574 int exit_latency
, ok_latency
;
578 if (endp
->disable_pcie_aspm
)
581 /* Get endpoint device capabilities for acceptable limits */
582 devcap
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_DEVCAP
);
584 /* Enable L0s if it is within endpoint acceptable limit */
585 ok_latency
= (devcap
& PCI_EXP_DEVCAP_L0S
) >> 6;
586 exit_latency
= pciexp_aspm_latency(root
, root_cap
, endp
, endp_cap
,
588 if (exit_latency
>= 0 && exit_latency
<= ok_latency
)
589 apmc
|= PCIE_ASPM_L0S
;
591 /* Enable L1 if it is within endpoint acceptable limit */
592 ok_latency
= (devcap
& PCI_EXP_DEVCAP_L1
) >> 9;
593 exit_latency
= pciexp_aspm_latency(root
, root_cap
, endp
, endp_cap
,
595 if (exit_latency
>= 0 && exit_latency
<= ok_latency
)
596 apmc
|= PCIE_ASPM_L1
;
598 if (apmc
!= PCIE_ASPM_NONE
) {
599 /* Set APMC in root port first */
600 lnkctl
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKCTL
);
602 pci_write_config16(root
, root_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
604 /* Set APMC in endpoint device next */
605 lnkctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
);
607 pci_write_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
610 printk(BIOS_INFO
, "ASPM: Enabled %s\n", aspm_type_str
[apmc
]);
613 static void pciexp_dev_set_max_payload_size(struct device
*dev
, unsigned int max_payload
)
616 unsigned int pcie_cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
621 devctl
= pci_read_config16(dev
, pcie_cap
+ PCI_EXP_DEVCTL
);
622 devctl
&= ~PCI_EXP_DEVCTL_PAYLOAD
;
624 * Should never overflow to higher bits, due to how max_payload is
625 * guarded in this file.
627 devctl
|= max_payload
<< 5;
628 pci_write_config16(dev
, pcie_cap
+ PCI_EXP_DEVCTL
, devctl
);
631 static unsigned int pciexp_dev_get_current_max_payload_size(struct device
*dev
)
634 unsigned int pcie_cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
639 devctl
= pci_read_config16(dev
, pcie_cap
+ PCI_EXP_DEVCTL
);
640 devctl
&= PCI_EXP_DEVCTL_PAYLOAD
;
641 return (devctl
>> 5);
644 static unsigned int pciexp_dev_get_max_payload_size_cap(struct device
*dev
)
647 unsigned int pcie_cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
652 devcap
= pci_read_config16(dev
, pcie_cap
+ PCI_EXP_DEVCAP
);
653 return (devcap
& PCI_EXP_DEVCAP_PAYLOAD
);
657 * Set max payload size of a parent based on max payload size capability of the child.
659 static void pciexp_configure_max_payload_size(struct device
*parent
, struct device
*child
)
661 unsigned int child_max_payload
, parent_max_payload
, max_payload
;
663 /* Get max payload size supported by child */
664 child_max_payload
= pciexp_dev_get_current_max_payload_size(child
);
665 /* Get max payload size configured by parent */
666 parent_max_payload
= pciexp_dev_get_current_max_payload_size(parent
);
667 /* Set max payload to smaller of the reported device capability or parent config. */
668 max_payload
= MIN(child_max_payload
, parent_max_payload
);
670 if (max_payload
> 5) {
671 /* Values 6 and 7 are reserved in PCIe 3.0 specs. */
672 printk(BIOS_ERR
, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
677 if (max_payload
!= parent_max_payload
) {
678 pciexp_dev_set_max_payload_size(parent
, max_payload
);
679 printk(BIOS_INFO
, "%s: Max_Payload_Size adjusted to %d\n", dev_path(parent
),
680 (1 << (max_payload
+ 7)));
685 * Clear Lane Error State at the end of PCIe link training.
686 * Lane error status is cleared if PCIEXP_LANE_ERR_STAT_CLEAR is set.
687 * Lane error is normal during link training, so we need to clear it.
688 * At this moment, link has been used, but for a very short duration.
690 static void clear_lane_error_status(struct device
*dev
)
695 pos
= pciexp_find_extended_cap(dev
, PCI_EXP_SEC_CAP_ID
, 0);
699 reg32
= pci_read_config32(dev
, pos
+ PCI_EXP_SEC_LANE_ERR_STATUS
);
703 printk(BIOS_DEBUG
, "%s: Clear Lane Error Status.\n", dev_path(dev
));
704 printk(BIOS_DEBUG
, "LaneErrStat:0x%x\n", reg32
);
705 pci_write_config32(dev
, pos
+ PCI_EXP_SEC_LANE_ERR_STATUS
, reg32
);
708 static void pciexp_tune_dev(struct device
*dev
)
710 struct device
*root
= dev
->upstream
->dev
;
711 unsigned int root_cap
, cap
;
713 cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
717 root_cap
= pci_find_capability(root
, PCI_CAP_ID_PCIE
);
721 /* Check for and enable Common Clock */
722 if (CONFIG(PCIEXP_COMMON_CLOCK
))
723 pciexp_enable_common_clock(root
, root_cap
, dev
, cap
);
725 /* Check if per port CLK req is supported by endpoint*/
726 if (CONFIG(PCIEXP_CLK_PM
))
727 pciexp_enable_clock_power_pm(dev
, cap
);
729 /* Enable L1 Sub-State when both root port and endpoint support */
730 if (CONFIG(PCIEXP_L1_SUB_STATE
))
731 pciexp_config_L1_sub_state(root
, dev
);
733 /* Check for and enable ASPM */
734 if (CONFIG(PCIEXP_ASPM
))
735 pciexp_enable_aspm(root
, root_cap
, dev
, cap
);
737 /* Clear PCIe Lane Error Status */
738 if (CONFIG(PCIEXP_LANE_ERR_STAT_CLEAR
))
739 clear_lane_error_status(root
);
741 /* Set the Max Payload Size to the maximum supported capability for this device */
742 if (pcie_is_endpoint(dev
))
743 pciexp_dev_set_max_payload_size(dev
, pciexp_dev_get_max_payload_size_cap(dev
));
745 /* Limit the parent's Max Payload Size if needed */
746 pciexp_configure_max_payload_size(root
, dev
);
748 pciexp_configure_ltr(root
, root_cap
, dev
, cap
);
751 static void pciexp_sync_max_payload_size(struct bus
*bus
, unsigned int max_payload
)
753 struct device
*child
;
755 /* Set the max payload for children on the bus and their children, etc. */
756 for (child
= bus
->children
; child
; child
= child
->sibling
) {
760 pciexp_dev_set_max_payload_size(child
, max_payload
);
762 if (child
->downstream
)
763 pciexp_sync_max_payload_size(child
->downstream
, max_payload
);
767 void pciexp_scan_bus(struct bus
*bus
, unsigned int min_devfn
,
768 unsigned int max_devfn
)
770 struct device
*child
;
771 unsigned int max_payload
;
773 pciexp_enable_ltr(bus
->dev
);
776 * Set the Max Payload Size to the maximum supported capability for this bridge.
777 * This value will be used in pciexp_tune_dev to limit the Max Payload size if needed.
779 max_payload
= pciexp_dev_get_max_payload_size_cap(bus
->dev
);
780 pciexp_dev_set_max_payload_size(bus
->dev
, max_payload
);
782 pci_scan_bus(bus
, min_devfn
, max_devfn
);
784 for (child
= bus
->children
; child
; child
= child
->sibling
) {
785 if (child
->path
.type
!= DEVICE_PATH_PCI
)
787 if ((child
->path
.pci
.devfn
< min_devfn
) ||
788 (child
->path
.pci
.devfn
> max_devfn
)) {
791 pciexp_tune_dev(child
);
795 * Now the root port's Max Payload Size should be set to the highest
796 * possible value supported by all devices under a given root port.
797 * Propagate that value down from root port to all devices, so the Max
798 * Payload Size is equal on all devices, as some devices may have
799 * different capabilities and the programmed value depends on the
800 * order of device population the in the subtree.
802 if (pcie_is_root_port(bus
->dev
)) {
803 max_payload
= pciexp_dev_get_current_max_payload_size(bus
->dev
);
805 printk(BIOS_INFO
, "%s: Setting Max_Payload_Size to %d for devices under this"
806 " root port\n", dev_path(bus
->dev
), 1 << (max_payload
+ 7));
808 pciexp_sync_max_payload_size(bus
, max_payload
);
812 void pciexp_scan_bridge(struct device
*dev
)
814 do_pci_scan_bridge(dev
, pciexp_scan_bus
);
817 /** Default device operations for PCI Express bridges */
818 static struct pci_operations pciexp_bus_ops_pci
= {
822 struct device_operations default_pciexp_ops_bus
= {
823 .read_resources
= pci_bus_read_resources
,
824 .set_resources
= pci_dev_set_resources
,
825 .enable_resources
= pci_bus_enable_resources
,
826 .scan_bus
= pciexp_scan_bridge
,
827 .reset_bus
= pci_bus_reset
,
828 .ops_pci
= &pciexp_bus_ops_pci
,
831 static void pciexp_hotplug_dummy_read_resources(struct device
*dev
)
833 struct resource
*resource
;
835 /* Add extra memory space */
836 resource
= new_resource(dev
, 0x10);
837 resource
->size
= CONFIG_PCIEXP_HOTPLUG_MEM
;
838 resource
->align
= 12;
840 resource
->limit
= 0xffffffff;
841 resource
->flags
|= IORESOURCE_MEM
;
843 /* Add extra prefetchable memory space */
844 resource
= new_resource(dev
, 0x14);
845 resource
->size
= CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM
;
846 resource
->align
= 12;
848 resource
->limit
= 0xffffffffffffffff;
849 resource
->flags
|= IORESOURCE_MEM
| IORESOURCE_PREFETCH
;
851 /* Set resource flag requesting allocation above 4G boundary. */
852 if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G
))
853 resource
->flags
|= IORESOURCE_ABOVE_4G
;
855 /* Add extra I/O space */
856 resource
= new_resource(dev
, 0x18);
857 resource
->size
= CONFIG_PCIEXP_HOTPLUG_IO
;
858 resource
->align
= 12;
860 resource
->limit
= 0xffff;
861 resource
->flags
|= IORESOURCE_IO
;
864 static struct device_operations pciexp_hotplug_dummy_ops
= {
865 .read_resources
= pciexp_hotplug_dummy_read_resources
,
866 .set_resources
= noop_set_resources
,
869 void pciexp_hotplug_scan_bridge(struct device
*dev
)
871 dev
->hotplug_port
= 1;
872 dev
->hotplug_buses
= CONFIG_PCIEXP_HOTPLUG_BUSES
;
874 /* Normal PCIe Scan */
875 pciexp_scan_bridge(dev
);
877 /* Add dummy slot to preserve resources, must happen after bus scan */
878 struct device
*dummy
;
879 struct device_path dummy_path
= { .type
= DEVICE_PATH_NONE
};
880 dummy
= alloc_dev(dev
->downstream
, &dummy_path
);
881 dummy
->ops
= &pciexp_hotplug_dummy_ops
;
884 struct device_operations default_pciexp_hotplug_ops_bus
= {
885 .read_resources
= pci_bus_read_resources
,
886 .set_resources
= pci_dev_set_resources
,
887 .enable_resources
= pci_bus_enable_resources
,
888 .scan_bus
= pciexp_hotplug_scan_bridge
,
889 .reset_bus
= pci_bus_reset
,
890 .ops_pci
= &pciexp_bus_ops_pci
,