/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <commonlib/helpers.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <device/pciexp.h>

static unsigned int ext_cap_id(unsigned int cap)
{
	return cap & 0xffff;
}

static unsigned int ext_cap_next_offset(unsigned int cap)
{
	return cap >> 20 & 0xffc;
}

static unsigned int find_ext_cap_offset(const struct device *dev, unsigned int cap_id,
					unsigned int offset)
{
	unsigned int this_cap_offset = offset;

	while (this_cap_offset >= PCIE_EXT_CAP_OFFSET) {
		const unsigned int this_cap = pci_read_config32(dev, this_cap_offset);

		/* Bail out when this request is unsupported */
		if (this_cap == 0xffffffff)
			break;

		if (ext_cap_id(this_cap) == cap_id)
			return this_cap_offset;

		this_cap_offset = ext_cap_next_offset(this_cap);
	}

	return 0;
}

/*
 * Search for an extended capability with the ID `cap`.
 *
 * Returns the offset of the first matching extended
 * capability if found, or 0 otherwise.
 *
 * A new search is started with `offset == 0`.
 * To continue a search, the prior return value
 * should be passed as `offset`.
 */
unsigned int pciexp_find_extended_cap(const struct device *dev, unsigned int cap,
				      unsigned int offset)
{
	unsigned int next_cap_offset;

	if (offset)
		next_cap_offset = ext_cap_next_offset(pci_read_config32(dev, offset));
	else
		next_cap_offset = PCIE_EXT_CAP_OFFSET;

	return find_ext_cap_offset(dev, cap, next_cap_offset);
}
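
/*
 * Usage sketch (illustrative, not part of the original file): some extended
 * capabilities, e.g. vendor-specific ones, may appear more than once, so
 * feed each return value back in as `offset` to walk all instances:
 *
 *	unsigned int offset = 0;
 *	while ((offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset)))
 *		printk(BIOS_DEBUG, "vendor cap at offset 0x%x\n", offset);
 */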

/*
 * Search for a vendor-specific extended capability,
 * with the vendor-specific ID `cap`.
 *
 * Returns the offset of the vendor-specific header,
 * i.e. the offset of the extended capability + 4,
 * or 0 if none is found.
 *
 * A new search is started with `offset == 0`.
 * To continue a search, the prior return value
 * should be passed as `offset`.
 */
unsigned int pciexp_find_ext_vendor_cap(const struct device *dev, unsigned int cap,
					unsigned int offset)
{
	/* Reconstruct capability offset from vendor-specific header offset. */
	if (offset >= 4)
		offset -= 4;

	for (;;) {
		offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset);
		if (!offset)
			return 0;

		const unsigned int vndr_cap = pci_read_config32(dev, offset + 4);
		if ((vndr_cap & 0xffff) == cap)
			return offset + 4;
	}
}
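
/*
 * Usage sketch (illustrative; 0xcafe is the vendor-specific ID this file
 * uses elsewhere): continuing a search works the same way as above, since
 * the returned header offset is converted back internally:
 *
 *	unsigned int voff = pciexp_find_ext_vendor_cap(dev, 0xcafe, 0);
 *	if (voff)
 *		voff = pciexp_find_ext_vendor_cap(dev, 0xcafe, voff);
 */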

/**
 * Find a PCIe device with a given serial number, and a given VID if applicable
 *
 * @param serial The serial number of the device.
 * @param vid Vendor ID of the device, may be 0 if not applicable.
 * @param from Pointer to the device structure, used as a starting point in
 *             the linked list of all_devices, which can be 0 to start at the
 *             head of the list (i.e. all_devices).
 * @return Pointer to the device struct.
 */
struct device *pcie_find_dsn(const uint64_t serial, const uint16_t vid,
			     struct device *from)
{
	union dsn {
		struct {
			uint32_t dsn_low;
			uint32_t dsn_high;
		};
		uint64_t dsn;
	} dsn;
	unsigned int cap;
	uint16_t vendor_id;

	if (!from)
		from = all_devices;
	else
		from = from->next;

	while (from) {
		if (from->path.type == DEVICE_PATH_PCI) {
			cap = pciexp_find_extended_cap(from, PCI_EXT_CAP_ID_DSN, 0);
			/*
			 * For a PCIe device, find the extended capability for the
			 * serial number. The capability header is 4 bytes, followed
			 * by the lower 4 bytes of the serial number, then the
			 * higher 4 bytes.
			 */
			if (cap != 0) {
				dsn.dsn_low = pci_read_config32(from, cap + 4);
				dsn.dsn_high = pci_read_config32(from, cap + 8);
				vendor_id = pci_read_config16(from, PCI_VENDOR_ID);
				if ((dsn.dsn == serial) && (vid == 0 || vendor_id == vid))
					return from;
			}
		}

		from = from->next;
	}

	return from;
}
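
/*
 * Usage sketch (illustrative; the serial number below is a made-up value):
 * iterate over every Intel device matching a given DSN:
 *
 *	struct device *dev = NULL;
 *	while ((dev = pcie_find_dsn(0x0011223344556677ULL, PCI_VID_INTEL, dev)))
 *		printk(BIOS_DEBUG, "%s matches the DSN\n", dev_path(dev));
 */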

/*
 * Re-train a PCIe link
 */
#define PCIE_TRAIN_RETRY 10000
static int pciexp_retrain_link(struct device *dev, unsigned int cap)
{
	unsigned int try;
	u16 lnk;

	/*
	 * Implementation note (page 633) in PCIe Specification 3.0 suggests
	 * polling the Link Training bit in the Link Status register until the
	 * value returned is 0 before setting the Retrain Link bit to 1.
	 * This is meant to avoid a race condition when using the
	 * Retrain Link mechanism.
	 */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			break;
		udelay(100);
	}
	if (try == 0) {
		printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
		return -1;
	}

	/* Start link retraining */
	lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
	lnk |= PCI_EXP_LNKCTL_RL;
	pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);

	/* Wait for training to complete */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			return 0;
		udelay(100);
	}

	printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
	return -1;
}

static bool pciexp_is_ccc_active(struct device *root, unsigned int root_cap,
				 struct device *endp, unsigned int endp_cap)
{
	u16 root_ccc, endp_ccc;

	root_ccc = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_CCC;
	endp_ccc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_CCC;
	if (root_ccc && endp_ccc) {
		printk(BIOS_INFO, "PCIe: Common Clock Configuration already enabled\n");
		return true;
	}
	return false;
}

/*
 * Check the Slot Clock Configuration for root port and endpoint
 * and enable Common Clock Configuration if possible. If CCC is
 * enabled the link must be retrained.
 */
static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
				       struct device *endp, unsigned int endp_cap)
{
	u16 root_scc, endp_scc, lnkctl;

	/* No need to enable common clock if it is already active. */
	if (pciexp_is_ccc_active(root, root_cap, endp, endp_cap))
		return;

	/* Get Slot Clock Configuration for root port */
	root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
	root_scc &= PCI_EXP_LNKSTA_SLC;

	/* Get Slot Clock Configuration for endpoint */
	endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
	endp_scc &= PCI_EXP_LNKSTA_SLC;

	/* Enable Common Clock Configuration and retrain */
	if (root_scc && endp_scc) {
		printk(BIOS_INFO, "Enabling Common Clock Configuration\n");

		/* Set in endpoint */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set in root port */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Retrain link if CCC was enabled */
		pciexp_retrain_link(root, root_cap);
	}
}

static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
{
	/* Check if per-port clock power management is supported by the endpoint */
	u32 endp_ca;
	u16 lnkctl;

	endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
	if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
		printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
		return;
	}
	lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
	lnkctl |= PCI_EXP_EN_CLK_PM;
	pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
}

static bool _pciexp_ltr_supported(struct device *dev, unsigned int cap)
{
	return pci_read_config16(dev, cap + PCI_EXP_DEVCAP2) & PCI_EXP_DEVCAP2_LTR;
}

static bool _pciexp_ltr_enabled(struct device *dev, unsigned int cap)
{
	return pci_read_config16(dev, cap + PCI_EXP_DEVCTL2) & PCI_EXP_DEV2_LTR;
}

static bool _pciexp_enable_ltr(struct device *parent, unsigned int parent_cap,
			       struct device *dev, unsigned int cap)
{
	if (!_pciexp_ltr_supported(dev, cap)) {
		printk(BIOS_DEBUG, "%s: No LTR support\n", dev_path(dev));
		return false;
	}

	if (_pciexp_ltr_enabled(dev, cap))
		return true;

	if (parent &&
	    (!_pciexp_ltr_supported(parent, parent_cap) ||
	     !_pciexp_ltr_enabled(parent, parent_cap)))
		return false;

	pci_or_config16(dev, cap + PCI_EXP_DEVCTL2, PCI_EXP_DEV2_LTR);
	printk(BIOS_INFO, "%s: Enabled LTR\n", dev_path(dev));
	return true;
}

static void pciexp_enable_ltr(struct device *dev)
{
	const unsigned int cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	/*
	 * If we have get_ltr_max_latencies(), treat `dev` as the root.
	 * If not, let _pciexp_enable_ltr() query the parent's state.
	 */
	struct device *parent = NULL;
	unsigned int parent_cap = 0;
	if (!dev->ops->ops_pci || !dev->ops->ops_pci->get_ltr_max_latencies) {
		parent = dev->bus->dev;
		if (parent->path.type != DEVICE_PATH_PCI)
			return;
		parent_cap = pci_find_capability(parent, PCI_CAP_ID_PCIE);
		if (!parent_cap)
			return;
	}

	(void)_pciexp_enable_ltr(parent, parent_cap, dev, cap);
}

bool pciexp_get_ltr_max_latencies(struct device *dev, u16 *max_snoop, u16 *max_nosnoop)
{
	/* Walk the hierarchy up to find get_ltr_max_latencies(). */
	do {
		if (dev->ops->ops_pci && dev->ops->ops_pci->get_ltr_max_latencies)
			break;
		if (dev->bus->dev == dev || dev->bus->dev->path.type != DEVICE_PATH_PCI)
			return false;
		dev = dev->bus->dev;
	} while (true);

	dev->ops->ops_pci->get_ltr_max_latencies(max_snoop, max_nosnoop);
	return true;
}
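
/*
 * Hook sketch (illustrative; the latency encodings below are placeholders):
 * a root-port driver advertises its maximum tolerable LTR latencies by
 * implementing the callback in its `struct pci_operations`, matching the
 * call made above:
 *
 *	static void rp_get_ltr_max_latencies(u16 *max_snoop, u16 *max_nosnoop)
 *	{
 *		*max_snoop = 0x1003;	// placeholder encoding
 *		*max_nosnoop = 0x1003;	// placeholder encoding
 *	}
 *
 *	static struct pci_operations rp_pci_ops = {
 *		.get_ltr_max_latencies = rp_get_ltr_max_latencies,
 *	};
 */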

static void pciexp_configure_ltr(struct device *parent, unsigned int parent_cap,
				 struct device *dev, unsigned int cap)
{
	if (!_pciexp_enable_ltr(parent, parent_cap, dev, cap))
		return;

	const unsigned int ltr_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID, 0);
	if (!ltr_cap)
		return;

	u16 max_snoop, max_nosnoop;
	if (!pciexp_get_ltr_max_latencies(dev, &max_snoop, &max_nosnoop))
		return;

	pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_SNOOP, max_snoop);
	pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_NOSNOOP, max_nosnoop);
	printk(BIOS_INFO, "%s: Programmed LTR max latencies\n", dev_path(dev));
}

static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
					    unsigned int *data)
{
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	if (power_on_value * mult[power_on_scale] <
	    endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}

static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
				      unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is initialized above from the root port and is
		 * narrowed down to the support common to all endpoints. If any
		 * endpoint doesn't support L1 Sub-States, abort enabling this
		 * feature.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
						  &rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
	       root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
	       endp_power_on_value, power_on_scale);

	pci_update_config32(root, root_cap + 0x08, ~0xff00,
			    (comm_mode_rst_time << 8));

	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
			    (endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
			    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
			      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	pci_update_config32(root, root_cap + 0x08, ~0x1f,
			    L1SubStateSupport);

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
				    (endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
				    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
				    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
				    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
				    L1SubStateSupport);
	}
}

static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
{
	unsigned int root_cap, end_cap;

	/* Do it for function 0 only */
	if (dev->path.pci.devfn & 0x7)
		return;

	root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID, 0);
	if (!root_cap)
		return;

	end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID, 0);
	if (!end_cap) {
		if (dev->vendor != PCI_VID_INTEL)
			return;

		end_cap = pciexp_find_ext_vendor_cap(dev, 0xcafe, 0);
		if (!end_cap)
			return;
	}

	pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
}

/*
 * Determine the ASPM L0s or L1 exit latency for a link
 * by checking both root port and endpoint and returning
 * the highest latency value.
 */
static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap,
			       enum aspm_type type)
{
	int root_lat = 0, endp_lat = 0;
	u32 root_lnkcap, endp_lnkcap;

	root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
	endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);

	/* Make sure the link supports this ASPM type by checking
	 * capability bits 11:10 with aspm_type offset by 1 */
	if (!(root_lnkcap & (1 << (type + 9))) ||
	    !(endp_lnkcap & (1 << (type + 9))))
		return -1;

	/* Find the one with higher latency */
	switch (type) {
	case PCIE_ASPM_L0S:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		break;
	case PCIE_ASPM_L1:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		break;
	default:
		return -1;
	}

	return (endp_lat > root_lat) ? endp_lat : root_lat;
}

/*
 * Enable ASPM on PCIe root port and endpoint.
 */
static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap)
{
	const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
	enum aspm_type apmc = PCIE_ASPM_NONE;
	int exit_latency, ok_latency;
	u16 lnkctl;
	u32 devcap;

	if (endp->disable_pcie_aspm)
		return;

	/* Get endpoint device capabilities for acceptable limits */
	devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);

	/* Enable L0s if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L0S);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L0S;

	/* Enable L1 if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L1);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L1;

	if (apmc != PCIE_ASPM_NONE) {
		/* Set APMC in root port first */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set APMC in endpoint device next */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
	}

	printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
}

/*
 * Set max payload size of endpoint in accordance with max payload size of root port.
 */
static void pciexp_set_max_payload_size(struct device *root, unsigned int root_cap,
					struct device *endp, unsigned int endp_cap)
{
	unsigned int endp_max_payload, root_max_payload, max_payload;
	u16 endp_devctl, root_devctl;
	u32 endp_devcap, root_devcap;

	/* Get max payload size supported by endpoint */
	endp_devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
	endp_max_payload = endp_devcap & PCI_EXP_DEVCAP_PAYLOAD;

	/* Get max payload size supported by root port */
	root_devcap = pci_read_config32(root, root_cap + PCI_EXP_DEVCAP);
	root_max_payload = root_devcap & PCI_EXP_DEVCAP_PAYLOAD;

	/* Set max payload to the smaller of the reported device capabilities. */
	max_payload = MIN(endp_max_payload, root_max_payload);
	if (max_payload > 5) {
		/* Values 6 and 7 are reserved in the PCIe 3.0 spec. */
		printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
		       max_payload);
		max_payload = 5;
	}

	endp_devctl = pci_read_config16(endp, endp_cap + PCI_EXP_DEVCTL);
	endp_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
	endp_devctl |= max_payload << 5;
	pci_write_config16(endp, endp_cap + PCI_EXP_DEVCTL, endp_devctl);

	root_devctl = pci_read_config16(root, root_cap + PCI_EXP_DEVCTL);
	root_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
	root_devctl |= max_payload << 5;
	pci_write_config16(root, root_cap + PCI_EXP_DEVCTL, root_devctl);

	/* The field encodes a payload of 2^(value + 7) bytes. */
	printk(BIOS_INFO, "PCIe: Max_Payload_Size adjusted to %d\n", (1 << (max_payload + 7)));
}

/*
 * Clear Lane Error Status at the end of PCIe link training.
 * Lane error status is cleared if PCIEXP_LANE_ERR_STAT_CLEAR is set.
 * Lane errors are normal during link training, so we need to clear them.
 * At this point, the link has been used, but only for a very short duration.
 */
static void clear_lane_error_status(struct device *dev)
{
	u32 reg32;
	u16 pos;

	pos = pciexp_find_extended_cap(dev, PCI_EXP_SEC_CAP_ID, 0);
	if (pos == 0)
		return;

	reg32 = pci_read_config32(dev, pos + PCI_EXP_SEC_LANE_ERR_STATUS);
	if (reg32 == 0)
		return;

	printk(BIOS_DEBUG, "%s: Clear Lane Error Status.\n", dev_path(dev));
	printk(BIOS_DEBUG, "LaneErrStat:0x%x\n", reg32);
	pci_write_config32(dev, pos + PCI_EXP_SEC_LANE_ERR_STATUS, reg32);
}

static void pciexp_tune_dev(struct device *dev)
{
	struct device *root = dev->bus->dev;
	unsigned int root_cap, cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
	if (!root_cap)
		return;

	/* Check for and enable Common Clock */
	if (CONFIG(PCIEXP_COMMON_CLOCK))
		pciexp_enable_common_clock(root, root_cap, dev, cap);

	/* Check if per-port CLK req is supported by the endpoint */
	if (CONFIG(PCIEXP_CLK_PM))
		pciexp_enable_clock_power_pm(dev, cap);

	/* Enable L1 Sub-State when both root port and endpoint support it */
	if (CONFIG(PCIEXP_L1_SUB_STATE))
		pciexp_config_L1_sub_state(root, dev);

	/* Check for and enable ASPM */
	if (CONFIG(PCIEXP_ASPM))
		pciexp_enable_aspm(root, root_cap, dev, cap);

	/* Clear PCIe Lane Error Status */
	if (CONFIG(PCIEXP_LANE_ERR_STAT_CLEAR))
		clear_lane_error_status(root);

	/* Adjust Max_Payload_Size of link ends. */
	pciexp_set_max_payload_size(root, root_cap, dev, cap);

	pciexp_configure_ltr(root, root_cap, dev, cap);
}

void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
		     unsigned int max_devfn)
{
	struct device *child;

	pciexp_enable_ltr(bus->dev);

	pci_scan_bus(bus, min_devfn, max_devfn);

	for (child = bus->children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_PCI)
			continue;
		if ((child->path.pci.devfn < min_devfn) ||
		    (child->path.pci.devfn > max_devfn)) {
			continue;
		}
		pciexp_tune_dev(child);
	}
}

void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
}

/** Default device operations for PCI Express bridges */
static struct pci_operations pciexp_bus_ops_pci = {
	.set_subsystem = 0,
};

struct device_operations default_pciexp_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};
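
/*
 * Usage sketch (illustrative): a chipset root-port driver can either bind
 * its bridges to `default_pciexp_ops_bus` directly or reuse
 * `pciexp_scan_bridge` in its own operations:
 *
 *	static struct device_operations rp_ops = {
 *		.read_resources = pci_bus_read_resources,
 *		.set_resources = pci_dev_set_resources,
 *		.enable_resources = pci_bus_enable_resources,
 *		.scan_bus = pciexp_scan_bridge,
 *	};
 */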

static void pciexp_hotplug_dummy_read_resources(struct device *dev)
{
	struct resource *resource;

	/* Add extra memory space */
	resource = new_resource(dev, 0x10);
	resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffff;
	resource->flags |= IORESOURCE_MEM;

	/* Add extra prefetchable memory space */
	resource = new_resource(dev, 0x14);
	resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffffffffffff;
	resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* Set resource flag requesting allocation above 4G boundary. */
	if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
		resource->flags |= IORESOURCE_ABOVE_4G;

	/* Add extra I/O space */
	resource = new_resource(dev, 0x18);
	resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffff;
	resource->flags |= IORESOURCE_IO;
}

static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources = pciexp_hotplug_dummy_read_resources,
	.set_resources = noop_set_resources,
};

void pciexp_hotplug_scan_bridge(struct device *dev)
{
	dev->hotplug_port = 1;
	dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;

	/* Normal PCIe Scan */
	pciexp_scan_bridge(dev);

	/* Add dummy slot to preserve resources, must happen after bus scan */
	struct device *dummy;
	struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
	dummy = alloc_dev(dev->link_list, &dummy_path);
	dummy->ops = &pciexp_hotplug_dummy_ops;
}

struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_hotplug_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};