// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */
11 #include <linux/irqchip/chained_irq.h>
12 #include <linux/irqdomain.h>
13 #include <linux/of_address.h>
14 #include <linux/of_pci.h>
15 #include <linux/pci_regs.h>
16 #include <linux/platform_device.h>
18 #include "../../pci.h"
19 #include "pcie-designware.h"
/* Forward declaration; the ops table is defined near the bottom of this file. */
static struct pci_ops dw_pcie_ops;
23 static int dw_pcie_rd_own_conf(struct pcie_port
*pp
, int where
, int size
,
28 if (pp
->ops
->rd_own_conf
)
29 return pp
->ops
->rd_own_conf(pp
, where
, size
, val
);
31 pci
= to_dw_pcie_from_pp(pp
);
32 return dw_pcie_read(pci
->dbi_base
+ where
, size
, val
);
35 static int dw_pcie_wr_own_conf(struct pcie_port
*pp
, int where
, int size
,
40 if (pp
->ops
->wr_own_conf
)
41 return pp
->ops
->wr_own_conf(pp
, where
, size
, val
);
43 pci
= to_dw_pcie_from_pp(pp
);
44 return dw_pcie_write(pci
->dbi_base
+ where
, size
, val
);
/* Top-level MSI irq_chip: ack is delegated to the parent (bottom) chip. */
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}
/* Mask at the PCI MSI capability level, then in the parent (bottom) chip. */
static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
/* Unmask at the PCI MSI capability level, then in the parent (bottom) chip. */
static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
64 static struct irq_chip dw_pcie_msi_irq_chip
= {
66 .irq_ack
= dw_msi_ack_irq
,
67 .irq_mask
= dw_msi_mask_irq
,
68 .irq_unmask
= dw_msi_unmask_irq
,
71 static struct msi_domain_info dw_pcie_msi_domain_info
= {
72 .flags
= (MSI_FLAG_USE_DEF_DOM_OPS
| MSI_FLAG_USE_DEF_CHIP_OPS
|
73 MSI_FLAG_PCI_MSIX
| MSI_FLAG_MULTI_PCI_MSI
),
74 .chip
= &dw_pcie_msi_irq_chip
,
78 irqreturn_t
dw_handle_msi_irq(struct pcie_port
*pp
)
82 irqreturn_t ret
= IRQ_NONE
;
84 num_ctrls
= pp
->num_vectors
/ MAX_MSI_IRQS_PER_CTRL
;
86 for (i
= 0; i
< num_ctrls
; i
++) {
87 dw_pcie_rd_own_conf(pp
, PCIE_MSI_INTR0_STATUS
+
88 (i
* MSI_REG_CTRL_BLOCK_SIZE
),
95 while ((pos
= find_next_bit((unsigned long *) &val
,
96 MAX_MSI_IRQS_PER_CTRL
,
97 pos
)) != MAX_MSI_IRQS_PER_CTRL
) {
98 irq
= irq_find_mapping(pp
->irq_domain
,
99 (i
* MAX_MSI_IRQS_PER_CTRL
) +
101 generic_handle_irq(irq
);
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}
123 static void dw_pci_setup_msi_msg(struct irq_data
*d
, struct msi_msg
*msg
)
125 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
126 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
129 msi_target
= (u64
)pp
->msi_data
;
131 msg
->address_lo
= lower_32_bits(msi_target
);
132 msg
->address_hi
= upper_32_bits(msi_target
);
134 msg
->data
= d
->hwirq
;
136 dev_dbg(pci
->dev
, "msi#%d address_hi %#x address_lo %#x\n",
137 (int)d
->hwirq
, msg
->address_hi
, msg
->address_lo
);
140 static int dw_pci_msi_set_affinity(struct irq_data
*d
,
141 const struct cpumask
*mask
, bool force
)
146 static void dw_pci_bottom_mask(struct irq_data
*d
)
148 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
149 unsigned int res
, bit
, ctrl
;
152 raw_spin_lock_irqsave(&pp
->lock
, flags
);
154 ctrl
= d
->hwirq
/ MAX_MSI_IRQS_PER_CTRL
;
155 res
= ctrl
* MSI_REG_CTRL_BLOCK_SIZE
;
156 bit
= d
->hwirq
% MAX_MSI_IRQS_PER_CTRL
;
158 pp
->irq_mask
[ctrl
] |= BIT(bit
);
159 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_MASK
+ res
, 4,
162 raw_spin_unlock_irqrestore(&pp
->lock
, flags
);
165 static void dw_pci_bottom_unmask(struct irq_data
*d
)
167 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
168 unsigned int res
, bit
, ctrl
;
171 raw_spin_lock_irqsave(&pp
->lock
, flags
);
173 ctrl
= d
->hwirq
/ MAX_MSI_IRQS_PER_CTRL
;
174 res
= ctrl
* MSI_REG_CTRL_BLOCK_SIZE
;
175 bit
= d
->hwirq
% MAX_MSI_IRQS_PER_CTRL
;
177 pp
->irq_mask
[ctrl
] &= ~BIT(bit
);
178 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_MASK
+ res
, 4,
181 raw_spin_unlock_irqrestore(&pp
->lock
, flags
);
184 static void dw_pci_bottom_ack(struct irq_data
*d
)
186 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
187 unsigned int res
, bit
, ctrl
;
189 ctrl
= d
->hwirq
/ MAX_MSI_IRQS_PER_CTRL
;
190 res
= ctrl
* MSI_REG_CTRL_BLOCK_SIZE
;
191 bit
= d
->hwirq
% MAX_MSI_IRQS_PER_CTRL
;
193 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_STATUS
+ res
, 4, BIT(bit
));
196 static struct irq_chip dw_pci_msi_bottom_irq_chip
= {
198 .irq_ack
= dw_pci_bottom_ack
,
199 .irq_compose_msi_msg
= dw_pci_setup_msi_msg
,
200 .irq_set_affinity
= dw_pci_msi_set_affinity
,
201 .irq_mask
= dw_pci_bottom_mask
,
202 .irq_unmask
= dw_pci_bottom_unmask
,
205 static int dw_pcie_irq_domain_alloc(struct irq_domain
*domain
,
206 unsigned int virq
, unsigned int nr_irqs
,
209 struct pcie_port
*pp
= domain
->host_data
;
214 raw_spin_lock_irqsave(&pp
->lock
, flags
);
216 bit
= bitmap_find_free_region(pp
->msi_irq_in_use
, pp
->num_vectors
,
217 order_base_2(nr_irqs
));
219 raw_spin_unlock_irqrestore(&pp
->lock
, flags
);
224 for (i
= 0; i
< nr_irqs
; i
++)
225 irq_domain_set_info(domain
, virq
+ i
, bit
+ i
,
233 static void dw_pcie_irq_domain_free(struct irq_domain
*domain
,
234 unsigned int virq
, unsigned int nr_irqs
)
236 struct irq_data
*d
= irq_domain_get_irq_data(domain
, virq
);
237 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
240 raw_spin_lock_irqsave(&pp
->lock
, flags
);
242 bitmap_release_region(pp
->msi_irq_in_use
, d
->hwirq
,
243 order_base_2(nr_irqs
));
245 raw_spin_unlock_irqrestore(&pp
->lock
, flags
);
248 static const struct irq_domain_ops dw_pcie_msi_domain_ops
= {
249 .alloc
= dw_pcie_irq_domain_alloc
,
250 .free
= dw_pcie_irq_domain_free
,
253 int dw_pcie_allocate_domains(struct pcie_port
*pp
)
255 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
256 struct fwnode_handle
*fwnode
= of_node_to_fwnode(pci
->dev
->of_node
);
258 pp
->irq_domain
= irq_domain_create_linear(fwnode
, pp
->num_vectors
,
259 &dw_pcie_msi_domain_ops
, pp
);
260 if (!pp
->irq_domain
) {
261 dev_err(pci
->dev
, "Failed to create IRQ domain\n");
265 pp
->msi_domain
= pci_msi_create_irq_domain(fwnode
,
266 &dw_pcie_msi_domain_info
,
268 if (!pp
->msi_domain
) {
269 dev_err(pci
->dev
, "Failed to create MSI domain\n");
270 irq_domain_remove(pp
->irq_domain
);
277 void dw_pcie_free_msi(struct pcie_port
*pp
)
280 irq_set_chained_handler(pp
->msi_irq
, NULL
);
281 irq_set_handler_data(pp
->msi_irq
, NULL
);
284 irq_domain_remove(pp
->msi_domain
);
285 irq_domain_remove(pp
->irq_domain
);
288 __free_page(pp
->msi_page
);
291 void dw_pcie_msi_init(struct pcie_port
*pp
)
293 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
294 struct device
*dev
= pci
->dev
;
297 pp
->msi_page
= alloc_page(GFP_KERNEL
);
298 pp
->msi_data
= dma_map_page(dev
, pp
->msi_page
, 0, PAGE_SIZE
,
300 if (dma_mapping_error(dev
, pp
->msi_data
)) {
301 dev_err(dev
, "Failed to map MSI data\n");
302 __free_page(pp
->msi_page
);
306 msi_target
= (u64
)pp
->msi_data
;
308 /* Program the msi_data */
309 dw_pcie_wr_own_conf(pp
, PCIE_MSI_ADDR_LO
, 4,
310 lower_32_bits(msi_target
));
311 dw_pcie_wr_own_conf(pp
, PCIE_MSI_ADDR_HI
, 4,
312 upper_32_bits(msi_target
));
314 EXPORT_SYMBOL_GPL(dw_pcie_msi_init
);
316 int dw_pcie_host_init(struct pcie_port
*pp
)
318 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
319 struct device
*dev
= pci
->dev
;
320 struct device_node
*np
= dev
->of_node
;
321 struct platform_device
*pdev
= to_platform_device(dev
);
322 struct resource_entry
*win
, *tmp
;
323 struct pci_bus
*child
;
324 struct pci_host_bridge
*bridge
;
325 struct resource
*cfg_res
;
328 raw_spin_lock_init(&pci
->pp
.lock
);
330 cfg_res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "config");
332 pp
->cfg0_size
= resource_size(cfg_res
) >> 1;
333 pp
->cfg1_size
= resource_size(cfg_res
) >> 1;
334 pp
->cfg0_base
= cfg_res
->start
;
335 pp
->cfg1_base
= cfg_res
->start
+ pp
->cfg0_size
;
336 } else if (!pp
->va_cfg0_base
) {
337 dev_err(dev
, "Missing *config* reg space\n");
340 bridge
= devm_pci_alloc_host_bridge(dev
, 0);
344 ret
= devm_of_pci_get_host_bridge_resources(dev
, 0, 0xff,
345 &bridge
->windows
, &pp
->io_base
);
349 ret
= devm_request_pci_bus_resources(dev
, &bridge
->windows
);
353 /* Get the I/O and memory ranges from DT */
354 resource_list_for_each_entry_safe(win
, tmp
, &bridge
->windows
) {
355 switch (resource_type(win
->res
)) {
357 ret
= devm_pci_remap_iospace(dev
, win
->res
,
360 dev_warn(dev
, "Error %d: failed to map resource %pR\n",
362 resource_list_destroy_entry(win
);
365 pp
->io
->name
= "I/O";
366 pp
->io_size
= resource_size(pp
->io
);
367 pp
->io_bus_addr
= pp
->io
->start
- win
->offset
;
372 pp
->mem
->name
= "MEM";
373 pp
->mem_size
= resource_size(pp
->mem
);
374 pp
->mem_bus_addr
= pp
->mem
->start
- win
->offset
;
378 pp
->cfg0_size
= resource_size(pp
->cfg
) >> 1;
379 pp
->cfg1_size
= resource_size(pp
->cfg
) >> 1;
380 pp
->cfg0_base
= pp
->cfg
->start
;
381 pp
->cfg1_base
= pp
->cfg
->start
+ pp
->cfg0_size
;
389 if (!pci
->dbi_base
) {
390 pci
->dbi_base
= devm_pci_remap_cfgspace(dev
,
392 resource_size(pp
->cfg
));
393 if (!pci
->dbi_base
) {
394 dev_err(dev
, "Error with ioremap\n");
399 pp
->mem_base
= pp
->mem
->start
;
401 if (!pp
->va_cfg0_base
) {
402 pp
->va_cfg0_base
= devm_pci_remap_cfgspace(dev
,
403 pp
->cfg0_base
, pp
->cfg0_size
);
404 if (!pp
->va_cfg0_base
) {
405 dev_err(dev
, "Error with ioremap in function\n");
410 if (!pp
->va_cfg1_base
) {
411 pp
->va_cfg1_base
= devm_pci_remap_cfgspace(dev
,
414 if (!pp
->va_cfg1_base
) {
415 dev_err(dev
, "Error with ioremap\n");
420 ret
= of_property_read_u32(np
, "num-viewport", &pci
->num_viewport
);
422 pci
->num_viewport
= 2;
424 if (pci_msi_enabled()) {
426 * If a specific SoC driver needs to change the
427 * default number of vectors, it needs to implement
428 * the set_num_vectors callback.
430 if (!pp
->ops
->set_num_vectors
) {
431 pp
->num_vectors
= MSI_DEF_NUM_VECTORS
;
433 pp
->ops
->set_num_vectors(pp
);
435 if (pp
->num_vectors
> MAX_MSI_IRQS
||
436 pp
->num_vectors
== 0) {
438 "Invalid number of vectors\n");
443 if (!pp
->ops
->msi_host_init
) {
444 pp
->msi_irq_chip
= &dw_pci_msi_bottom_irq_chip
;
446 ret
= dw_pcie_allocate_domains(pp
);
451 irq_set_chained_handler_and_data(pp
->msi_irq
,
455 ret
= pp
->ops
->msi_host_init(pp
);
461 if (pp
->ops
->host_init
) {
462 ret
= pp
->ops
->host_init(pp
);
467 pp
->root_bus_nr
= pp
->busn
->start
;
469 bridge
->dev
.parent
= dev
;
470 bridge
->sysdata
= pp
;
471 bridge
->busnr
= pp
->root_bus_nr
;
472 bridge
->ops
= &dw_pcie_ops
;
473 bridge
->map_irq
= of_irq_parse_and_map_pci
;
474 bridge
->swizzle_irq
= pci_common_swizzle
;
476 ret
= pci_scan_root_bus_bridge(bridge
);
480 pp
->root_bus
= bridge
->bus
;
482 if (pp
->ops
->scan_bus
)
483 pp
->ops
->scan_bus(pp
);
485 pci_bus_size_bridges(pp
->root_bus
);
486 pci_bus_assign_resources(pp
->root_bus
);
488 list_for_each_entry(child
, &pp
->root_bus
->children
, node
)
489 pcie_bus_configure_settings(child
);
491 pci_bus_add_devices(pp
->root_bus
);
495 if (pci_msi_enabled() && !pp
->ops
->msi_host_init
)
496 dw_pcie_free_msi(pp
);
499 EXPORT_SYMBOL_GPL(dw_pcie_host_init
);
501 void dw_pcie_host_deinit(struct pcie_port
*pp
)
503 pci_stop_root_bus(pp
->root_bus
);
504 pci_remove_root_bus(pp
->root_bus
);
505 if (pci_msi_enabled() && !pp
->ops
->msi_host_init
)
506 dw_pcie_free_msi(pp
);
508 EXPORT_SYMBOL_GPL(dw_pcie_host_deinit
);
510 static int dw_pcie_access_other_conf(struct pcie_port
*pp
, struct pci_bus
*bus
,
511 u32 devfn
, int where
, int size
, u32
*val
,
515 u32 busdev
, cfg_size
;
517 void __iomem
*va_cfg_base
;
518 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
520 busdev
= PCIE_ATU_BUS(bus
->number
) | PCIE_ATU_DEV(PCI_SLOT(devfn
)) |
521 PCIE_ATU_FUNC(PCI_FUNC(devfn
));
523 if (bus
->parent
->number
== pp
->root_bus_nr
) {
524 type
= PCIE_ATU_TYPE_CFG0
;
525 cpu_addr
= pp
->cfg0_base
;
526 cfg_size
= pp
->cfg0_size
;
527 va_cfg_base
= pp
->va_cfg0_base
;
529 type
= PCIE_ATU_TYPE_CFG1
;
530 cpu_addr
= pp
->cfg1_base
;
531 cfg_size
= pp
->cfg1_size
;
532 va_cfg_base
= pp
->va_cfg1_base
;
535 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX1
,
539 ret
= dw_pcie_write(va_cfg_base
+ where
, size
, *val
);
541 ret
= dw_pcie_read(va_cfg_base
+ where
, size
, val
);
543 if (pci
->num_viewport
<= 2)
544 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX1
,
545 PCIE_ATU_TYPE_IO
, pp
->io_base
,
546 pp
->io_bus_addr
, pp
->io_size
);
551 static int dw_pcie_rd_other_conf(struct pcie_port
*pp
, struct pci_bus
*bus
,
552 u32 devfn
, int where
, int size
, u32
*val
)
554 if (pp
->ops
->rd_other_conf
)
555 return pp
->ops
->rd_other_conf(pp
, bus
, devfn
, where
,
558 return dw_pcie_access_other_conf(pp
, bus
, devfn
, where
, size
, val
,
562 static int dw_pcie_wr_other_conf(struct pcie_port
*pp
, struct pci_bus
*bus
,
563 u32 devfn
, int where
, int size
, u32 val
)
565 if (pp
->ops
->wr_other_conf
)
566 return pp
->ops
->wr_other_conf(pp
, bus
, devfn
, where
,
569 return dw_pcie_access_other_conf(pp
, bus
, devfn
, where
, size
, &val
,
573 static int dw_pcie_valid_device(struct pcie_port
*pp
, struct pci_bus
*bus
,
576 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
578 /* If there is no link, then there is no device */
579 if (bus
->number
!= pp
->root_bus_nr
) {
580 if (!dw_pcie_link_up(pci
))
584 /* Access only one slot on each root port */
585 if (bus
->number
== pp
->root_bus_nr
&& dev
> 0)
591 static int dw_pcie_rd_conf(struct pci_bus
*bus
, u32 devfn
, int where
,
594 struct pcie_port
*pp
= bus
->sysdata
;
596 if (!dw_pcie_valid_device(pp
, bus
, PCI_SLOT(devfn
))) {
598 return PCIBIOS_DEVICE_NOT_FOUND
;
601 if (bus
->number
== pp
->root_bus_nr
)
602 return dw_pcie_rd_own_conf(pp
, where
, size
, val
);
604 return dw_pcie_rd_other_conf(pp
, bus
, devfn
, where
, size
, val
);
607 static int dw_pcie_wr_conf(struct pci_bus
*bus
, u32 devfn
,
608 int where
, int size
, u32 val
)
610 struct pcie_port
*pp
= bus
->sysdata
;
612 if (!dw_pcie_valid_device(pp
, bus
, PCI_SLOT(devfn
)))
613 return PCIBIOS_DEVICE_NOT_FOUND
;
615 if (bus
->number
== pp
->root_bus_nr
)
616 return dw_pcie_wr_own_conf(pp
, where
, size
, val
);
618 return dw_pcie_wr_other_conf(pp
, bus
, devfn
, where
, size
, val
);
621 static struct pci_ops dw_pcie_ops
= {
622 .read
= dw_pcie_rd_conf
,
623 .write
= dw_pcie_wr_conf
,
626 void dw_pcie_setup_rc(struct pcie_port
*pp
)
628 u32 val
, ctrl
, num_ctrls
;
629 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
633 if (!pp
->ops
->msi_host_init
) {
634 num_ctrls
= pp
->num_vectors
/ MAX_MSI_IRQS_PER_CTRL
;
636 /* Initialize IRQ Status array */
637 for (ctrl
= 0; ctrl
< num_ctrls
; ctrl
++) {
638 pp
->irq_mask
[ctrl
] = ~0;
639 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_MASK
+
640 (ctrl
* MSI_REG_CTRL_BLOCK_SIZE
),
641 4, pp
->irq_mask
[ctrl
]);
642 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_ENABLE
+
643 (ctrl
* MSI_REG_CTRL_BLOCK_SIZE
),
649 dw_pcie_writel_dbi(pci
, PCI_BASE_ADDRESS_0
, 0x00000004);
650 dw_pcie_writel_dbi(pci
, PCI_BASE_ADDRESS_1
, 0x00000000);
652 /* Setup interrupt pins */
653 dw_pcie_dbi_ro_wr_en(pci
);
654 val
= dw_pcie_readl_dbi(pci
, PCI_INTERRUPT_LINE
);
657 dw_pcie_writel_dbi(pci
, PCI_INTERRUPT_LINE
, val
);
658 dw_pcie_dbi_ro_wr_dis(pci
);
660 /* Setup bus numbers */
661 val
= dw_pcie_readl_dbi(pci
, PCI_PRIMARY_BUS
);
664 dw_pcie_writel_dbi(pci
, PCI_PRIMARY_BUS
, val
);
666 /* Setup command register */
667 val
= dw_pcie_readl_dbi(pci
, PCI_COMMAND
);
669 val
|= PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
|
670 PCI_COMMAND_MASTER
| PCI_COMMAND_SERR
;
671 dw_pcie_writel_dbi(pci
, PCI_COMMAND
, val
);
674 * If the platform provides ->rd_other_conf, it means the platform
675 * uses its own address translation component rather than ATU, so
676 * we should not program the ATU here.
678 if (!pp
->ops
->rd_other_conf
) {
679 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX0
,
680 PCIE_ATU_TYPE_MEM
, pp
->mem_base
,
681 pp
->mem_bus_addr
, pp
->mem_size
);
682 if (pci
->num_viewport
> 2)
683 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX2
,
684 PCIE_ATU_TYPE_IO
, pp
->io_base
,
685 pp
->io_bus_addr
, pp
->io_size
);
688 dw_pcie_wr_own_conf(pp
, PCI_BASE_ADDRESS_0
, 4, 0);
690 /* Enable write permission for the DBI read-only register */
691 dw_pcie_dbi_ro_wr_en(pci
);
692 /* Program correct class for RC */
693 dw_pcie_wr_own_conf(pp
, PCI_CLASS_DEVICE
, 2, PCI_CLASS_BRIDGE_PCI
);
694 /* Better disable write permission right after the update */
695 dw_pcie_dbi_ro_wr_dis(pci
);
697 dw_pcie_rd_own_conf(pp
, PCIE_LINK_WIDTH_SPEED_CONTROL
, 4, &val
);
698 val
|= PORT_LOGIC_SPEED_CHANGE
;
699 dw_pcie_wr_own_conf(pp
, PCIE_LINK_WIDTH_SPEED_CONTROL
, 4, val
);
701 EXPORT_SYMBOL_GPL(dw_pcie_setup_rc
);