// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */
11 #include <linux/irqchip/chained_irq.h>
12 #include <linux/irqdomain.h>
13 #include <linux/msi.h>
14 #include <linux/of_address.h>
15 #include <linux/of_pci.h>
16 #include <linux/pci_regs.h>
17 #include <linux/platform_device.h>
19 #include "../../pci.h"
20 #include "pcie-designware.h"
/* Forward declaration; the ops table itself is defined near the bottom of this file. */
static struct pci_ops dw_pcie_ops
;
24 static int dw_pcie_rd_own_conf(struct pcie_port
*pp
, int where
, int size
,
29 if (pp
->ops
->rd_own_conf
)
30 return pp
->ops
->rd_own_conf(pp
, where
, size
, val
);
32 pci
= to_dw_pcie_from_pp(pp
);
33 return dw_pcie_read(pci
->dbi_base
+ where
, size
, val
);
36 static int dw_pcie_wr_own_conf(struct pcie_port
*pp
, int where
, int size
,
41 if (pp
->ops
->wr_own_conf
)
42 return pp
->ops
->wr_own_conf(pp
, where
, size
, val
);
44 pci
= to_dw_pcie_from_pp(pp
);
45 return dw_pcie_write(pci
->dbi_base
+ where
, size
, val
);
/* Ack an MSI by forwarding to the parent (bottom) irq_chip. */
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}
/* Mask an MSI both at the PCI device and in the parent irq_chip. */
static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
/* Unmask an MSI both at the PCI device and in the parent irq_chip. */
static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
65 static struct irq_chip dw_pcie_msi_irq_chip
= {
67 .irq_ack
= dw_msi_ack_irq
,
68 .irq_mask
= dw_msi_mask_irq
,
69 .irq_unmask
= dw_msi_unmask_irq
,
72 static struct msi_domain_info dw_pcie_msi_domain_info
= {
73 .flags
= (MSI_FLAG_USE_DEF_DOM_OPS
| MSI_FLAG_USE_DEF_CHIP_OPS
|
74 MSI_FLAG_PCI_MSIX
| MSI_FLAG_MULTI_PCI_MSI
),
75 .chip
= &dw_pcie_msi_irq_chip
,
79 irqreturn_t
dw_handle_msi_irq(struct pcie_port
*pp
)
83 u32 status
, num_ctrls
;
84 irqreturn_t ret
= IRQ_NONE
;
86 num_ctrls
= pp
->num_vectors
/ MAX_MSI_IRQS_PER_CTRL
;
88 for (i
= 0; i
< num_ctrls
; i
++) {
89 dw_pcie_rd_own_conf(pp
, PCIE_MSI_INTR0_STATUS
+
90 (i
* MSI_REG_CTRL_BLOCK_SIZE
),
98 while ((pos
= find_next_bit(&val
, MAX_MSI_IRQS_PER_CTRL
,
99 pos
)) != MAX_MSI_IRQS_PER_CTRL
) {
100 irq
= irq_find_mapping(pp
->irq_domain
,
101 (i
* MAX_MSI_IRQS_PER_CTRL
) +
103 generic_handle_irq(irq
);
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}
125 static void dw_pci_setup_msi_msg(struct irq_data
*d
, struct msi_msg
*msg
)
127 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
128 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
131 msi_target
= (u64
)pp
->msi_data
;
133 msg
->address_lo
= lower_32_bits(msi_target
);
134 msg
->address_hi
= upper_32_bits(msi_target
);
136 msg
->data
= d
->hwirq
;
138 dev_dbg(pci
->dev
, "msi#%d address_hi %#x address_lo %#x\n",
139 (int)d
->hwirq
, msg
->address_hi
, msg
->address_lo
);
142 static int dw_pci_msi_set_affinity(struct irq_data
*d
,
143 const struct cpumask
*mask
, bool force
)
148 static void dw_pci_bottom_mask(struct irq_data
*d
)
150 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
151 unsigned int res
, bit
, ctrl
;
154 raw_spin_lock_irqsave(&pp
->lock
, flags
);
156 ctrl
= d
->hwirq
/ MAX_MSI_IRQS_PER_CTRL
;
157 res
= ctrl
* MSI_REG_CTRL_BLOCK_SIZE
;
158 bit
= d
->hwirq
% MAX_MSI_IRQS_PER_CTRL
;
160 pp
->irq_mask
[ctrl
] |= BIT(bit
);
161 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_MASK
+ res
, 4,
164 raw_spin_unlock_irqrestore(&pp
->lock
, flags
);
167 static void dw_pci_bottom_unmask(struct irq_data
*d
)
169 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
170 unsigned int res
, bit
, ctrl
;
173 raw_spin_lock_irqsave(&pp
->lock
, flags
);
175 ctrl
= d
->hwirq
/ MAX_MSI_IRQS_PER_CTRL
;
176 res
= ctrl
* MSI_REG_CTRL_BLOCK_SIZE
;
177 bit
= d
->hwirq
% MAX_MSI_IRQS_PER_CTRL
;
179 pp
->irq_mask
[ctrl
] &= ~BIT(bit
);
180 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_MASK
+ res
, 4,
183 raw_spin_unlock_irqrestore(&pp
->lock
, flags
);
186 static void dw_pci_bottom_ack(struct irq_data
*d
)
188 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
189 unsigned int res
, bit
, ctrl
;
191 ctrl
= d
->hwirq
/ MAX_MSI_IRQS_PER_CTRL
;
192 res
= ctrl
* MSI_REG_CTRL_BLOCK_SIZE
;
193 bit
= d
->hwirq
% MAX_MSI_IRQS_PER_CTRL
;
195 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_STATUS
+ res
, 4, BIT(bit
));
198 static struct irq_chip dw_pci_msi_bottom_irq_chip
= {
200 .irq_ack
= dw_pci_bottom_ack
,
201 .irq_compose_msi_msg
= dw_pci_setup_msi_msg
,
202 .irq_set_affinity
= dw_pci_msi_set_affinity
,
203 .irq_mask
= dw_pci_bottom_mask
,
204 .irq_unmask
= dw_pci_bottom_unmask
,
207 static int dw_pcie_irq_domain_alloc(struct irq_domain
*domain
,
208 unsigned int virq
, unsigned int nr_irqs
,
211 struct pcie_port
*pp
= domain
->host_data
;
216 raw_spin_lock_irqsave(&pp
->lock
, flags
);
218 bit
= bitmap_find_free_region(pp
->msi_irq_in_use
, pp
->num_vectors
,
219 order_base_2(nr_irqs
));
221 raw_spin_unlock_irqrestore(&pp
->lock
, flags
);
226 for (i
= 0; i
< nr_irqs
; i
++)
227 irq_domain_set_info(domain
, virq
+ i
, bit
+ i
,
235 static void dw_pcie_irq_domain_free(struct irq_domain
*domain
,
236 unsigned int virq
, unsigned int nr_irqs
)
238 struct irq_data
*d
= irq_domain_get_irq_data(domain
, virq
);
239 struct pcie_port
*pp
= irq_data_get_irq_chip_data(d
);
242 raw_spin_lock_irqsave(&pp
->lock
, flags
);
244 bitmap_release_region(pp
->msi_irq_in_use
, d
->hwirq
,
245 order_base_2(nr_irqs
));
247 raw_spin_unlock_irqrestore(&pp
->lock
, flags
);
250 static const struct irq_domain_ops dw_pcie_msi_domain_ops
= {
251 .alloc
= dw_pcie_irq_domain_alloc
,
252 .free
= dw_pcie_irq_domain_free
,
255 int dw_pcie_allocate_domains(struct pcie_port
*pp
)
257 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
258 struct fwnode_handle
*fwnode
= of_node_to_fwnode(pci
->dev
->of_node
);
260 pp
->irq_domain
= irq_domain_create_linear(fwnode
, pp
->num_vectors
,
261 &dw_pcie_msi_domain_ops
, pp
);
262 if (!pp
->irq_domain
) {
263 dev_err(pci
->dev
, "Failed to create IRQ domain\n");
267 pp
->msi_domain
= pci_msi_create_irq_domain(fwnode
,
268 &dw_pcie_msi_domain_info
,
270 if (!pp
->msi_domain
) {
271 dev_err(pci
->dev
, "Failed to create MSI domain\n");
272 irq_domain_remove(pp
->irq_domain
);
279 void dw_pcie_free_msi(struct pcie_port
*pp
)
282 irq_set_chained_handler(pp
->msi_irq
, NULL
);
283 irq_set_handler_data(pp
->msi_irq
, NULL
);
286 irq_domain_remove(pp
->msi_domain
);
287 irq_domain_remove(pp
->irq_domain
);
290 __free_page(pp
->msi_page
);
293 void dw_pcie_msi_init(struct pcie_port
*pp
)
295 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
296 struct device
*dev
= pci
->dev
;
299 pp
->msi_page
= alloc_page(GFP_KERNEL
);
300 pp
->msi_data
= dma_map_page(dev
, pp
->msi_page
, 0, PAGE_SIZE
,
302 if (dma_mapping_error(dev
, pp
->msi_data
)) {
303 dev_err(dev
, "Failed to map MSI data\n");
304 __free_page(pp
->msi_page
);
308 msi_target
= (u64
)pp
->msi_data
;
310 /* Program the msi_data */
311 dw_pcie_wr_own_conf(pp
, PCIE_MSI_ADDR_LO
, 4,
312 lower_32_bits(msi_target
));
313 dw_pcie_wr_own_conf(pp
, PCIE_MSI_ADDR_HI
, 4,
314 upper_32_bits(msi_target
));
316 EXPORT_SYMBOL_GPL(dw_pcie_msi_init
);
318 int dw_pcie_host_init(struct pcie_port
*pp
)
320 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
321 struct device
*dev
= pci
->dev
;
322 struct device_node
*np
= dev
->of_node
;
323 struct platform_device
*pdev
= to_platform_device(dev
);
324 struct resource_entry
*win
;
325 struct pci_bus
*child
;
326 struct pci_host_bridge
*bridge
;
327 struct resource
*cfg_res
;
331 raw_spin_lock_init(&pci
->pp
.lock
);
333 cfg_res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "config");
335 pp
->cfg0_size
= resource_size(cfg_res
) >> 1;
336 pp
->cfg1_size
= resource_size(cfg_res
) >> 1;
337 pp
->cfg0_base
= cfg_res
->start
;
338 pp
->cfg1_base
= cfg_res
->start
+ pp
->cfg0_size
;
339 } else if (!pp
->va_cfg0_base
) {
340 dev_err(dev
, "Missing *config* reg space\n");
343 bridge
= devm_pci_alloc_host_bridge(dev
, 0);
347 ret
= pci_parse_request_of_pci_ranges(dev
, &bridge
->windows
,
348 &bridge
->dma_ranges
, NULL
);
352 /* Get the I/O and memory ranges from DT */
353 resource_list_for_each_entry(win
, &bridge
->windows
) {
354 switch (resource_type(win
->res
)) {
357 pp
->io
->name
= "I/O";
358 pp
->io_size
= resource_size(pp
->io
);
359 pp
->io_bus_addr
= pp
->io
->start
- win
->offset
;
360 pp
->io_base
= pci_pio_to_address(pp
->io
->start
);
364 pp
->mem
->name
= "MEM";
365 pp
->mem_size
= resource_size(pp
->mem
);
366 pp
->mem_bus_addr
= pp
->mem
->start
- win
->offset
;
370 pp
->cfg0_size
= resource_size(pp
->cfg
) >> 1;
371 pp
->cfg1_size
= resource_size(pp
->cfg
) >> 1;
372 pp
->cfg0_base
= pp
->cfg
->start
;
373 pp
->cfg1_base
= pp
->cfg
->start
+ pp
->cfg0_size
;
381 if (!pci
->dbi_base
) {
382 pci
->dbi_base
= devm_pci_remap_cfgspace(dev
,
384 resource_size(pp
->cfg
));
385 if (!pci
->dbi_base
) {
386 dev_err(dev
, "Error with ioremap\n");
391 pp
->mem_base
= pp
->mem
->start
;
393 if (!pp
->va_cfg0_base
) {
394 pp
->va_cfg0_base
= devm_pci_remap_cfgspace(dev
,
395 pp
->cfg0_base
, pp
->cfg0_size
);
396 if (!pp
->va_cfg0_base
) {
397 dev_err(dev
, "Error with ioremap in function\n");
402 if (!pp
->va_cfg1_base
) {
403 pp
->va_cfg1_base
= devm_pci_remap_cfgspace(dev
,
406 if (!pp
->va_cfg1_base
) {
407 dev_err(dev
, "Error with ioremap\n");
412 ret
= of_property_read_u32(np
, "num-viewport", &pci
->num_viewport
);
414 pci
->num_viewport
= 2;
416 if (pci_msi_enabled()) {
418 * If a specific SoC driver needs to change the
419 * default number of vectors, it needs to implement
420 * the set_num_vectors callback.
422 if (!pp
->ops
->set_num_vectors
) {
423 pp
->num_vectors
= MSI_DEF_NUM_VECTORS
;
425 pp
->ops
->set_num_vectors(pp
);
427 if (pp
->num_vectors
> MAX_MSI_IRQS
||
428 pp
->num_vectors
== 0) {
430 "Invalid number of vectors\n");
435 if (!pp
->ops
->msi_host_init
) {
436 pp
->msi_irq_chip
= &dw_pci_msi_bottom_irq_chip
;
438 ret
= dw_pcie_allocate_domains(pp
);
443 irq_set_chained_handler_and_data(pp
->msi_irq
,
447 ret
= pp
->ops
->msi_host_init(pp
);
453 if (pp
->ops
->host_init
) {
454 ret
= pp
->ops
->host_init(pp
);
459 ret
= dw_pcie_rd_own_conf(pp
, PCI_HEADER_TYPE
, 1, &hdr_type
);
460 if (ret
!= PCIBIOS_SUCCESSFUL
) {
461 dev_err(pci
->dev
, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
463 ret
= pcibios_err_to_errno(ret
);
466 if (hdr_type
!= PCI_HEADER_TYPE_BRIDGE
) {
468 "PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
474 pp
->root_bus_nr
= pp
->busn
->start
;
476 bridge
->dev
.parent
= dev
;
477 bridge
->sysdata
= pp
;
478 bridge
->busnr
= pp
->root_bus_nr
;
479 bridge
->ops
= &dw_pcie_ops
;
480 bridge
->map_irq
= of_irq_parse_and_map_pci
;
481 bridge
->swizzle_irq
= pci_common_swizzle
;
483 ret
= pci_scan_root_bus_bridge(bridge
);
487 pp
->root_bus
= bridge
->bus
;
489 if (pp
->ops
->scan_bus
)
490 pp
->ops
->scan_bus(pp
);
492 pci_bus_size_bridges(pp
->root_bus
);
493 pci_bus_assign_resources(pp
->root_bus
);
495 list_for_each_entry(child
, &pp
->root_bus
->children
, node
)
496 pcie_bus_configure_settings(child
);
498 pci_bus_add_devices(pp
->root_bus
);
502 if (pci_msi_enabled() && !pp
->ops
->msi_host_init
)
503 dw_pcie_free_msi(pp
);
506 EXPORT_SYMBOL_GPL(dw_pcie_host_init
);
508 void dw_pcie_host_deinit(struct pcie_port
*pp
)
510 pci_stop_root_bus(pp
->root_bus
);
511 pci_remove_root_bus(pp
->root_bus
);
512 if (pci_msi_enabled() && !pp
->ops
->msi_host_init
)
513 dw_pcie_free_msi(pp
);
515 EXPORT_SYMBOL_GPL(dw_pcie_host_deinit
);
517 static int dw_pcie_access_other_conf(struct pcie_port
*pp
, struct pci_bus
*bus
,
518 u32 devfn
, int where
, int size
, u32
*val
,
522 u32 busdev
, cfg_size
;
524 void __iomem
*va_cfg_base
;
525 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
527 busdev
= PCIE_ATU_BUS(bus
->number
) | PCIE_ATU_DEV(PCI_SLOT(devfn
)) |
528 PCIE_ATU_FUNC(PCI_FUNC(devfn
));
530 if (bus
->parent
->number
== pp
->root_bus_nr
) {
531 type
= PCIE_ATU_TYPE_CFG0
;
532 cpu_addr
= pp
->cfg0_base
;
533 cfg_size
= pp
->cfg0_size
;
534 va_cfg_base
= pp
->va_cfg0_base
;
536 type
= PCIE_ATU_TYPE_CFG1
;
537 cpu_addr
= pp
->cfg1_base
;
538 cfg_size
= pp
->cfg1_size
;
539 va_cfg_base
= pp
->va_cfg1_base
;
542 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX1
,
546 ret
= dw_pcie_write(va_cfg_base
+ where
, size
, *val
);
548 ret
= dw_pcie_read(va_cfg_base
+ where
, size
, val
);
550 if (pci
->num_viewport
<= 2)
551 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX1
,
552 PCIE_ATU_TYPE_IO
, pp
->io_base
,
553 pp
->io_bus_addr
, pp
->io_size
);
558 static int dw_pcie_rd_other_conf(struct pcie_port
*pp
, struct pci_bus
*bus
,
559 u32 devfn
, int where
, int size
, u32
*val
)
561 if (pp
->ops
->rd_other_conf
)
562 return pp
->ops
->rd_other_conf(pp
, bus
, devfn
, where
,
565 return dw_pcie_access_other_conf(pp
, bus
, devfn
, where
, size
, val
,
569 static int dw_pcie_wr_other_conf(struct pcie_port
*pp
, struct pci_bus
*bus
,
570 u32 devfn
, int where
, int size
, u32 val
)
572 if (pp
->ops
->wr_other_conf
)
573 return pp
->ops
->wr_other_conf(pp
, bus
, devfn
, where
,
576 return dw_pcie_access_other_conf(pp
, bus
, devfn
, where
, size
, &val
,
580 static int dw_pcie_valid_device(struct pcie_port
*pp
, struct pci_bus
*bus
,
583 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
585 /* If there is no link, then there is no device */
586 if (bus
->number
!= pp
->root_bus_nr
) {
587 if (!dw_pcie_link_up(pci
))
591 /* Access only one slot on each root port */
592 if (bus
->number
== pp
->root_bus_nr
&& dev
> 0)
598 static int dw_pcie_rd_conf(struct pci_bus
*bus
, u32 devfn
, int where
,
601 struct pcie_port
*pp
= bus
->sysdata
;
603 if (!dw_pcie_valid_device(pp
, bus
, PCI_SLOT(devfn
))) {
605 return PCIBIOS_DEVICE_NOT_FOUND
;
608 if (bus
->number
== pp
->root_bus_nr
)
609 return dw_pcie_rd_own_conf(pp
, where
, size
, val
);
611 return dw_pcie_rd_other_conf(pp
, bus
, devfn
, where
, size
, val
);
614 static int dw_pcie_wr_conf(struct pci_bus
*bus
, u32 devfn
,
615 int where
, int size
, u32 val
)
617 struct pcie_port
*pp
= bus
->sysdata
;
619 if (!dw_pcie_valid_device(pp
, bus
, PCI_SLOT(devfn
)))
620 return PCIBIOS_DEVICE_NOT_FOUND
;
622 if (bus
->number
== pp
->root_bus_nr
)
623 return dw_pcie_wr_own_conf(pp
, where
, size
, val
);
625 return dw_pcie_wr_other_conf(pp
, bus
, devfn
, where
, size
, val
);
628 static struct pci_ops dw_pcie_ops
= {
629 .read
= dw_pcie_rd_conf
,
630 .write
= dw_pcie_wr_conf
,
633 void dw_pcie_setup_rc(struct pcie_port
*pp
)
635 u32 val
, ctrl
, num_ctrls
;
636 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
639 * Enable DBI read-only registers for writing/updating configuration.
640 * Write permission gets disabled towards the end of this function.
642 dw_pcie_dbi_ro_wr_en(pci
);
646 if (!pp
->ops
->msi_host_init
) {
647 num_ctrls
= pp
->num_vectors
/ MAX_MSI_IRQS_PER_CTRL
;
649 /* Initialize IRQ Status array */
650 for (ctrl
= 0; ctrl
< num_ctrls
; ctrl
++) {
651 pp
->irq_mask
[ctrl
] = ~0;
652 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_MASK
+
653 (ctrl
* MSI_REG_CTRL_BLOCK_SIZE
),
654 4, pp
->irq_mask
[ctrl
]);
655 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_ENABLE
+
656 (ctrl
* MSI_REG_CTRL_BLOCK_SIZE
),
662 dw_pcie_writel_dbi(pci
, PCI_BASE_ADDRESS_0
, 0x00000004);
663 dw_pcie_writel_dbi(pci
, PCI_BASE_ADDRESS_1
, 0x00000000);
665 /* Setup interrupt pins */
666 val
= dw_pcie_readl_dbi(pci
, PCI_INTERRUPT_LINE
);
669 dw_pcie_writel_dbi(pci
, PCI_INTERRUPT_LINE
, val
);
671 /* Setup bus numbers */
672 val
= dw_pcie_readl_dbi(pci
, PCI_PRIMARY_BUS
);
675 dw_pcie_writel_dbi(pci
, PCI_PRIMARY_BUS
, val
);
677 /* Setup command register */
678 val
= dw_pcie_readl_dbi(pci
, PCI_COMMAND
);
680 val
|= PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
|
681 PCI_COMMAND_MASTER
| PCI_COMMAND_SERR
;
682 dw_pcie_writel_dbi(pci
, PCI_COMMAND
, val
);
685 * If the platform provides ->rd_other_conf, it means the platform
686 * uses its own address translation component rather than ATU, so
687 * we should not program the ATU here.
689 if (!pp
->ops
->rd_other_conf
) {
690 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX0
,
691 PCIE_ATU_TYPE_MEM
, pp
->mem_base
,
692 pp
->mem_bus_addr
, pp
->mem_size
);
693 if (pci
->num_viewport
> 2)
694 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX2
,
695 PCIE_ATU_TYPE_IO
, pp
->io_base
,
696 pp
->io_bus_addr
, pp
->io_size
);
699 dw_pcie_wr_own_conf(pp
, PCI_BASE_ADDRESS_0
, 4, 0);
701 /* Program correct class for RC */
702 dw_pcie_wr_own_conf(pp
, PCI_CLASS_DEVICE
, 2, PCI_CLASS_BRIDGE_PCI
);
704 dw_pcie_rd_own_conf(pp
, PCIE_LINK_WIDTH_SPEED_CONTROL
, 4, &val
);
705 val
|= PORT_LOGIC_SPEED_CHANGE
;
706 dw_pcie_wr_own_conf(pp
, PCIE_LINK_WIDTH_SPEED_CONTROL
, 4, val
);
708 dw_pcie_dbi_ro_wr_dis(pci
);
710 EXPORT_SYMBOL_GPL(dw_pcie_setup_rc
);