// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
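/*
 * "Own" config accesses target the Root Port's own config space, which
 * the DesignWare core exposes through the DBI region; platform drivers
 * may override the access via the rd_own_conf/wr_own_conf callbacks.
 */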
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}
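/*
 * irq_chip callbacks for the top-level (PCI/MSI) domain: they forward to
 * the parent, controller-level chip; mask/unmask additionally update the
 * device's MSI capability through the standard PCI MSI helpers.
 */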
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}
static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};
static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
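/*
 * MSI int handler: scan each controller's MSI status register (one block
 * of MAX_MSI_IRQS_PER_CTRL vectors per controller) and dispatch every
 * pending vector to the Linux IRQ mapped in pp->irq_domain.
 */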
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	u32 val, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val,
					    MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}
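/*
 * Compose the MSI message for a vector: the address is the DMA-mapped
 * MSI target page (or a platform-specific address) and the data is the
 * hardware IRQ number, which lets the controller demultiplex vectors.
 */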
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	if (pp->ops->get_msi_data)
		msg->data = pp->ops->get_msi_data(pp, d->hwirq);
	else
		msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}
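/* Per-vector affinity is not supported; all MSIs funnel through one IRQ. */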
static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_clear_irq) {
		pp->ops->msi_clear_irq(pp, d->hwirq);
	} else {
		ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
		bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

		pp->irq_mask[ctrl] |= BIT(bit);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
				    pp->irq_mask[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_set_irq) {
		pp->ops->msi_set_irq(pp, d->hwirq);
	} else {
		ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
		bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

		pp->irq_mask[ctrl] &= ~BIT(bit);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
				    pp->irq_mask[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	raw_spin_lock_irqsave(&pp->lock, flags);

	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));

	if (pp->ops->msi_irq_ack)
		pp->ops->msi_irq_ack(d->hwirq, pp);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
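/*
 * Bottom-level irq_chip: drives the controller's per-vector mask and
 * status registers directly.
 */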
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
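/*
 * Allocate a contiguous, naturally aligned block of vectors from the
 * in-use bitmap; multi-MSI requires power-of-two alignment, hence
 * order_base_2(nr_irqs).
 */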
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    &dw_pci_msi_bottom_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
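/*
 * Create the two-level IRQ domain hierarchy: a linear domain for the
 * controller's vectors with a PCI MSI domain stacked on top of it.
 */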
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
					       &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}
void dw_pcie_free_msi(struct pcie_port *pp)
{
	irq_set_chained_handler(pp->msi_irq, NULL);
	irq_set_handler_data(pp->msi_irq, NULL);

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}
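/*
 * Reserve a DMA-able page and program its bus address as the MSI target:
 * MSI writes from downstream devices to this address are caught by the
 * controller and raised as its MSI interrupt. The page contents are
 * never read; only the address matters.
 */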
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct page *page;
	u64 msi_target;

	page = alloc_page(GFP_KERNEL);
	pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(page);
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    lower_32_bits(msi_target));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    upper_32_bits(msi_target));
}
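/*
 * Main host bring-up: parse the DT-provided config, I/O, memory and bus
 * resources, map the DBI and config windows, set up MSI handling, run
 * the platform hooks and finally scan and enumerate the root bus.
 */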
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win, *tmp;
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) >> 1;
		pp->cfg1_size = resource_size(cfg_res) >> 1;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
					&bridge->windows, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = devm_pci_remap_iospace(dev, win->res,
						     pp->io_base);
			if (ret) {
				dev_warn(dev, "Error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) >> 1;
			pp->cfg1_size = resource_size(pp->cfg) >> 1;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
						pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				goto error;
			}
		}

		if (!pp->ops->msi_host_init) {
			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				goto error;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							dw_chained_msi_isr,
							pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto error;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto error;

	bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_host_bridge(bridge);
	return ret;
}
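/*
 * Config accesses to devices below the Root Port re-program outbound
 * iATU region 1 as a CFG0 (immediate child bus) or CFG1 (deeper bus)
 * window for the target BDF, then go through the matching va_cfg
 * mapping. With only two viewports, region 1 doubles as the I/O window,
 * so it is restored after the access.
 */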
static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				     u32 devfn, int where, int size, u32 *val,
				     bool write)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	if (write)
		ret = dw_pcie_write(va_cfg_base + where, size, *val);
	else
		ret = dw_pcie_read(va_cfg_base + where, size, val);

	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
					 false);
}
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
					 true);
}
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}
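/* pci_ops entry points: route accesses to the own-/other-conf helpers. */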
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
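/*
 * On cores using the "unrolled" iATU layout the viewport register does
 * not exist and reads back as all ones; use that to detect unroll mode.
 */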
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
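/*
 * Program the Root Complex: MSI mask/enable state, RC BARs, interrupt
 * pin, bus numbers, command register, class code and, unless the
 * platform does its own address translation, the outbound iATU windows.
 */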
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		pp->irq_mask[ctrl] = ~0;
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, pp->irq_mask[ctrl]);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, ~0);
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* Get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		if (pci->iatu_unroll_enabled && !pci->atu_base)
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}