// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
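
/*
 * Accessors for the root port's own configuration space.  By default they
 * go straight through the DBI window; an SoC glue driver may override them
 * via the rd_own_conf/wr_own_conf hooks in its dw_pcie_host_ops.  A minimal
 * override might look like this (hypothetical, for illustration only):
 *
 *	static int my_rd_own_conf(struct pcie_port *pp, int where,
 *				  int size, u32 *val)
 *	{
 *		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 *
 *		// apply an SoC-specific quirk here, then fall back to DBI
 *		return dw_pcie_read(pci->dbi_base + where, size, val);
 *	}
 */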
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}
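
/*
 * Glue between the PCI MSI layer and the hierarchical irq_chip below:
 * ack is forwarded to the parent chip, while mask/unmask update the
 * per-device MSI mask bits and then propagate to the parent.
 */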
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
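
/*
 * The controller groups MSI vectors into per-controller register blocks of
 * MAX_MSI_IRQS_PER_CTRL vectors each; consecutive blocks of status/enable
 * registers are MSI_REG_CTRL_BLOCK_SIZE bytes apart.  The handler below
 * scans each block's status register and dispatches every pending bit
 * through the IRQ domain.
 */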
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	u32 val, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val,
					    MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
						(i * MSI_REG_CTRL_BLOCK_SIZE),
					    4, 1 << pos);
			pos++;
		}
	}

	return ret;
}
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}
static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	if (pp->ops->get_msi_data)
		msg->data = pp->ops->get_msi_data(pp, data->hwirq);
	else
		msg->data = data->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}
static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
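
/*
 * Worked example of the vector-to-register arithmetic used below, assuming
 * the usual MAX_MSI_IRQS_PER_CTRL of 32: hwirq 37 falls in controller block
 * ctrl = 37 / 32 = 1, its enable register lives at PCIE_MSI_INTR0_ENABLE +
 * ctrl * MSI_REG_CTRL_BLOCK_SIZE, and the vector is bit = 37 % 32 = 5
 * within that register.
 */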
static void dw_pci_bottom_mask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_clear_irq) {
		pp->ops->msi_clear_irq(pp, data->hwirq);
	} else {
		ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;

		pp->irq_status[ctrl] &= ~(1 << bit);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
				    pp->irq_status[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_unmask(struct irq_data *data)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_set_irq) {
		pp->ops->msi_set_irq(pp, data->hwirq);
	} else {
		ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;

		pp->irq_status[ctrl] |= 1 << bit;
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
				    pp->irq_status[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct msi_desc *msi = irq_data_get_msi_desc(d);
	struct pcie_port *pp;

	pp = msi_desc_to_pci_sysdata(msi);

	if (pp->ops->msi_irq_ack)
		pp->ops->msi_irq_ack(d->hwirq, pp);
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
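
/*
 * hwirq numbers are handed out from a bitmap of pp->num_vectors bits.
 * Multi-MSI requires a naturally aligned, power-of-two block, which
 * bitmap_find_free_region(..., order_base_2(nr_irqs)) provides: a request
 * for 6 vectors, for example, claims an aligned region of 8.
 */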
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    &dw_pci_msi_bottom_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
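
/*
 * Two stacked IRQ domains are created below: a linear domain mapping
 * hwirqs onto dw_pci_msi_bottom_irq_chip, and a PCI/MSI domain on top
 * from which the PCI core allocates vectors for endpoint drivers.
 */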
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}
void dw_pcie_free_msi(struct pcie_port *pp)
{
	irq_set_chained_handler(pp->msi_irq, NULL);
	irq_set_handler_data(pp->msi_irq, NULL);

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}
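
/*
 * dw_pcie_msi_init() below maps one spare page and programs its DMA address
 * as the MSI target.  Endpoint MSI writes to that address are decoded by
 * the controller and latched into the MSI status registers; the page
 * contents themselves are never consumed, hence the DMA_FROM_DEVICE
 * mapping of an otherwise unused page.
 */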
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct page *page;
	u64 msi_target;

	page = alloc_page(GFP_KERNEL);
	pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(page);
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    lower_32_bits(msi_target));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    upper_32_bits(msi_target));
}
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win, *tmp;
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) >> 1;
		pp->cfg1_size = resource_size(cfg_res) >> 1;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
					&bridge->windows, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = devm_pci_remap_iospace(dev, win->res,
						     pp->io_base);
			if (ret) {
				dev_warn(dev, "Error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) >> 1;
			pp->cfg1_size = resource_size(pp->cfg) >> 1;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
						pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				goto error;
			}
		}

		if (!pp->ops->msi_host_init) {
			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				goto error;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							dw_chained_msi_isr,
							pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto error;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto error;

	bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_host_bridge(bridge);
	return ret;
}
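
/*
 * Config accesses to devices below the root port are made by borrowing ATU
 * viewport 1: CFG0 translation for devices on the immediate child bus,
 * CFG1 for buses further down.  With only two viewports available
 * (pci->num_viewport <= 2), viewport 1 normally carries the I/O window, so
 * it is restored to I/O translation after each access.
 */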
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
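
/*
 * Cores configured with the "unrolled" iATU address space have no
 * PCIE_ATU_VIEWPORT register; reading it then returns all ones, which is
 * what the check below uses to detect unroll support.
 */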
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++)
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &pp->irq_status[ctrl]);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* Get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}