// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */
11 #include <linux/irqdomain.h>
12 #include <linux/of_address.h>
13 #include <linux/of_pci.h>
14 #include <linux/pci_regs.h>
15 #include <linux/platform_device.h>
17 #include "pcie-designware.h"
19 static struct pci_ops dw_pcie_ops
;
21 static int dw_pcie_rd_own_conf(struct pcie_port
*pp
, int where
, int size
,
26 if (pp
->ops
->rd_own_conf
)
27 return pp
->ops
->rd_own_conf(pp
, where
, size
, val
);
29 pci
= to_dw_pcie_from_pp(pp
);
30 return dw_pcie_read(pci
->dbi_base
+ where
, size
, val
);
33 static int dw_pcie_wr_own_conf(struct pcie_port
*pp
, int where
, int size
,
38 if (pp
->ops
->wr_own_conf
)
39 return pp
->ops
->wr_own_conf(pp
, where
, size
, val
);
41 pci
= to_dw_pcie_from_pp(pp
);
42 return dw_pcie_write(pci
->dbi_base
+ where
, size
, val
);
45 static struct irq_chip dw_msi_irq_chip
= {
47 .irq_enable
= pci_msi_unmask_irq
,
48 .irq_disable
= pci_msi_mask_irq
,
49 .irq_mask
= pci_msi_mask_irq
,
50 .irq_unmask
= pci_msi_unmask_irq
,
54 irqreturn_t
dw_handle_msi_irq(struct pcie_port
*pp
)
58 irqreturn_t ret
= IRQ_NONE
;
60 for (i
= 0; i
< MAX_MSI_CTRLS
; i
++) {
61 dw_pcie_rd_own_conf(pp
, PCIE_MSI_INTR0_STATUS
+ i
* 12, 4,
68 while ((pos
= find_next_bit((unsigned long *) &val
, 32,
70 irq
= irq_find_mapping(pp
->irq_domain
, i
* 32 + pos
);
71 generic_handle_irq(irq
);
72 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_STATUS
+ i
* 12,
81 void dw_pcie_msi_init(struct pcie_port
*pp
)
83 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
84 struct device
*dev
= pci
->dev
;
88 page
= alloc_page(GFP_KERNEL
);
89 pp
->msi_data
= dma_map_page(dev
, page
, 0, PAGE_SIZE
, DMA_FROM_DEVICE
);
90 if (dma_mapping_error(dev
, pp
->msi_data
)) {
91 dev_err(dev
, "failed to map MSI data\n");
95 msi_target
= (u64
)pp
->msi_data
;
97 /* program the msi_data */
98 dw_pcie_wr_own_conf(pp
, PCIE_MSI_ADDR_LO
, 4,
99 (u32
)(msi_target
& 0xffffffff));
100 dw_pcie_wr_own_conf(pp
, PCIE_MSI_ADDR_HI
, 4,
101 (u32
)(msi_target
>> 32 & 0xffffffff));
104 static void dw_pcie_msi_clear_irq(struct pcie_port
*pp
, int irq
)
106 unsigned int res
, bit
, val
;
108 res
= (irq
/ 32) * 12;
110 dw_pcie_rd_own_conf(pp
, PCIE_MSI_INTR0_ENABLE
+ res
, 4, &val
);
112 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_ENABLE
+ res
, 4, val
);
115 static void clear_irq_range(struct pcie_port
*pp
, unsigned int irq_base
,
116 unsigned int nvec
, unsigned int pos
)
120 for (i
= 0; i
< nvec
; i
++) {
121 irq_set_msi_desc_off(irq_base
, i
, NULL
);
122 /* Disable corresponding interrupt on MSI controller */
123 if (pp
->ops
->msi_clear_irq
)
124 pp
->ops
->msi_clear_irq(pp
, pos
+ i
);
126 dw_pcie_msi_clear_irq(pp
, pos
+ i
);
129 bitmap_release_region(pp
->msi_irq_in_use
, pos
, order_base_2(nvec
));
132 static void dw_pcie_msi_set_irq(struct pcie_port
*pp
, int irq
)
134 unsigned int res
, bit
, val
;
136 res
= (irq
/ 32) * 12;
138 dw_pcie_rd_own_conf(pp
, PCIE_MSI_INTR0_ENABLE
+ res
, 4, &val
);
140 dw_pcie_wr_own_conf(pp
, PCIE_MSI_INTR0_ENABLE
+ res
, 4, val
);
143 static int assign_irq(int no_irqs
, struct msi_desc
*desc
, int *pos
)
146 struct pcie_port
*pp
;
148 pp
= (struct pcie_port
*)msi_desc_to_pci_sysdata(desc
);
149 pos0
= bitmap_find_free_region(pp
->msi_irq_in_use
, MAX_MSI_IRQS
,
150 order_base_2(no_irqs
));
154 irq
= irq_find_mapping(pp
->irq_domain
, pos0
);
159 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
160 * descs so there is no need to allocate descs here. We can therefore
161 * assume that if irq_find_mapping above returns non-zero, then the
162 * descs are also successfully allocated.
165 for (i
= 0; i
< no_irqs
; i
++) {
166 if (irq_set_msi_desc_off(irq
, i
, desc
) != 0) {
167 clear_irq_range(pp
, irq
, i
, pos0
);
170 /*Enable corresponding interrupt in MSI interrupt controller */
171 if (pp
->ops
->msi_set_irq
)
172 pp
->ops
->msi_set_irq(pp
, pos0
+ i
);
174 dw_pcie_msi_set_irq(pp
, pos0
+ i
);
178 desc
->nvec_used
= no_irqs
;
179 desc
->msi_attrib
.multiple
= order_base_2(no_irqs
);
188 static void dw_msi_setup_msg(struct pcie_port
*pp
, unsigned int irq
, u32 pos
)
193 if (pp
->ops
->get_msi_addr
)
194 msi_target
= pp
->ops
->get_msi_addr(pp
);
196 msi_target
= (u64
)pp
->msi_data
;
198 msg
.address_lo
= (u32
)(msi_target
& 0xffffffff);
199 msg
.address_hi
= (u32
)(msi_target
>> 32 & 0xffffffff);
201 if (pp
->ops
->get_msi_data
)
202 msg
.data
= pp
->ops
->get_msi_data(pp
, pos
);
206 pci_write_msi_msg(irq
, &msg
);
209 static int dw_msi_setup_irq(struct msi_controller
*chip
, struct pci_dev
*pdev
,
210 struct msi_desc
*desc
)
213 struct pcie_port
*pp
= pdev
->bus
->sysdata
;
215 if (desc
->msi_attrib
.is_msix
)
218 irq
= assign_irq(1, desc
, &pos
);
222 dw_msi_setup_msg(pp
, irq
, pos
);
227 static int dw_msi_setup_irqs(struct msi_controller
*chip
, struct pci_dev
*pdev
,
230 #ifdef CONFIG_PCI_MSI
232 struct msi_desc
*desc
;
233 struct pcie_port
*pp
= pdev
->bus
->sysdata
;
235 /* MSI-X interrupts are not supported */
236 if (type
== PCI_CAP_ID_MSIX
)
239 WARN_ON(!list_is_singular(&pdev
->dev
.msi_list
));
240 desc
= list_entry(pdev
->dev
.msi_list
.next
, struct msi_desc
, list
);
242 irq
= assign_irq(nvec
, desc
, &pos
);
246 dw_msi_setup_msg(pp
, irq
, pos
);
254 static void dw_msi_teardown_irq(struct msi_controller
*chip
, unsigned int irq
)
256 struct irq_data
*data
= irq_get_irq_data(irq
);
257 struct msi_desc
*msi
= irq_data_get_msi_desc(data
);
258 struct pcie_port
*pp
= (struct pcie_port
*)msi_desc_to_pci_sysdata(msi
);
260 clear_irq_range(pp
, irq
, 1, data
->hwirq
);
263 static struct msi_controller dw_pcie_msi_chip
= {
264 .setup_irq
= dw_msi_setup_irq
,
265 .setup_irqs
= dw_msi_setup_irqs
,
266 .teardown_irq
= dw_msi_teardown_irq
,
269 static int dw_pcie_msi_map(struct irq_domain
*domain
, unsigned int irq
,
270 irq_hw_number_t hwirq
)
272 irq_set_chip_and_handler(irq
, &dw_msi_irq_chip
, handle_simple_irq
);
273 irq_set_chip_data(irq
, domain
->host_data
);
278 static const struct irq_domain_ops msi_domain_ops
= {
279 .map
= dw_pcie_msi_map
,
282 int dw_pcie_host_init(struct pcie_port
*pp
)
284 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
285 struct device
*dev
= pci
->dev
;
286 struct device_node
*np
= dev
->of_node
;
287 struct platform_device
*pdev
= to_platform_device(dev
);
288 struct pci_bus
*bus
, *child
;
289 struct pci_host_bridge
*bridge
;
290 struct resource
*cfg_res
;
292 struct resource_entry
*win
, *tmp
;
294 cfg_res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "config");
296 pp
->cfg0_size
= resource_size(cfg_res
) / 2;
297 pp
->cfg1_size
= resource_size(cfg_res
) / 2;
298 pp
->cfg0_base
= cfg_res
->start
;
299 pp
->cfg1_base
= cfg_res
->start
+ pp
->cfg0_size
;
300 } else if (!pp
->va_cfg0_base
) {
301 dev_err(dev
, "missing *config* reg space\n");
304 bridge
= pci_alloc_host_bridge(0);
308 ret
= of_pci_get_host_bridge_resources(np
, 0, 0xff,
309 &bridge
->windows
, &pp
->io_base
);
313 ret
= devm_request_pci_bus_resources(dev
, &bridge
->windows
);
317 /* Get the I/O and memory ranges from DT */
318 resource_list_for_each_entry_safe(win
, tmp
, &bridge
->windows
) {
319 switch (resource_type(win
->res
)) {
321 ret
= pci_remap_iospace(win
->res
, pp
->io_base
);
323 dev_warn(dev
, "error %d: failed to map resource %pR\n",
325 resource_list_destroy_entry(win
);
328 pp
->io
->name
= "I/O";
329 pp
->io_size
= resource_size(pp
->io
);
330 pp
->io_bus_addr
= pp
->io
->start
- win
->offset
;
335 pp
->mem
->name
= "MEM";
336 pp
->mem_size
= resource_size(pp
->mem
);
337 pp
->mem_bus_addr
= pp
->mem
->start
- win
->offset
;
341 pp
->cfg0_size
= resource_size(pp
->cfg
) / 2;
342 pp
->cfg1_size
= resource_size(pp
->cfg
) / 2;
343 pp
->cfg0_base
= pp
->cfg
->start
;
344 pp
->cfg1_base
= pp
->cfg
->start
+ pp
->cfg0_size
;
352 if (!pci
->dbi_base
) {
353 pci
->dbi_base
= devm_pci_remap_cfgspace(dev
,
355 resource_size(pp
->cfg
));
356 if (!pci
->dbi_base
) {
357 dev_err(dev
, "error with ioremap\n");
363 pp
->mem_base
= pp
->mem
->start
;
365 if (!pp
->va_cfg0_base
) {
366 pp
->va_cfg0_base
= devm_pci_remap_cfgspace(dev
,
367 pp
->cfg0_base
, pp
->cfg0_size
);
368 if (!pp
->va_cfg0_base
) {
369 dev_err(dev
, "error with ioremap in function\n");
375 if (!pp
->va_cfg1_base
) {
376 pp
->va_cfg1_base
= devm_pci_remap_cfgspace(dev
,
379 if (!pp
->va_cfg1_base
) {
380 dev_err(dev
, "error with ioremap\n");
386 ret
= of_property_read_u32(np
, "num-viewport", &pci
->num_viewport
);
388 pci
->num_viewport
= 2;
390 if (IS_ENABLED(CONFIG_PCI_MSI
)) {
391 if (!pp
->ops
->msi_host_init
) {
392 pp
->irq_domain
= irq_domain_add_linear(dev
->of_node
,
393 MAX_MSI_IRQS
, &msi_domain_ops
,
395 if (!pp
->irq_domain
) {
396 dev_err(dev
, "irq domain init failed\n");
401 for (i
= 0; i
< MAX_MSI_IRQS
; i
++)
402 irq_create_mapping(pp
->irq_domain
, i
);
404 ret
= pp
->ops
->msi_host_init(pp
, &dw_pcie_msi_chip
);
410 if (pp
->ops
->host_init
) {
411 ret
= pp
->ops
->host_init(pp
);
416 pp
->root_bus_nr
= pp
->busn
->start
;
418 bridge
->dev
.parent
= dev
;
419 bridge
->sysdata
= pp
;
420 bridge
->busnr
= pp
->root_bus_nr
;
421 bridge
->ops
= &dw_pcie_ops
;
422 bridge
->map_irq
= of_irq_parse_and_map_pci
;
423 bridge
->swizzle_irq
= pci_common_swizzle
;
424 if (IS_ENABLED(CONFIG_PCI_MSI
)) {
425 bridge
->msi
= &dw_pcie_msi_chip
;
426 dw_pcie_msi_chip
.dev
= dev
;
429 ret
= pci_scan_root_bus_bridge(bridge
);
435 if (pp
->ops
->scan_bus
)
436 pp
->ops
->scan_bus(pp
);
438 pci_bus_size_bridges(bus
);
439 pci_bus_assign_resources(bus
);
441 list_for_each_entry(child
, &bus
->children
, node
)
442 pcie_bus_configure_settings(child
);
444 pci_bus_add_devices(bus
);
448 pci_free_host_bridge(bridge
);
452 static int dw_pcie_rd_other_conf(struct pcie_port
*pp
, struct pci_bus
*bus
,
453 u32 devfn
, int where
, int size
, u32
*val
)
456 u32 busdev
, cfg_size
;
458 void __iomem
*va_cfg_base
;
459 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
461 if (pp
->ops
->rd_other_conf
)
462 return pp
->ops
->rd_other_conf(pp
, bus
, devfn
, where
, size
, val
);
464 busdev
= PCIE_ATU_BUS(bus
->number
) | PCIE_ATU_DEV(PCI_SLOT(devfn
)) |
465 PCIE_ATU_FUNC(PCI_FUNC(devfn
));
467 if (bus
->parent
->number
== pp
->root_bus_nr
) {
468 type
= PCIE_ATU_TYPE_CFG0
;
469 cpu_addr
= pp
->cfg0_base
;
470 cfg_size
= pp
->cfg0_size
;
471 va_cfg_base
= pp
->va_cfg0_base
;
473 type
= PCIE_ATU_TYPE_CFG1
;
474 cpu_addr
= pp
->cfg1_base
;
475 cfg_size
= pp
->cfg1_size
;
476 va_cfg_base
= pp
->va_cfg1_base
;
479 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX1
,
482 ret
= dw_pcie_read(va_cfg_base
+ where
, size
, val
);
483 if (pci
->num_viewport
<= 2)
484 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX1
,
485 PCIE_ATU_TYPE_IO
, pp
->io_base
,
486 pp
->io_bus_addr
, pp
->io_size
);
491 static int dw_pcie_wr_other_conf(struct pcie_port
*pp
, struct pci_bus
*bus
,
492 u32 devfn
, int where
, int size
, u32 val
)
495 u32 busdev
, cfg_size
;
497 void __iomem
*va_cfg_base
;
498 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
500 if (pp
->ops
->wr_other_conf
)
501 return pp
->ops
->wr_other_conf(pp
, bus
, devfn
, where
, size
, val
);
503 busdev
= PCIE_ATU_BUS(bus
->number
) | PCIE_ATU_DEV(PCI_SLOT(devfn
)) |
504 PCIE_ATU_FUNC(PCI_FUNC(devfn
));
506 if (bus
->parent
->number
== pp
->root_bus_nr
) {
507 type
= PCIE_ATU_TYPE_CFG0
;
508 cpu_addr
= pp
->cfg0_base
;
509 cfg_size
= pp
->cfg0_size
;
510 va_cfg_base
= pp
->va_cfg0_base
;
512 type
= PCIE_ATU_TYPE_CFG1
;
513 cpu_addr
= pp
->cfg1_base
;
514 cfg_size
= pp
->cfg1_size
;
515 va_cfg_base
= pp
->va_cfg1_base
;
518 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX1
,
521 ret
= dw_pcie_write(va_cfg_base
+ where
, size
, val
);
522 if (pci
->num_viewport
<= 2)
523 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX1
,
524 PCIE_ATU_TYPE_IO
, pp
->io_base
,
525 pp
->io_bus_addr
, pp
->io_size
);
530 static int dw_pcie_valid_device(struct pcie_port
*pp
, struct pci_bus
*bus
,
533 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
535 /* If there is no link, then there is no device */
536 if (bus
->number
!= pp
->root_bus_nr
) {
537 if (!dw_pcie_link_up(pci
))
541 /* access only one slot on each root port */
542 if (bus
->number
== pp
->root_bus_nr
&& dev
> 0)
548 static int dw_pcie_rd_conf(struct pci_bus
*bus
, u32 devfn
, int where
,
551 struct pcie_port
*pp
= bus
->sysdata
;
553 if (!dw_pcie_valid_device(pp
, bus
, PCI_SLOT(devfn
))) {
555 return PCIBIOS_DEVICE_NOT_FOUND
;
558 if (bus
->number
== pp
->root_bus_nr
)
559 return dw_pcie_rd_own_conf(pp
, where
, size
, val
);
561 return dw_pcie_rd_other_conf(pp
, bus
, devfn
, where
, size
, val
);
564 static int dw_pcie_wr_conf(struct pci_bus
*bus
, u32 devfn
,
565 int where
, int size
, u32 val
)
567 struct pcie_port
*pp
= bus
->sysdata
;
569 if (!dw_pcie_valid_device(pp
, bus
, PCI_SLOT(devfn
)))
570 return PCIBIOS_DEVICE_NOT_FOUND
;
572 if (bus
->number
== pp
->root_bus_nr
)
573 return dw_pcie_wr_own_conf(pp
, where
, size
, val
);
575 return dw_pcie_wr_other_conf(pp
, bus
, devfn
, where
, size
, val
);
578 static struct pci_ops dw_pcie_ops
= {
579 .read
= dw_pcie_rd_conf
,
580 .write
= dw_pcie_wr_conf
,
583 static u8
dw_pcie_iatu_unroll_enabled(struct dw_pcie
*pci
)
587 val
= dw_pcie_readl_dbi(pci
, PCIE_ATU_VIEWPORT
);
588 if (val
== 0xffffffff)
594 void dw_pcie_setup_rc(struct pcie_port
*pp
)
597 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
602 dw_pcie_writel_dbi(pci
, PCI_BASE_ADDRESS_0
, 0x00000004);
603 dw_pcie_writel_dbi(pci
, PCI_BASE_ADDRESS_1
, 0x00000000);
605 /* setup interrupt pins */
606 dw_pcie_dbi_ro_wr_en(pci
);
607 val
= dw_pcie_readl_dbi(pci
, PCI_INTERRUPT_LINE
);
610 dw_pcie_writel_dbi(pci
, PCI_INTERRUPT_LINE
, val
);
611 dw_pcie_dbi_ro_wr_dis(pci
);
613 /* setup bus numbers */
614 val
= dw_pcie_readl_dbi(pci
, PCI_PRIMARY_BUS
);
617 dw_pcie_writel_dbi(pci
, PCI_PRIMARY_BUS
, val
);
619 /* setup command register */
620 val
= dw_pcie_readl_dbi(pci
, PCI_COMMAND
);
622 val
|= PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
|
623 PCI_COMMAND_MASTER
| PCI_COMMAND_SERR
;
624 dw_pcie_writel_dbi(pci
, PCI_COMMAND
, val
);
627 * If the platform provides ->rd_other_conf, it means the platform
628 * uses its own address translation component rather than ATU, so
629 * we should not program the ATU here.
631 if (!pp
->ops
->rd_other_conf
) {
632 /* get iATU unroll support */
633 pci
->iatu_unroll_enabled
= dw_pcie_iatu_unroll_enabled(pci
);
634 dev_dbg(pci
->dev
, "iATU unroll: %s\n",
635 pci
->iatu_unroll_enabled
? "enabled" : "disabled");
637 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX0
,
638 PCIE_ATU_TYPE_MEM
, pp
->mem_base
,
639 pp
->mem_bus_addr
, pp
->mem_size
);
640 if (pci
->num_viewport
> 2)
641 dw_pcie_prog_outbound_atu(pci
, PCIE_ATU_REGION_INDEX2
,
642 PCIE_ATU_TYPE_IO
, pp
->io_base
,
643 pp
->io_bus_addr
, pp
->io_size
);
646 dw_pcie_wr_own_conf(pp
, PCI_BASE_ADDRESS_0
, 4, 0);
648 /* Enable write permission for the DBI read-only register */
649 dw_pcie_dbi_ro_wr_en(pci
);
650 /* program correct class for RC */
651 dw_pcie_wr_own_conf(pp
, PCI_CLASS_DEVICE
, 2, PCI_CLASS_BRIDGE_PCI
);
652 /* Better disable write permission right after the update */
653 dw_pcie_dbi_ro_wr_dis(pci
);
655 dw_pcie_rd_own_conf(pp
, PCIE_LINK_WIDTH_SPEED_CONTROL
, 4, &val
);
656 val
|= PORT_LOGIC_SPEED_CHANGE
;
657 dw_pcie_wr_own_conf(pp
, PCIE_LINK_WIDTH_SPEED_CONTROL
, 4, val
);