// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/gpio/consumer.h>

#include "../../pci.h"
#include "pcie-designware.h"
/* PCIe controller wrapper DRA7XX configuration registers */

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define ERR_SYS						BIT(0)
#define ERR_FATAL					BIT(1)
#define ERR_NONFATAL					BIT(2)
#define ERR_COR						BIT(3)
#define ERR_AXI						BIT(4)
#define ERR_ECRC					BIT(5)
#define PME_TURN_OFF					BIT(8)
#define PME_TO_ACK					BIT(9)
#define PM_PME						BIT(10)
#define LINK_REQ_RST					BIT(11)
#define LINK_UP_EVT					BIT(12)
#define CFG_BME_EVT					BIT(13)
#define CFG_MSE_EVT					BIT(14)
#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define INTA						BIT(0)
#define INTB						BIT(1)
#define INTC						BIT(2)
#define INTD						BIT(3)
#define MSI						BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
#define PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define DEVICE_TYPE_EP					0x0
#define DEVICE_TYPE_LEG_EP				0x1
#define DEVICE_TYPE_RC					0x4

#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define LTSSM_EN					0x1

#define PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define LINK_UP						BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF
#define PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

#define PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define MSI_REQ_GRANT					BIT(0)
#define MSI_VECTOR_SHIFT				7

#define PCIE_1LANE_2LANE_SELECTION			BIT(13)
#define PCIE_B1C0_MODE_SEL				BIT(2)
#define PCIE_B0_B1_TSYNCEN				BIT(0)
struct dra7xx_pcie {
	struct dw_pcie		*pci;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	struct irq_domain	*irq_domain;
	enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;
};
#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}
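/*
 * Only the lower 28 bits of a CPU address are forwarded to the PCIe bus on
 * DRA7xx, so mask the address before it is programmed into the ATU.
 */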
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}
static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};
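/*
 * Service every pending MSI in one MSI controller register block; returns
 * non-zero if anything was pending so the caller can re-scan the status.
 */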
static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned long val;
	int pos, irq;

	val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
				   (index * MSI_REG_CTRL_BLOCK_SIZE));
	if (!val)
		return 0;

	pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
	while (pos != MAX_MSI_IRQS_PER_CTRL) {
		irq = irq_find_mapping(pp->irq_domain,
				       (index * MAX_MSI_IRQS_PER_CTRL) + pos);
		generic_handle_irq(irq);
		pos++;
		pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
	}

	return 1;
}
static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret, i, count, num_ctrls;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/*
	 * Need to make sure all MSI status bits read 0 before exiting.
	 * Else, new MSI IRQs are not registered by the wrapper. Have an
	 * upperbound for the loop and exit the IRQ in case of IRQ flood
	 * to avoid locking up system in interrupt context.
	 */
	count = 0;
	do {
		ret = 0;

		for (i = 0; i < num_ctrls; i++)
			ret |= dra7xx_pcie_handle_msi(pp, i);
		count++;
	} while (ret && count <= 1000);

	if (count > 1000)
		dev_warn_ratelimited(pci->dev,
				     "Too many MSI IRQs to handle\n");
}
static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dra7xx_pcie *dra7xx;
	struct dw_pcie *pci;
	struct pcie_port *pp;
	unsigned long reg;
	u32 virq, bit;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	pci = to_dw_pcie_from_pp(pp);
	dra7xx = to_dra7xx_pcie(pci);

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	switch (reg) {
	case MSI:
		dra7xx_pcie_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	chained_irq_exit(chip, desc);
}
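/*
 * Main wrapper interrupt: decode, log and acknowledge the bits reported in
 * the TI IRQSTATUS_MAIN register.
 */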
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}
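/*
 * Legacy INTx interrupts are muxed behind the wrapper IRQ; model them with a
 * linear IRQ domain backed by the child interrupt-controller DT node.
 */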
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
					 pp);
	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	of_node_put(pcie_intc_node);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}
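/*
 * Signal a legacy interrupt by pulsing the INTX assert/deassert registers in
 * the TI wrapper.
 */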
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}
static const struct pci_epc_features dra7xx_pcie_epc_features = {
	.linkup_notifier = true,
	.msi_capable = true,
	.msix_capable = false,
};

static const struct pci_epc_features*
dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
{
	return &dra7xx_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
	.get_features = dra7xx_pcie_get_features,
};
static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pci->dbi_base2 =
		devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0)
		return pp->irq;

	/* MSI IRQ is muxed */
	pp->msi_irq = -ENODEV;

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};
static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
		if (ret < 0)
			goto err_phy;

		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}
static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
	.b1co_mode_sel_mask = BIT(2),
	.mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
	.b1co_mode_sel_mask = GENMASK(3, 2),
	.mode = DW_PCIE_EP_TYPE,
};
static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-rc",
		.data = &dra746_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-rc",
		.data = &dra726_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra746-pcie-ep",
		.data = &dra746_pcie_ep_of_data,
	},
	{
		.compatible = "ti,dra726-pcie-ep",
		.data = &dra726_pcie_ep_of_data,
	},
	{},
};
/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dra7xx: the dra7xx device where the workaround should be applied
 *
 * Access to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 * 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}
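/*
 * Two-lane (x2) setup: program the lane-select syscon so the second PHY lane
 * is routed to this controller (PCIE_B1C0_MODE_SEL) with both lanes
 * time-synchronised (PCIE_B0_B1_TSYNCEN).
 */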
static int dra7xx_pcie_configure_two_lane(struct device *dev,
					  u32 b1co_mode_sel_mask)
{
	struct device_node *np = dev->of_node;
	struct regmap *pcie_syscon;
	unsigned int pcie_reg;
	u32 mask;
	u32 val;

	pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
	if (IS_ERR(pcie_syscon)) {
		dev_err(dev, "unable to get ti,syscon-lane-sel\n");
		return -EINVAL;
	}

	if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
				       &pcie_reg)) {
		dev_err(dev, "couldn't get lane selection reg offset\n");
		return -EINVAL;
	}

	mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
	val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
	regmap_update_bits(pcie_syscon, pcie_reg, mask, val);

	return 0;
}
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct dw_pcie *pci;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;
	u32 b1co_mode_sel_mask;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;
	b1co_mode_sel_mask = data->b1co_mode_sel_mask;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
	if (IS_ERR(base))
		return PTR_ERR(base);

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
= 0; i
< phy_count
; i
++) {
748 snprintf(name
, sizeof(name
), "pcie-phy%d", i
);
749 phy
[i
] = devm_phy_get(dev
, name
);
751 return PTR_ERR(phy
[i
]);
753 link
[i
] = device_link_add(dev
, &phy
[i
]->dev
, DL_FLAG_STATELESS
);
763 dra7xx
->phy_count
= phy_count
;
765 if (phy_count
== 2) {
766 ret
= dra7xx_pcie_configure_two_lane(dev
, b1co_mode_sel_mask
);
768 dra7xx
->phy_count
= 1; /* Fallback to x1 lane mode */
771 ret
= dra7xx_pcie_enable_phy(dra7xx
);
773 dev_err(dev
, "failed to enable phy\n");
777 platform_set_drvdata(pdev
, dra7xx
);
779 pm_runtime_enable(dev
);
780 ret
= pm_runtime_get_sync(dev
);
782 dev_err(dev
, "pm_runtime_get_sync failed\n");
786 reset
= devm_gpiod_get_optional(dev
, NULL
, GPIOD_OUT_HIGH
);
788 ret
= PTR_ERR(reset
);
789 dev_err(&pdev
->dev
, "gpio request failed, ret %d\n", ret
);
793 reg
= dra7xx_pcie_readl(dra7xx
, PCIECTRL_DRA7XX_CONF_DEVICE_CMD
);
795 dra7xx_pcie_writel(dra7xx
, PCIECTRL_DRA7XX_CONF_DEVICE_CMD
, reg
);
798 case DW_PCIE_RC_TYPE
:
799 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST
)) {
804 dra7xx_pcie_writel(dra7xx
, PCIECTRL_TI_CONF_DEVICE_TYPE
,
807 ret
= dra7xx_pcie_unaligned_memaccess(dev
);
809 dev_err(dev
, "WA for Errata i870 not applied\n");
811 ret
= dra7xx_add_pcie_port(dra7xx
, pdev
);
815 case DW_PCIE_EP_TYPE
:
816 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP
)) {
821 dra7xx_pcie_writel(dra7xx
, PCIECTRL_TI_CONF_DEVICE_TYPE
,
824 ret
= dra7xx_pcie_unaligned_memaccess(dev
);
828 ret
= dra7xx_add_pcie_ep(dra7xx
, pdev
);
833 dev_err(dev
, "INVALID device type %d\n", mode
);
837 ret
= devm_request_irq(dev
, irq
, dra7xx_pcie_irq_handler
,
838 IRQF_SHARED
, "dra7xx-pcie-main", dra7xx
);
840 dev_err(dev
, "failed to request irq\n");
849 pm_runtime_disable(dev
);
850 dra7xx_pcie_disable_phy(dra7xx
);
854 device_link_del(link
[i
]);
#ifdef CONFIG_PM_SLEEP
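/*
 * System sleep: in RC mode, Memory Space Enable is cleared on suspend and
 * restored on resume; EP mode leaves the command register untouched.
 */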
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);