// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/gpio/consumer.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN     0x0024
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028
#define ERR_SYS         BIT(0)
#define ERR_FATAL       BIT(1)
#define ERR_NONFATAL    BIT(2)
#define ERR_COR         BIT(3)
#define ERR_AXI         BIT(4)
#define ERR_ECRC        BIT(5)
#define PME_TURN_OFF    BIT(8)
#define PME_TO_ACK      BIT(9)
#define PM_PME          BIT(10)
#define LINK_REQ_RST    BIT(11)
#define LINK_UP_EVT     BIT(12)
#define CFG_BME_EVT     BIT(13)
#define CFG_MSE_EVT     BIT(14)
#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
                    ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
                    LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI      0x0034
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI  0x0038
#define INTA            BIT(0)
#define INTB            BIT(1)
#define INTC            BIT(2)
#define INTD            BIT(3)
#define MSI             BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define PCIECTRL_TI_CONF_DEVICE_TYPE            0x0100
#define DEVICE_TYPE_EP                          0x0
#define DEVICE_TYPE_LEG_EP                      0x1
#define DEVICE_TYPE_RC                          0x4

#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD         0x0104
#define LTSSM_EN                                0x1
#define PCIECTRL_DRA7XX_CONF_PHY_CS             0x010C
#define LINK_UP                                 BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR                  0x0FFFFFFF

#define PCIECTRL_TI_CONF_INTX_ASSERT            0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT          0x0128

#define PCIECTRL_TI_CONF_MSI_XMT                0x012c
#define MSI_REQ_GRANT                           BIT(0)
#define MSI_VECTOR_SHIFT                        7

#define PCIE_1LANE_2LANE_SELECTION              BIT(13)
#define PCIE_B1C0_MODE_SEL                      BIT(2)
#define PCIE_B0_B1_TSYNCEN                      BIT(0)

struct dra7xx_pcie {
        struct dw_pcie          *pci;
        void __iomem            *base;          /* DT ti_conf */
        int                     phy_count;      /* DT phy-names count */
        struct phy              **phy;
        struct irq_domain       *irq_domain;
        struct clk              *clk;
        enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
        enum dw_pcie_device_mode mode;
        u32 b1co_mode_sel_mask;
};

#define to_dra7xx_pcie(x)       dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
        return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
                                      u32 value)
{
        writel(value, pcie->base + offset);
}

static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
        return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
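
/*
 * Example of the fixup above: with DRA7XX_CPU_TO_BUS_ADDR = 0x0FFFFFFF,
 * a CPU address such as 0x2000_0000 is reduced to bus address 0x0000_0000
 * before the DWC core programs the outbound ATU; only the low 28 address
 * bits of the CPU address are forwarded to the PCIe side.
 */
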
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

        return !!(reg & LINK_UP);
}
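
/*
 * Note: link state is sampled from the TI wrapper's PHY_CS register
 * (LINK_UP, bit 16) rather than from the DesignWare core's own debug
 * registers; dw_pcie_link_up() ends up calling back into the wrapper
 * read above via dw_pcie_ops.link_up.
 */
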
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        u32 reg;

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
        reg &= ~LTSSM_EN;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        struct device *dev = pci->dev;
        u32 reg;

        if (dw_pcie_link_up(pci)) {
                dev_err(dev, "link is already up\n");
                return 0;
        }

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
        reg |= LTSSM_EN;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

        return 0;
}

static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
                           LEG_EP_INTERRUPTS | MSI);

        dra7xx_pcie_writel(dra7xx,
                           PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
                           MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
                           INTERRUPTS);
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
                           INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
        dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
        dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
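
/*
 * Both enable helpers above follow the same pattern: write the pending
 * bits to the IRQSTATUS register first (these wrapper registers appear
 * to be write-1-to-clear, since the IRQ handlers below also ack events
 * by writing the read value back), then unmask the sources through the
 * IRQENABLE_SET register.
 */
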
static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

        dra7xx_pcie_enable_interrupts(dra7xx);

        return 0;
}

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
                                irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
        irq_set_chip_data(irq, domain->host_data);

        return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
        .map = dra7xx_pcie_intx_map,
        .xlate = pci_irqd_intx_xlate,
};

static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned long val;
        int pos;

        val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
                                   (index * MSI_REG_CTRL_BLOCK_SIZE));
        if (!val)
                return 0;

        pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL);
        while (pos != MAX_MSI_IRQS_PER_CTRL) {
                generic_handle_domain_irq(pp->irq_domain,
                                          (index * MAX_MSI_IRQS_PER_CTRL) + pos);
                pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
        }

        return 1;
}

static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        int ret, i, count, num_ctrls;

        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        /*
         * Need to make sure all MSI status bits read 0 before exiting.
         * Else, new MSI IRQs are not registered by the wrapper. Have an
         * upperbound for the loop and exit the IRQ in case of IRQ flood
         * to avoid locking up system in interrupt context.
         */
        count = 0;
        do {
                ret = 0;

                for (i = 0; i < num_ctrls; i++)
                        ret |= dra7xx_pcie_handle_msi(pp, i);
                count++;
        } while (ret && count <= 1000);

        if (count > 1000)
                dev_warn_ratelimited(pci->dev,
                                     "Too many MSI IRQs to handle\n");
}

static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct dra7xx_pcie *dra7xx;
        struct dw_pcie_rp *pp;
        struct dw_pcie *pci;
        unsigned long reg;
        u32 bit;

        chained_irq_enter(chip, desc);

        pp = irq_desc_get_handler_data(desc);
        pci = to_dw_pcie_from_pp(pp);
        dra7xx = to_dra7xx_pcie(pci);

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

        switch (reg) {
        case MSI:
                dra7xx_pcie_handle_msi_irq(pp);
                break;
        case INTA:
        case INTB:
        case INTC:
        case INTD:
                for_each_set_bit(bit, &reg, PCI_NUM_INTX)
                        generic_handle_domain_irq(dra7xx->irq_domain, bit);
                break;
        }

        chained_irq_exit(chip, desc);
}
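
/*
 * The handler above runs as a chained handler for pp->irq; the dw_pcie_rp
 * pointer is the handler data installed by dra7xx_pcie_init_irq_domain()
 * below, which is why both the MSI path and the INTx domain can be
 * resolved from it.
 */
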
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
        struct dra7xx_pcie *dra7xx = arg;
        struct dw_pcie *pci = dra7xx->pci;
        struct device *dev = pci->dev;
        struct dw_pcie_ep *ep = &pci->ep;
        u32 reg;

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

        if (reg & ERR_SYS)
                dev_dbg(dev, "System Error\n");

        if (reg & ERR_FATAL)
                dev_dbg(dev, "Fatal Error\n");

        if (reg & ERR_NONFATAL)
                dev_dbg(dev, "Non Fatal Error\n");

        if (reg & ERR_COR)
                dev_dbg(dev, "Correctable Error\n");

        if (reg & ERR_AXI)
                dev_dbg(dev, "AXI tag lookup fatal Error\n");

        if (reg & ERR_ECRC)
                dev_dbg(dev, "ECRC Error\n");

        if (reg & PME_TURN_OFF)
                dev_dbg(dev,
                        "Power Management Event Turn-Off message received\n");

        if (reg & PME_TO_ACK)
                dev_dbg(dev,
                        "Power Management Turn-Off Ack message received\n");

        if (reg & PM_PME)
                dev_dbg(dev, "PM Power Management Event message received\n");

        if (reg & LINK_REQ_RST)
                dev_dbg(dev, "Link Request Reset\n");

        if (reg & LINK_UP_EVT) {
                if (dra7xx->mode == DW_PCIE_EP_TYPE)
                        dw_pcie_ep_linkup(ep);
                dev_dbg(dev, "Link-up state change\n");
        }

        if (reg & CFG_BME_EVT)
                dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

        if (reg & CFG_MSE_EVT)
                dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

        return IRQ_HANDLED;
}

static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        struct device_node *node = dev->of_node;
        struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

        if (!pcie_intc_node) {
                dev_err(dev, "No PCIe Intc node found\n");
                return -ENODEV;
        }

        irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
                                         pp);
        dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
                                                   &intx_domain_ops, pp);
        of_node_put(pcie_intc_node);
        if (!dra7xx->irq_domain) {
                dev_err(dev, "Failed to get a INTx IRQ domain\n");
                return -ENODEV;
        }

        return 0;
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
        .init = dra7xx_pcie_host_init,
};

static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        enum pci_barno bar;

        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
                dw_pcie_ep_reset_bar(pci, bar);

        dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

static void dra7xx_pcie_raise_intx_irq(struct dra7xx_pcie *dra7xx)
{
        dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
        mdelay(1);
        dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
                                      u8 interrupt_num)
{
        u32 reg;

        reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
        reg |= MSI_REQ_GRANT;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}
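
/*
 * MSI_XMT register layout as used above: the zero-based MSI vector number
 * sits at MSI_VECTOR_SHIFT (bit 7 and up), and setting MSI_REQ_GRANT
 * (bit 0) asks the wrapper to transmit the MSI.
 */
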
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
                                 unsigned int type, u16 interrupt_num)
{
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

        switch (type) {
        case PCI_IRQ_INTX:
                dra7xx_pcie_raise_intx_irq(dra7xx);
                break;
        case PCI_IRQ_MSI:
                dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
                break;
        default:
                dev_err(pci->dev, "UNKNOWN IRQ type\n");
        }

        return 0;
}

static const struct pci_epc_features dra7xx_pcie_epc_features = {
        .linkup_notifier = true,
        .msix_capable = false,
};
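
/*
 * linkup_notifier is set because the wrapper raises LINK_UP_EVT on the
 * main interrupt line; dra7xx_pcie_irq_handler() forwards that event to
 * the EPF core via dw_pcie_ep_linkup() when running in endpoint mode.
 */
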
static const struct pci_epc_features*
dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
{
        return &dra7xx_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
        .init = dra7xx_pcie_ep_init,
        .raise_irq = dra7xx_pcie_raise_irq,
        .get_features = dra7xx_pcie_get_features,
};

static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
                              struct platform_device *pdev)
{
        int ret;
        struct dw_pcie_ep *ep;
        struct device *dev = &pdev->dev;
        struct dw_pcie *pci = dra7xx->pci;

        ep = &pci->ep;
        ep->ops = &pcie_ep_ops;

        pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
        if (IS_ERR(pci->dbi_base))
                return PTR_ERR(pci->dbi_base);

        pci->dbi_base2 =
                devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
        if (IS_ERR(pci->dbi_base2))
                return PTR_ERR(pci->dbi_base2);

        ret = dw_pcie_ep_init(ep);
        if (ret) {
                dev_err(dev, "failed to initialize endpoint\n");
                return ret;
        }

        ret = dw_pcie_ep_init_registers(ep);
        if (ret) {
                dev_err(dev, "Failed to initialize DWC endpoint registers\n");
                dw_pcie_ep_deinit(ep);
                return ret;
        }

        pci_epc_init_notify(ep->epc);

        return 0;
}

static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
                                struct platform_device *pdev)
{
        int ret;
        struct dw_pcie *pci = dra7xx->pci;
        struct dw_pcie_rp *pp = &pci->pp;
        struct device *dev = pci->dev;

        pp->irq = platform_get_irq(pdev, 1);
        if (pp->irq < 0)
                return pp->irq;

        /* MSI IRQ is muxed */
        pp->msi_irq[0] = -ENODEV;

        ret = dra7xx_pcie_init_irq_domain(pp);
        if (ret < 0)
                return ret;

        pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
        if (IS_ERR(pci->dbi_base))
                return PTR_ERR(pci->dbi_base);

        pp->ops = &dra7xx_pcie_host_ops;

        ret = dw_pcie_host_init(pp);
        if (ret) {
                dev_err(dev, "failed to initialize host\n");
                return ret;
        }

        return 0;
}
static const struct dw_pcie_ops dw_pcie_ops = {
        .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
        .start_link = dra7xx_pcie_establish_link,
        .stop_link = dra7xx_pcie_stop_link,
        .link_up = dra7xx_pcie_link_up,
};

static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
        int phy_count = dra7xx->phy_count;

        while (phy_count--) {
                phy_power_off(dra7xx->phy[phy_count]);
                phy_exit(dra7xx->phy[phy_count]);
        }
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
        int phy_count = dra7xx->phy_count;
        int ret;
        int i;

        for (i = 0; i < phy_count; i++) {
                ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
                if (ret < 0)
                        goto err_phy;

                ret = phy_init(dra7xx->phy[i]);
                if (ret < 0)
                        goto err_phy;

                ret = phy_power_on(dra7xx->phy[i]);
                if (ret < 0) {
                        phy_exit(dra7xx->phy[i]);
                        goto err_phy;
                }
        }

        return 0;

err_phy:
        while (--i >= 0) {
                phy_power_off(dra7xx->phy[i]);
                phy_exit(dra7xx->phy[i]);
        }

        return ret;
}

static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
        .mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
        .mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
        .b1co_mode_sel_mask = BIT(2),
        .mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
        .b1co_mode_sel_mask = GENMASK(3, 2),
        .mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
        .b1co_mode_sel_mask = BIT(2),
        .mode = DW_PCIE_EP_TYPE,
};

static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
        .b1co_mode_sel_mask = GENMASK(3, 2),
        .mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
        {
                .compatible = "ti,dra7-pcie",
                .data = &dra7xx_pcie_rc_of_data,
        },
        {
                .compatible = "ti,dra7-pcie-ep",
                .data = &dra7xx_pcie_ep_of_data,
        },
        {
                .compatible = "ti,dra746-pcie-rc",
                .data = &dra746_pcie_rc_of_data,
        },
        {
                .compatible = "ti,dra726-pcie-rc",
                .data = &dra726_pcie_rc_of_data,
        },
        {
                .compatible = "ti,dra746-pcie-ep",
                .data = &dra746_pcie_ep_of_data,
        },
        {
                .compatible = "ti,dra726-pcie-ep",
                .data = &dra726_pcie_ep_of_data,
        },
        {},
};
MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);

/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dra7xx: the dra7xx device where the workaround should be applied
 *
 * Accesses to the PCIe slave port that are not 32-bit aligned will result
 * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
 * 0x3.
 *
 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
        int ret;
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        struct regmap *regmap;

        regmap = syscon_regmap_lookup_by_phandle(np,
                                                 "ti,syscon-unaligned-access");
        if (IS_ERR(regmap)) {
                dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
                return -EINVAL;
        }

        ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
                                               2, 0, &args);
        if (ret) {
                dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
                return ret;
        }

        ret = regmap_update_bits(regmap, args.args[0], args.args[1],
                                 args.args[1]);
        if (ret)
                dev_err(dev, "failed to enable unaligned access\n");

        of_node_put(args.np);

        return ret;
}
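
/*
 * The "ti,syscon-unaligned-access" phandle above carries two fixed
 * arguments which the code treats as <register offset> and <bit mask>:
 * args.args[0] selects the syscon register and args.args[1] is used as
 * both mask and value in regmap_update_bits(), i.e. the legacy-mode
 * enable bit is simply set.
 */
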
static int dra7xx_pcie_configure_two_lane(struct device *dev,
                                          u32 b1co_mode_sel_mask)
{
        struct device_node *np = dev->of_node;
        struct regmap *pcie_syscon;
        unsigned int pcie_reg;
        u32 mask;
        u32 val;

        pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
        if (IS_ERR(pcie_syscon)) {
                dev_err(dev, "unable to get ti,syscon-lane-sel\n");
                return -EINVAL;
        }

        if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
                                       &pcie_reg)) {
                dev_err(dev, "couldn't get lane selection reg offset\n");
                return -EINVAL;
        }

        mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
        val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
        regmap_update_bits(pcie_syscon, pcie_reg, mask, val);

        return 0;
}
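
/*
 * "ti,syscon-lane-sel" is likewise a phandle plus one cell: index 1 of
 * the property provides the lane-selection register offset inside the
 * syscon. The update sets PCIE_B1C0_MODE_SEL and PCIE_B0_B1_TSYNCEN while
 * masking with the SoC-specific b1co_mode_sel_mask (BIT(2) on DRA74x,
 * bits 3:2 on DRA72x, per the of_data tables above).
 */
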
static int dra7xx_pcie_probe(struct platform_device *pdev)
{
        u32 reg;
        int ret;
        int irq;
        int i;
        int phy_count;
        struct phy **phy;
        struct device_link **link;
        void __iomem *base;
        struct dw_pcie *pci;
        struct dra7xx_pcie *dra7xx;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        char name[10];
        struct gpio_desc *reset;
        const struct dra7xx_pcie_of_data *data;
        enum dw_pcie_device_mode mode;
        u32 b1co_mode_sel_mask;

        data = of_device_get_match_data(dev);
        if (!data)
                return -EINVAL;

        mode = (enum dw_pcie_device_mode)data->mode;
        b1co_mode_sel_mask = data->b1co_mode_sel_mask;

        dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
        if (!dra7xx)
                return -ENOMEM;

        pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
        if (!pci)
                return -ENOMEM;

        pci->dev = dev;
        pci->ops = &dw_pcie_ops;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
        if (IS_ERR(base))
                return PTR_ERR(base);

        phy_count = of_property_count_strings(np, "phy-names");
        if (phy_count < 0) {
                dev_err(dev, "unable to find the strings\n");
                return phy_count;
        }

        phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
        if (!phy)
                return -ENOMEM;

        link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        dra7xx->clk = devm_clk_get_optional(dev, NULL);
        if (IS_ERR(dra7xx->clk))
                return dev_err_probe(dev, PTR_ERR(dra7xx->clk),
                                     "clock request failed");

        ret = clk_prepare_enable(dra7xx->clk);
        if (ret)
                return ret;

        for (i = 0; i < phy_count; i++) {
                snprintf(name, sizeof(name), "pcie-phy%d", i);
                phy[i] = devm_phy_get(dev, name);
                if (IS_ERR(phy[i]))
                        return PTR_ERR(phy[i]);

                link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
                if (!link[i]) {
                        ret = -EINVAL;
                        goto err_link;
                }
        }

        dra7xx->base = base;
        dra7xx->phy = phy;
        dra7xx->pci = pci;
        dra7xx->phy_count = phy_count;

        if (phy_count == 2) {
                ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
                if (ret < 0)
                        dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
        }

        ret = dra7xx_pcie_enable_phy(dra7xx);
        if (ret) {
                dev_err(dev, "failed to enable phy\n");
                return ret;
        }

        platform_set_drvdata(pdev, dra7xx);

        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "pm_runtime_get_sync failed\n");
                goto err_get_sync;
        }

        reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
        if (IS_ERR(reset)) {
                ret = PTR_ERR(reset);
                dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
                goto err_gpio;
        }

        reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
        reg &= ~LTSSM_EN;
        dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

        switch (mode) {
        case DW_PCIE_RC_TYPE:
                if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
                        ret = -ENODEV;
                        goto err_gpio;
                }

                dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
                                   DEVICE_TYPE_RC);

                ret = dra7xx_pcie_unaligned_memaccess(dev);
                if (ret)
                        dev_err(dev, "WA for Errata i870 not applied\n");

                ret = dra7xx_add_pcie_port(dra7xx, pdev);
                if (ret < 0)
                        goto err_gpio;

                dra7xx->mode = DW_PCIE_RC_TYPE;
                break;
        case DW_PCIE_EP_TYPE:
                if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
                        ret = -ENODEV;
                        goto err_gpio;
                }

                dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
                                   DEVICE_TYPE_EP);

                ret = dra7xx_pcie_unaligned_memaccess(dev);
                if (ret)
                        goto err_gpio;

                ret = dra7xx_add_pcie_ep(dra7xx, pdev);
                if (ret < 0)
                        goto err_gpio;

                dra7xx->mode = DW_PCIE_EP_TYPE;
                break;
        default:
                dev_err(dev, "INVALID device type %d\n", mode);
                ret = -EINVAL;
                goto err_gpio;
        }

        ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
                                        IRQF_SHARED | IRQF_ONESHOT,
                                        "dra7xx-pcie-main", dra7xx);
        if (ret) {
                dev_err(dev, "failed to request irq\n");
                goto err_deinit;
        }

        return 0;

err_deinit:
        if (dra7xx->mode == DW_PCIE_RC_TYPE)
                dw_pcie_host_deinit(&dra7xx->pci->pp);
        else
                dw_pcie_ep_deinit(&dra7xx->pci->ep);

err_gpio:
err_get_sync:
        pm_runtime_put(dev);
        pm_runtime_disable(dev);
        dra7xx_pcie_disable_phy(dra7xx);

err_link:
        while (--i >= 0)
                device_link_del(link[i]);

        return ret;
}

static int dra7xx_pcie_suspend(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        struct dw_pcie *pci = dra7xx->pci;
        u32 val;

        if (dra7xx->mode != DW_PCIE_RC_TYPE)
                return 0;

        /* clear MSE */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        val &= ~PCI_COMMAND_MEMORY;
        dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

        return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        struct dw_pcie *pci = dra7xx->pci;
        u32 val;

        if (dra7xx->mode != DW_PCIE_RC_TYPE)
                return 0;

        /* set MSE */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        val |= PCI_COMMAND_MEMORY;
        dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

        return 0;
}
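
/*
 * The suspend/resume pair above only acts in root-complex mode: it clears
 * and restores PCI_COMMAND_MEMORY (Memory Space Enable) in the RC's own
 * config header around system sleep, and leaves endpoint mode untouched.
 */
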
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

        dra7xx_pcie_disable_phy(dra7xx);

        return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        int ret;

        ret = dra7xx_pcie_enable_phy(dra7xx);
        if (ret) {
                dev_err(dev, "failed to enable phy\n");
                return ret;
        }

        return 0;
}

static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
        int ret;

        dra7xx_pcie_stop_link(dra7xx->pci);

        ret = pm_runtime_put_sync(dev);
        if (ret < 0)
                dev_dbg(dev, "pm_runtime_put_sync failed\n");

        pm_runtime_disable(dev);
        dra7xx_pcie_disable_phy(dra7xx);

        clk_disable_unprepare(dra7xx->clk);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
        SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
        NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
                                  dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
        .probe = dra7xx_pcie_probe,
        .driver = {
                .name = "dra7-pcie",
                .of_match_table = of_dra7xx_pcie_match,
                .suppress_bind_attrs = true,
                .pm = &dra7xx_pcie_pm_ops,
        },
        .shutdown = dra7xx_pcie_shutdown,
};
module_platform_driver(dra7xx_pcie_driver);

MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_DESCRIPTION("PCIe controller driver for TI DRA7xx SoCs");
MODULE_LICENSE("GPL v2");