// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe Endpoint controller driver
 *
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Author: Siddartha Mohanadoss <smohanad@codeaurora.org>
 *
 * Copyright (c) 2021, Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "../../pci.h"
#include "pcie-designware.h"
#include "pcie-qcom-common.h"
/* PARF register offsets */
#define PARF_SYS_CTRL				0x00
#define PARF_DB_CTRL				0x10
#define PARF_PM_CTRL				0x20
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_MHI_BASE_ADDR_LOWER		0x178
#define PARF_MHI_BASE_ADDR_UPPER		0x17c
#define PARF_DEBUG_INT_EN			0x190
#define PARF_AXI_MSTR_RD_HALT_NO_WRITES		0x1a4
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_CFG_BITS				0x210
#define PARF_INT_ALL_STATUS			0x224
#define PARF_INT_ALL_CLEAR			0x228
#define PARF_INT_ALL_MASK			0x22c
#define PARF_SLV_ADDR_MSB_CTRL			0x2c0
#define PARF_DBI_BASE_ADDR			0x350
#define PARF_DBI_BASE_ADDR_HI			0x354
#define PARF_SLV_ADDR_SPACE_SIZE		0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI		0x35c
#define PARF_NO_SNOOP_OVERIDE			0x3d4
#define PARF_ATU_BASE_ADDR			0x634
#define PARF_ATU_BASE_ADDR_HI			0x638
#define PARF_SRIS_MODE				0x644
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_CFG			0x2c00
#define PARF_INT_ALL_5_MASK			0x2dcc

/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN			BIT(1)
#define PARF_INT_ALL_BME			BIT(2)
#define PARF_INT_ALL_PM_TURNOFF			BIT(3)
#define PARF_INT_ALL_DEBUG			BIT(4)
#define PARF_INT_ALL_LTR			BIT(5)
#define PARF_INT_ALL_MHI_Q6			BIT(6)
#define PARF_INT_ALL_MHI_A7			BIT(7)
#define PARF_INT_ALL_DSTATE_CHANGE		BIT(8)
#define PARF_INT_ALL_L1SUB_TIMEOUT		BIT(9)
#define PARF_INT_ALL_MMIO_WRITE			BIT(10)
#define PARF_INT_ALL_CFG_WRITE			BIT(11)
#define PARF_INT_ALL_BRIDGE_FLUSH_N		BIT(12)
#define PARF_INT_ALL_LINK_UP			BIT(13)
#define PARF_INT_ALL_AER_LEGACY			BIT(14)
#define PARF_INT_ALL_PLS_ERR			BIT(15)
#define PARF_INT_ALL_PME_LEGACY			BIT(16)
#define PARF_INT_ALL_PLS_PME			BIT(17)
#define PARF_INT_ALL_EDMA			BIT(22)

/* PARF_BDF_TO_SID_CFG register fields */
#define PARF_BDF_TO_SID_BYPASS			BIT(0)

/* PARF_DEBUG_INT_EN register fields */
#define PARF_DEBUG_INT_PM_DSTATE_CHANGE		BIT(1)
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN	BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF		BIT(3)

/* PARF_NO_SNOOP_OVERIDE register fields */
#define WR_NO_SNOOP_OVERIDE_EN			BIT(1)
#define RD_NO_SNOOP_OVERIDE_EN			BIT(3)

/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP			0x0

/* PARF_PM_CTRL register fields */
#define PARF_PM_CTRL_REQ_EXIT_L1		BIT(1)
#define PARF_PM_CTRL_READY_ENTR_L23		BIT(2)
#define PARF_PM_CTRL_REQ_NOT_ENTR_L1		BIT(5)

/* PARF_MHI_CLOCK_RESET_CTRL fields */
#define PARF_MSTR_AXI_CLK_EN			BIT(1)

/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN	BIT(0)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define PARF_AXI_MSTR_WR_ADDR_HALT_EN		BIT(31)

/* PARF_Q2A_FLUSH register fields */
#define PARF_Q2A_FLUSH_EN			BIT(16)

/* PARF_SYS_CTRL register fields */
#define PARF_SYS_CTRL_AUX_PWR_DET		BIT(4)
#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS		BIT(6)
#define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS		BIT(10)
#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE	BIT(11)

/* PARF_DB_CTRL register fields */
#define PARF_DB_CTRL_INSR_DBNCR_BLOCK		BIT(0)
#define PARF_DB_CTRL_RMVL_DBNCR_BLOCK		BIT(1)
#define PARF_DB_CTRL_DBI_WKP_BLOCK		BIT(4)
#define PARF_DB_CTRL_SLV_WKP_BLOCK		BIT(5)
#define PARF_DB_CTRL_MST_WKP_BLOCK		BIT(6)

/* PARF_CFG_BITS register fields */
#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN	BIT(1)

/* PARF_INT_ALL_5_MASK fields */
#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR	BIT(0)

/* ELBI registers */
#define ELBI_SYS_STTS				0x08
#define ELBI_CS2_ENABLE				0xa4

/* DBI registers */
#define DBI_CON_STATUS				0x44

/* DBI register fields */
#define DBI_CON_STATUS_POWER_STATE_MASK		GENMASK(1, 0)

#define XMLH_LINK_UP				0x400
#define CORE_RESET_TIME_US_MIN			1000
#define CORE_RESET_TIME_US_MAX			1005
#define WAKE_DELAY_US				2000 /* 2 ms */

/* Convert a PCIe speed index to the interconnect bandwidth it implies. */
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))

/* Recover the driver state struct from a DWC controller pointer. */
#define to_pcie_ep(x)				dev_get_drvdata((x)->dev)
/* States the PCIe link can be in, as tracked by this driver. */
enum qcom_pcie_ep_link_status {
	QCOM_PCIE_EP_LINK_DISABLED,
	QCOM_PCIE_EP_LINK_ENABLED,
	QCOM_PCIE_EP_LINK_UP,
	QCOM_PCIE_EP_LINK_DOWN,
};
163 * struct qcom_pcie_ep_cfg - Per SoC config struct
164 * @hdma_support: HDMA support on this SoC
165 * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
166 * @disable_mhi_ram_parity_check: Disable MHI RAM data parity error check
168 struct qcom_pcie_ep_cfg
{
170 bool override_no_snoop
;
171 bool disable_mhi_ram_parity_check
;
175 * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
176 * @pci: Designware PCIe controller struct
177 * @parf: Qualcomm PCIe specific PARF register base
178 * @elbi: Designware PCIe specific ELBI register base
179 * @mmio: MMIO register base
180 * @perst_map: PERST regmap
181 * @mmio_res: MMIO region resource
182 * @core_reset: PCIe Endpoint core reset
183 * @reset: PERST# GPIO
185 * @phy: PHY controller block
186 * @debugfs: PCIe Endpoint Debugfs directory
187 * @icc_mem: Handle to an interconnect path between PCIe and MEM
189 * @num_clks: PCIe clocks count
190 * @perst_en: Flag for PERST enable
191 * @perst_sep_en: Flag for PERST separation enable
192 * @cfg: PCIe EP config struct
193 * @link_status: PCIe Link status
194 * @global_irq: Qualcomm PCIe specific Global IRQ
195 * @perst_irq: PERST# IRQ
197 struct qcom_pcie_ep
{
203 struct regmap
*perst_map
;
204 struct resource
*mmio_res
;
206 struct reset_control
*core_reset
;
207 struct gpio_desc
*reset
;
208 struct gpio_desc
*wake
;
210 struct dentry
*debugfs
;
212 struct icc_path
*icc_mem
;
214 struct clk_bulk_data
*clks
;
220 const struct qcom_pcie_ep_cfg
*cfg
;
221 enum qcom_pcie_ep_link_status link_status
;
226 static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep
*pcie_ep
)
228 struct dw_pcie
*pci
= &pcie_ep
->pci
;
229 struct device
*dev
= pci
->dev
;
232 ret
= reset_control_assert(pcie_ep
->core_reset
);
234 dev_err(dev
, "Cannot assert core reset\n");
238 usleep_range(CORE_RESET_TIME_US_MIN
, CORE_RESET_TIME_US_MAX
);
240 ret
= reset_control_deassert(pcie_ep
->core_reset
);
242 dev_err(dev
, "Cannot de-assert core reset\n");
246 usleep_range(CORE_RESET_TIME_US_MIN
, CORE_RESET_TIME_US_MAX
);
252 * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid
253 * device reset during host reboot and hibernation. The driver is
254 * expected to handle this situation.
256 static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep
*pcie_ep
)
258 if (pcie_ep
->perst_map
) {
259 regmap_write(pcie_ep
->perst_map
, pcie_ep
->perst_en
, 0);
260 regmap_write(pcie_ep
->perst_map
, pcie_ep
->perst_sep_en
, 0);
264 static int qcom_pcie_dw_link_up(struct dw_pcie
*pci
)
266 struct qcom_pcie_ep
*pcie_ep
= to_pcie_ep(pci
);
269 reg
= readl_relaxed(pcie_ep
->elbi
+ ELBI_SYS_STTS
);
271 return reg
& XMLH_LINK_UP
;
274 static int qcom_pcie_dw_start_link(struct dw_pcie
*pci
)
276 struct qcom_pcie_ep
*pcie_ep
= to_pcie_ep(pci
);
278 enable_irq(pcie_ep
->perst_irq
);
283 static void qcom_pcie_dw_stop_link(struct dw_pcie
*pci
)
285 struct qcom_pcie_ep
*pcie_ep
= to_pcie_ep(pci
);
287 disable_irq(pcie_ep
->perst_irq
);
290 static void qcom_pcie_dw_write_dbi2(struct dw_pcie
*pci
, void __iomem
*base
,
291 u32 reg
, size_t size
, u32 val
)
293 struct qcom_pcie_ep
*pcie_ep
= to_pcie_ep(pci
);
296 writel(1, pcie_ep
->elbi
+ ELBI_CS2_ENABLE
);
298 ret
= dw_pcie_write(pci
->dbi_base2
+ reg
, size
, val
);
300 dev_err(pci
->dev
, "Failed to write DBI2 register (0x%x): %d\n", reg
, ret
);
302 writel(0, pcie_ep
->elbi
+ ELBI_CS2_ENABLE
);
305 static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep
*pcie_ep
)
307 struct dw_pcie
*pci
= &pcie_ep
->pci
;
312 if (!pcie_ep
->icc_mem
)
315 offset
= dw_pcie_find_capability(pci
, PCI_CAP_ID_EXP
);
316 status
= readw(pci
->dbi_base
+ offset
+ PCI_EXP_LNKSTA
);
318 speed
= FIELD_GET(PCI_EXP_LNKSTA_CLS
, status
);
319 width
= FIELD_GET(PCI_EXP_LNKSTA_NLW
, status
);
321 ret
= icc_set_bw(pcie_ep
->icc_mem
, 0, width
* QCOM_PCIE_LINK_SPEED_TO_BW(speed
));
323 dev_err(pci
->dev
, "failed to set interconnect bandwidth: %d\n",
327 static int qcom_pcie_enable_resources(struct qcom_pcie_ep
*pcie_ep
)
329 struct dw_pcie
*pci
= &pcie_ep
->pci
;
332 ret
= clk_bulk_prepare_enable(pcie_ep
->num_clks
, pcie_ep
->clks
);
336 ret
= qcom_pcie_ep_core_reset(pcie_ep
);
338 goto err_disable_clk
;
340 ret
= phy_init(pcie_ep
->phy
);
342 goto err_disable_clk
;
344 ret
= phy_set_mode_ext(pcie_ep
->phy
, PHY_MODE_PCIE
, PHY_MODE_PCIE_EP
);
348 ret
= phy_power_on(pcie_ep
->phy
);
353 * Some Qualcomm platforms require interconnect bandwidth constraints
354 * to be set before enabling interconnect clocks.
356 * Set an initial peak bandwidth corresponding to single-lane Gen 1
357 * for the pcie-mem path.
359 ret
= icc_set_bw(pcie_ep
->icc_mem
, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
361 dev_err(pci
->dev
, "failed to set interconnect bandwidth: %d\n",
369 phy_power_off(pcie_ep
->phy
);
371 phy_exit(pcie_ep
->phy
);
373 clk_bulk_disable_unprepare(pcie_ep
->num_clks
, pcie_ep
->clks
);
378 static void qcom_pcie_disable_resources(struct qcom_pcie_ep
*pcie_ep
)
380 icc_set_bw(pcie_ep
->icc_mem
, 0, 0);
381 phy_power_off(pcie_ep
->phy
);
382 phy_exit(pcie_ep
->phy
);
383 clk_bulk_disable_unprepare(pcie_ep
->num_clks
, pcie_ep
->clks
);
386 static int qcom_pcie_perst_deassert(struct dw_pcie
*pci
)
388 struct qcom_pcie_ep
*pcie_ep
= to_pcie_ep(pci
);
389 struct device
*dev
= pci
->dev
;
393 ret
= qcom_pcie_enable_resources(pcie_ep
);
395 dev_err(dev
, "Failed to enable resources: %d\n", ret
);
399 /* Perform cleanup that requires refclk */
400 pci_epc_deinit_notify(pci
->ep
.epc
);
401 dw_pcie_ep_cleanup(&pci
->ep
);
403 /* Assert WAKE# to RC to indicate device is ready */
404 gpiod_set_value_cansleep(pcie_ep
->wake
, 1);
405 usleep_range(WAKE_DELAY_US
, WAKE_DELAY_US
+ 500);
406 gpiod_set_value_cansleep(pcie_ep
->wake
, 0);
408 qcom_pcie_ep_configure_tcsr(pcie_ep
);
410 /* Disable BDF to SID mapping */
411 val
= readl_relaxed(pcie_ep
->parf
+ PARF_BDF_TO_SID_CFG
);
412 val
|= PARF_BDF_TO_SID_BYPASS
;
413 writel_relaxed(val
, pcie_ep
->parf
+ PARF_BDF_TO_SID_CFG
);
415 /* Enable debug IRQ */
416 val
= readl_relaxed(pcie_ep
->parf
+ PARF_DEBUG_INT_EN
);
417 val
|= PARF_DEBUG_INT_RADM_PM_TURNOFF
|
418 PARF_DEBUG_INT_CFG_BUS_MASTER_EN
|
419 PARF_DEBUG_INT_PM_DSTATE_CHANGE
;
420 writel_relaxed(val
, pcie_ep
->parf
+ PARF_DEBUG_INT_EN
);
422 /* Configure PCIe to endpoint mode */
423 writel_relaxed(PARF_DEVICE_TYPE_EP
, pcie_ep
->parf
+ PARF_DEVICE_TYPE
);
425 /* Allow entering L1 state */
426 val
= readl_relaxed(pcie_ep
->parf
+ PARF_PM_CTRL
);
427 val
&= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1
;
428 writel_relaxed(val
, pcie_ep
->parf
+ PARF_PM_CTRL
);
430 /* Read halts write */
431 val
= readl_relaxed(pcie_ep
->parf
+ PARF_AXI_MSTR_RD_HALT_NO_WRITES
);
432 val
&= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN
;
433 writel_relaxed(val
, pcie_ep
->parf
+ PARF_AXI_MSTR_RD_HALT_NO_WRITES
);
435 /* Write after write halt */
436 val
= readl_relaxed(pcie_ep
->parf
+ PARF_AXI_MSTR_WR_ADDR_HALT
);
437 val
|= PARF_AXI_MSTR_WR_ADDR_HALT_EN
;
438 writel_relaxed(val
, pcie_ep
->parf
+ PARF_AXI_MSTR_WR_ADDR_HALT
);
440 /* Q2A flush disable */
441 val
= readl_relaxed(pcie_ep
->parf
+ PARF_Q2A_FLUSH
);
442 val
&= ~PARF_Q2A_FLUSH_EN
;
443 writel_relaxed(val
, pcie_ep
->parf
+ PARF_Q2A_FLUSH
);
446 * Disable Master AXI clock during idle. Do not allow DBI access
447 * to take the core out of L1. Disable core clock gating that
448 * gates PIPE clock from propagating to core clock. Report to the
449 * host that Vaux is present.
451 val
= readl_relaxed(pcie_ep
->parf
+ PARF_SYS_CTRL
);
452 val
&= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS
;
453 val
|= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE
|
454 PARF_SYS_CTRL_CORE_CLK_CGC_DIS
|
455 PARF_SYS_CTRL_AUX_PWR_DET
;
456 writel_relaxed(val
, pcie_ep
->parf
+ PARF_SYS_CTRL
);
458 /* Disable the debouncers */
459 val
= readl_relaxed(pcie_ep
->parf
+ PARF_DB_CTRL
);
460 val
|= PARF_DB_CTRL_INSR_DBNCR_BLOCK
| PARF_DB_CTRL_RMVL_DBNCR_BLOCK
|
461 PARF_DB_CTRL_DBI_WKP_BLOCK
| PARF_DB_CTRL_SLV_WKP_BLOCK
|
462 PARF_DB_CTRL_MST_WKP_BLOCK
;
463 writel_relaxed(val
, pcie_ep
->parf
+ PARF_DB_CTRL
);
465 /* Request to exit from L1SS for MSI and LTR MSG */
466 val
= readl_relaxed(pcie_ep
->parf
+ PARF_CFG_BITS
);
467 val
|= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN
;
468 writel_relaxed(val
, pcie_ep
->parf
+ PARF_CFG_BITS
);
470 dw_pcie_dbi_ro_wr_en(pci
);
472 /* Set the L0s Exit Latency to 2us-4us = 0x6 */
473 offset
= dw_pcie_find_capability(pci
, PCI_CAP_ID_EXP
);
474 val
= dw_pcie_readl_dbi(pci
, offset
+ PCI_EXP_LNKCAP
);
475 val
&= ~PCI_EXP_LNKCAP_L0SEL
;
476 val
|= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL
, 0x6);
477 dw_pcie_writel_dbi(pci
, offset
+ PCI_EXP_LNKCAP
, val
);
479 /* Set the L1 Exit Latency to be 32us-64 us = 0x6 */
480 offset
= dw_pcie_find_capability(pci
, PCI_CAP_ID_EXP
);
481 val
= dw_pcie_readl_dbi(pci
, offset
+ PCI_EXP_LNKCAP
);
482 val
&= ~PCI_EXP_LNKCAP_L1EL
;
483 val
|= FIELD_PREP(PCI_EXP_LNKCAP_L1EL
, 0x6);
484 dw_pcie_writel_dbi(pci
, offset
+ PCI_EXP_LNKCAP
, val
);
486 dw_pcie_dbi_ro_wr_dis(pci
);
488 writel_relaxed(0, pcie_ep
->parf
+ PARF_INT_ALL_MASK
);
489 val
= PARF_INT_ALL_LINK_DOWN
| PARF_INT_ALL_BME
|
490 PARF_INT_ALL_PM_TURNOFF
| PARF_INT_ALL_DSTATE_CHANGE
|
491 PARF_INT_ALL_LINK_UP
| PARF_INT_ALL_EDMA
;
492 writel_relaxed(val
, pcie_ep
->parf
+ PARF_INT_ALL_MASK
);
494 if (pcie_ep
->cfg
&& pcie_ep
->cfg
->disable_mhi_ram_parity_check
) {
495 val
= readl_relaxed(pcie_ep
->parf
+ PARF_INT_ALL_5_MASK
);
496 val
&= ~PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR
;
497 writel_relaxed(val
, pcie_ep
->parf
+ PARF_INT_ALL_5_MASK
);
500 ret
= dw_pcie_ep_init_registers(&pcie_ep
->pci
.ep
);
502 dev_err(dev
, "Failed to complete initialization: %d\n", ret
);
503 goto err_disable_resources
;
506 if (pcie_link_speed
[pci
->max_link_speed
] == PCIE_SPEED_16_0GT
) {
507 qcom_pcie_common_set_16gt_equalization(pci
);
508 qcom_pcie_common_set_16gt_lane_margining(pci
);
512 * The physical address of the MMIO region which is exposed as the BAR
513 * should be written to MHI BASE registers.
515 writel_relaxed(pcie_ep
->mmio_res
->start
,
516 pcie_ep
->parf
+ PARF_MHI_BASE_ADDR_LOWER
);
517 writel_relaxed(0, pcie_ep
->parf
+ PARF_MHI_BASE_ADDR_UPPER
);
519 /* Gate Master AXI clock to MHI bus during L1SS */
520 val
= readl_relaxed(pcie_ep
->parf
+ PARF_MHI_CLOCK_RESET_CTRL
);
521 val
&= ~PARF_MSTR_AXI_CLK_EN
;
522 writel_relaxed(val
, pcie_ep
->parf
+ PARF_MHI_CLOCK_RESET_CTRL
);
524 pci_epc_init_notify(pcie_ep
->pci
.ep
.epc
);
527 val
= readl_relaxed(pcie_ep
->parf
+ PARF_LTSSM
);
529 writel_relaxed(val
, pcie_ep
->parf
+ PARF_LTSSM
);
531 if (pcie_ep
->cfg
&& pcie_ep
->cfg
->override_no_snoop
)
532 writel_relaxed(WR_NO_SNOOP_OVERIDE_EN
| RD_NO_SNOOP_OVERIDE_EN
,
533 pcie_ep
->parf
+ PARF_NO_SNOOP_OVERIDE
);
537 err_disable_resources
:
538 qcom_pcie_disable_resources(pcie_ep
);
543 static void qcom_pcie_perst_assert(struct dw_pcie
*pci
)
545 struct qcom_pcie_ep
*pcie_ep
= to_pcie_ep(pci
);
547 qcom_pcie_disable_resources(pcie_ep
);
548 pcie_ep
->link_status
= QCOM_PCIE_EP_LINK_DISABLED
;
551 /* Common DWC controller ops */
552 static const struct dw_pcie_ops pci_ops
= {
553 .link_up
= qcom_pcie_dw_link_up
,
554 .start_link
= qcom_pcie_dw_start_link
,
555 .stop_link
= qcom_pcie_dw_stop_link
,
556 .write_dbi2
= qcom_pcie_dw_write_dbi2
,
559 static int qcom_pcie_ep_get_io_resources(struct platform_device
*pdev
,
560 struct qcom_pcie_ep
*pcie_ep
)
562 struct device
*dev
= &pdev
->dev
;
563 struct dw_pcie
*pci
= &pcie_ep
->pci
;
564 struct device_node
*syscon
;
565 struct resource
*res
;
568 pcie_ep
->parf
= devm_platform_ioremap_resource_byname(pdev
, "parf");
569 if (IS_ERR(pcie_ep
->parf
))
570 return PTR_ERR(pcie_ep
->parf
);
572 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "dbi");
573 pci
->dbi_base
= devm_pci_remap_cfg_resource(dev
, res
);
574 if (IS_ERR(pci
->dbi_base
))
575 return PTR_ERR(pci
->dbi_base
);
576 pci
->dbi_base2
= pci
->dbi_base
;
578 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "elbi");
579 pcie_ep
->elbi
= devm_pci_remap_cfg_resource(dev
, res
);
580 if (IS_ERR(pcie_ep
->elbi
))
581 return PTR_ERR(pcie_ep
->elbi
);
583 pcie_ep
->mmio_res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
,
585 if (!pcie_ep
->mmio_res
) {
586 dev_err(dev
, "Failed to get mmio resource\n");
590 pcie_ep
->mmio
= devm_pci_remap_cfg_resource(dev
, pcie_ep
->mmio_res
);
591 if (IS_ERR(pcie_ep
->mmio
))
592 return PTR_ERR(pcie_ep
->mmio
);
594 syscon
= of_parse_phandle(dev
->of_node
, "qcom,perst-regs", 0);
596 dev_dbg(dev
, "PERST separation not available\n");
600 pcie_ep
->perst_map
= syscon_node_to_regmap(syscon
);
602 if (IS_ERR(pcie_ep
->perst_map
))
603 return PTR_ERR(pcie_ep
->perst_map
);
605 ret
= of_property_read_u32_index(dev
->of_node
, "qcom,perst-regs",
606 1, &pcie_ep
->perst_en
);
608 dev_err(dev
, "No Perst Enable offset in syscon\n");
612 ret
= of_property_read_u32_index(dev
->of_node
, "qcom,perst-regs",
613 2, &pcie_ep
->perst_sep_en
);
615 dev_err(dev
, "No Perst Separation Enable offset in syscon\n");
622 static int qcom_pcie_ep_get_resources(struct platform_device
*pdev
,
623 struct qcom_pcie_ep
*pcie_ep
)
625 struct device
*dev
= &pdev
->dev
;
628 ret
= qcom_pcie_ep_get_io_resources(pdev
, pcie_ep
);
630 dev_err(dev
, "Failed to get io resources %d\n", ret
);
634 pcie_ep
->num_clks
= devm_clk_bulk_get_all(dev
, &pcie_ep
->clks
);
635 if (pcie_ep
->num_clks
< 0) {
636 dev_err(dev
, "Failed to get clocks\n");
637 return pcie_ep
->num_clks
;
640 pcie_ep
->core_reset
= devm_reset_control_get_exclusive(dev
, "core");
641 if (IS_ERR(pcie_ep
->core_reset
))
642 return PTR_ERR(pcie_ep
->core_reset
);
644 pcie_ep
->reset
= devm_gpiod_get(dev
, "reset", GPIOD_IN
);
645 if (IS_ERR(pcie_ep
->reset
))
646 return PTR_ERR(pcie_ep
->reset
);
648 pcie_ep
->wake
= devm_gpiod_get_optional(dev
, "wake", GPIOD_OUT_LOW
);
649 if (IS_ERR(pcie_ep
->wake
))
650 return PTR_ERR(pcie_ep
->wake
);
652 pcie_ep
->phy
= devm_phy_optional_get(dev
, "pciephy");
653 if (IS_ERR(pcie_ep
->phy
))
654 ret
= PTR_ERR(pcie_ep
->phy
);
656 pcie_ep
->icc_mem
= devm_of_icc_get(dev
, "pcie-mem");
657 if (IS_ERR(pcie_ep
->icc_mem
))
658 ret
= PTR_ERR(pcie_ep
->icc_mem
);
663 /* TODO: Notify clients about PCIe state change */
664 static irqreturn_t
qcom_pcie_ep_global_irq_thread(int irq
, void *data
)
666 struct qcom_pcie_ep
*pcie_ep
= data
;
667 struct dw_pcie
*pci
= &pcie_ep
->pci
;
668 struct device
*dev
= pci
->dev
;
669 u32 status
= readl_relaxed(pcie_ep
->parf
+ PARF_INT_ALL_STATUS
);
672 writel_relaxed(status
, pcie_ep
->parf
+ PARF_INT_ALL_CLEAR
);
674 if (FIELD_GET(PARF_INT_ALL_LINK_DOWN
, status
)) {
675 dev_dbg(dev
, "Received Linkdown event\n");
676 pcie_ep
->link_status
= QCOM_PCIE_EP_LINK_DOWN
;
677 dw_pcie_ep_linkdown(&pci
->ep
);
678 } else if (FIELD_GET(PARF_INT_ALL_BME
, status
)) {
679 dev_dbg(dev
, "Received Bus Master Enable event\n");
680 pcie_ep
->link_status
= QCOM_PCIE_EP_LINK_ENABLED
;
681 qcom_pcie_ep_icc_update(pcie_ep
);
682 pci_epc_bus_master_enable_notify(pci
->ep
.epc
);
683 } else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF
, status
)) {
684 dev_dbg(dev
, "Received PM Turn-off event! Entering L23\n");
685 val
= readl_relaxed(pcie_ep
->parf
+ PARF_PM_CTRL
);
686 val
|= PARF_PM_CTRL_READY_ENTR_L23
;
687 writel_relaxed(val
, pcie_ep
->parf
+ PARF_PM_CTRL
);
688 } else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE
, status
)) {
689 dstate
= dw_pcie_readl_dbi(pci
, DBI_CON_STATUS
) &
690 DBI_CON_STATUS_POWER_STATE_MASK
;
691 dev_dbg(dev
, "Received D%d state event\n", dstate
);
693 val
= readl_relaxed(pcie_ep
->parf
+ PARF_PM_CTRL
);
694 val
|= PARF_PM_CTRL_REQ_EXIT_L1
;
695 writel_relaxed(val
, pcie_ep
->parf
+ PARF_PM_CTRL
);
697 } else if (FIELD_GET(PARF_INT_ALL_LINK_UP
, status
)) {
698 dev_dbg(dev
, "Received Linkup event. Enumeration complete!\n");
699 dw_pcie_ep_linkup(&pci
->ep
);
700 pcie_ep
->link_status
= QCOM_PCIE_EP_LINK_UP
;
702 dev_WARN_ONCE(dev
, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
709 static irqreturn_t
qcom_pcie_ep_perst_irq_thread(int irq
, void *data
)
711 struct qcom_pcie_ep
*pcie_ep
= data
;
712 struct dw_pcie
*pci
= &pcie_ep
->pci
;
713 struct device
*dev
= pci
->dev
;
716 perst
= gpiod_get_value(pcie_ep
->reset
);
718 dev_dbg(dev
, "PERST asserted by host. Shutting down the PCIe link!\n");
719 qcom_pcie_perst_assert(pci
);
721 dev_dbg(dev
, "PERST de-asserted by host. Starting link training!\n");
722 qcom_pcie_perst_deassert(pci
);
725 irq_set_irq_type(gpiod_to_irq(pcie_ep
->reset
),
726 (perst
? IRQF_TRIGGER_HIGH
: IRQF_TRIGGER_LOW
));
731 static int qcom_pcie_ep_enable_irq_resources(struct platform_device
*pdev
,
732 struct qcom_pcie_ep
*pcie_ep
)
734 struct device
*dev
= pcie_ep
->pci
.dev
;
738 name
= devm_kasprintf(dev
, GFP_KERNEL
, "qcom_pcie_ep_global_irq%d",
739 pcie_ep
->pci
.ep
.epc
->domain_nr
);
743 pcie_ep
->global_irq
= platform_get_irq_byname(pdev
, "global");
744 if (pcie_ep
->global_irq
< 0)
745 return pcie_ep
->global_irq
;
747 ret
= devm_request_threaded_irq(&pdev
->dev
, pcie_ep
->global_irq
, NULL
,
748 qcom_pcie_ep_global_irq_thread
,
752 dev_err(&pdev
->dev
, "Failed to request Global IRQ\n");
756 name
= devm_kasprintf(dev
, GFP_KERNEL
, "qcom_pcie_ep_perst_irq%d",
757 pcie_ep
->pci
.ep
.epc
->domain_nr
);
761 pcie_ep
->perst_irq
= gpiod_to_irq(pcie_ep
->reset
);
762 irq_set_status_flags(pcie_ep
->perst_irq
, IRQ_NOAUTOEN
);
763 ret
= devm_request_threaded_irq(&pdev
->dev
, pcie_ep
->perst_irq
, NULL
,
764 qcom_pcie_ep_perst_irq_thread
,
765 IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
,
768 dev_err(&pdev
->dev
, "Failed to request PERST IRQ\n");
769 disable_irq(pcie_ep
->global_irq
);
776 static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep
*ep
, u8 func_no
,
777 unsigned int type
, u16 interrupt_num
)
779 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
783 return dw_pcie_ep_raise_intx_irq(ep
, func_no
);
785 return dw_pcie_ep_raise_msi_irq(ep
, func_no
, interrupt_num
);
787 dev_err(pci
->dev
, "Unknown IRQ type\n");
792 static int qcom_pcie_ep_link_transition_count(struct seq_file
*s
, void *data
)
794 struct qcom_pcie_ep
*pcie_ep
= (struct qcom_pcie_ep
*)
795 dev_get_drvdata(s
->private);
797 seq_printf(s
, "L0s transition count: %u\n",
798 readl_relaxed(pcie_ep
->mmio
+ PARF_DEBUG_CNT_PM_LINKST_IN_L0S
));
800 seq_printf(s
, "L1 transition count: %u\n",
801 readl_relaxed(pcie_ep
->mmio
+ PARF_DEBUG_CNT_PM_LINKST_IN_L1
));
803 seq_printf(s
, "L1.1 transition count: %u\n",
804 readl_relaxed(pcie_ep
->mmio
+ PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1
));
806 seq_printf(s
, "L1.2 transition count: %u\n",
807 readl_relaxed(pcie_ep
->mmio
+ PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2
));
809 seq_printf(s
, "L2 transition count: %u\n",
810 readl_relaxed(pcie_ep
->mmio
+ PARF_DEBUG_CNT_PM_LINKST_IN_L2
));
815 static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep
*pcie_ep
)
817 struct dw_pcie
*pci
= &pcie_ep
->pci
;
819 debugfs_create_devm_seqfile(pci
->dev
, "link_transition_count", pcie_ep
->debugfs
,
820 qcom_pcie_ep_link_transition_count
);
823 static const struct pci_epc_features qcom_pcie_epc_features
= {
824 .linkup_notifier
= true,
826 .msix_capable
= false,
830 static const struct pci_epc_features
*
831 qcom_pcie_epc_get_features(struct dw_pcie_ep
*pci_ep
)
833 return &qcom_pcie_epc_features
;
836 static void qcom_pcie_ep_init(struct dw_pcie_ep
*ep
)
838 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
841 for (bar
= BAR_0
; bar
<= BAR_5
; bar
++)
842 dw_pcie_ep_reset_bar(pci
, bar
);
845 static const struct dw_pcie_ep_ops pci_ep_ops
= {
846 .init
= qcom_pcie_ep_init
,
847 .raise_irq
= qcom_pcie_ep_raise_irq
,
848 .get_features
= qcom_pcie_epc_get_features
,
851 static int qcom_pcie_ep_probe(struct platform_device
*pdev
)
853 struct device
*dev
= &pdev
->dev
;
854 struct qcom_pcie_ep
*pcie_ep
;
858 pcie_ep
= devm_kzalloc(dev
, sizeof(*pcie_ep
), GFP_KERNEL
);
862 pcie_ep
->pci
.dev
= dev
;
863 pcie_ep
->pci
.ops
= &pci_ops
;
864 pcie_ep
->pci
.ep
.ops
= &pci_ep_ops
;
865 pcie_ep
->pci
.edma
.nr_irqs
= 1;
867 pcie_ep
->cfg
= of_device_get_match_data(dev
);
868 if (pcie_ep
->cfg
&& pcie_ep
->cfg
->hdma_support
) {
869 pcie_ep
->pci
.edma
.ll_wr_cnt
= 8;
870 pcie_ep
->pci
.edma
.ll_rd_cnt
= 8;
871 pcie_ep
->pci
.edma
.mf
= EDMA_MF_HDMA_NATIVE
;
874 platform_set_drvdata(pdev
, pcie_ep
);
876 ret
= qcom_pcie_ep_get_resources(pdev
, pcie_ep
);
880 ret
= dw_pcie_ep_init(&pcie_ep
->pci
.ep
);
882 dev_err(dev
, "Failed to initialize endpoint: %d\n", ret
);
886 ret
= qcom_pcie_ep_enable_irq_resources(pdev
, pcie_ep
);
890 name
= devm_kasprintf(dev
, GFP_KERNEL
, "%pOFP", dev
->of_node
);
893 goto err_disable_irqs
;
896 pcie_ep
->debugfs
= debugfs_create_dir(name
, NULL
);
897 qcom_pcie_ep_init_debugfs(pcie_ep
);
902 disable_irq(pcie_ep
->global_irq
);
903 disable_irq(pcie_ep
->perst_irq
);
906 dw_pcie_ep_deinit(&pcie_ep
->pci
.ep
);
911 static void qcom_pcie_ep_remove(struct platform_device
*pdev
)
913 struct qcom_pcie_ep
*pcie_ep
= platform_get_drvdata(pdev
);
915 disable_irq(pcie_ep
->global_irq
);
916 disable_irq(pcie_ep
->perst_irq
);
918 debugfs_remove_recursive(pcie_ep
->debugfs
);
920 if (pcie_ep
->link_status
== QCOM_PCIE_EP_LINK_DISABLED
)
923 qcom_pcie_disable_resources(pcie_ep
);
926 static const struct qcom_pcie_ep_cfg cfg_1_34_0
= {
927 .hdma_support
= true,
928 .override_no_snoop
= true,
929 .disable_mhi_ram_parity_check
= true,
932 static const struct of_device_id qcom_pcie_ep_match
[] = {
933 { .compatible
= "qcom,sa8775p-pcie-ep", .data
= &cfg_1_34_0
},
934 { .compatible
= "qcom,sdx55-pcie-ep", },
935 { .compatible
= "qcom,sm8450-pcie-ep", },
938 MODULE_DEVICE_TABLE(of
, qcom_pcie_ep_match
);
940 static struct platform_driver qcom_pcie_ep_driver
= {
941 .probe
= qcom_pcie_ep_probe
,
942 .remove
= qcom_pcie_ep_remove
,
944 .name
= "qcom-pcie-ep",
945 .of_match_table
= qcom_pcie_ep_match
,
948 builtin_platform_driver(qcom_pcie_ep_driver
);
950 MODULE_AUTHOR("Siddartha Mohanadoss <smohanad@codeaurora.org>");
951 MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
952 MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver");
953 MODULE_LICENSE("GPL v2");