// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra194 SoC
 *
 * Copyright (C) 2019 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"
#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88

#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT			16

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_PRE_DETECT			5

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD			0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

#define IO_BASE_IO_DECODE			BIT(0)
#define IO_BASE_IO_DECODE_BIT8			BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF		0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT		(19)

#define EVENT_COUNTER_ALL_CLEAR			0x3
#define EVENT_COUNTER_ENABLE_ALL		0x7
#define EVENT_COUNTER_ENABLE_SHIFT		2
#define EVENT_COUNTER_EVENT_SEL_MASK		GENMASK(7, 0)
#define EVENT_COUNTER_EVENT_SEL_SHIFT		16
#define EVENT_COUNTER_EVENT_Tx_L0S		0x2
#define EVENT_COUNTER_EVENT_Rx_L0S		0x3
#define EVENT_COUNTER_EVENT_L1			0x5
#define EVENT_COUNTER_EVENT_L1_1		0x7
#define EVENT_COUNTER_EVENT_L1_2		0x8
#define EVENT_COUNTER_GROUP_SEL_SHIFT		24
#define EVENT_COUNTER_GROUP_5			0x5

#define PORT_LOGIC_ACK_F_ASPM_CTRL		0x70C
#define ENTER_ASPM				BIT(30)
#define L0S_ENTRANCE_LAT_SHIFT			24
#define L0S_ENTRANCE_LAT_MASK			GENMASK(26, 24)
#define L1_ENTRANCE_LAT_SHIFT			27
#define L1_ENTRANCE_LAT_MASK			GENMASK(29, 27)
#define N_FTS_SHIFT				8
#define N_FTS_MASK				GENMASK(7, 0)
#define N_FTS_VAL				52

#define PORT_LOGIC_GEN2_CTRL			0x80C
#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE	BIT(17)
#define FTS_MASK				GENMASK(7, 0)

#define PORT_LOGIC_MSI_CTRL_INT_0_EN		0x828

#define GEN3_EQ_CONTROL_OFF			0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)

#define GEN3_RELATED_OFF			0x890
#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0)
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL		0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

#define PME_ACK_TIMEOUT		10000

#define LTSSM_TIMEOUT		50000	/* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT	5

#define GEN1_CORE_CLK_FREQ	62500000
#define GEN2_CORE_CLK_FREQ	125000000
#define GEN3_CORE_CLK_FREQ	250000000
#define GEN4_CORE_CLK_FREQ	500000000

#define LTR_MSG_TIMEOUT		(100 * 1000)

#define PERST_DEBOUNCE_TIME	(5 * 1000)

#define EP_STATE_DISABLED	0
#define EP_STATE_ENABLED	1
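/*
 * Core clock frequency to program for each negotiated link speed (indexed by
 * PCI_EXP_LNKSTA_CLS - 1), and the per-controller offsets of the DWC event
 * counter control/data registers (indexed by the controller ID, pcie->cid).
 */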
static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};
static const u32 event_cntr_ctrl_offset[] = {
	0x1d8,
	0x1a8,
	0x1a8,
	0x1a8,
	0x1c4,
	0x1d8
};

static const u32 event_cntr_data_offset[] = {
	0x1dc,
	0x1ac,
	0x1ac,
	0x1ac,
	0x1c8,
	0x1dc
};
struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;
	struct tegra_bpmp *bpmp;

	enum dw_pcie_device_mode mode;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool link_state;
	bool update_fc_fixup;
	u8 init_link_width;
	u32 num_lanes;
	u32 max_speed;
	u32 cid;
	u32 cfg_link_cap_l1sub;
	u16 pcie_cap_base;
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
};
struct tegra_pcie_dw_of_data {
	enum dw_pcie_device_mode mode;
};
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}
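/*
 * Accessors for the Tegra-specific application ("APPL") register block that
 * wraps the DesignWare core; this is the region mapped from the "appl"
 * resource.
 */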
static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}
struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};
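/*
 * If the Link Bandwidth Management status shows the link running narrower
 * than the width it initially trained to, force the target link speed back
 * to 2.5 GT/s and retrain the link.
 */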
static void apply_bad_link_workaround(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE:- Since this scenario is uncommon and link as such is not
	 * stable anyway, not waiting to confirm if link is really
	 * transitioning to Gen-2 speed
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
				     PCI_EXP_LNKSTA_NLW_SHIFT;
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}
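/*
 * Root port mode interrupt handler: services link state change events
 * (including the SBR/surprise-link-down workaround), bandwidth management
 * and autonomous bandwidth interrupts, and CDM register check results.
 */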
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val, tmp;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);

			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
			val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
		}
	}

	if (val & APPL_INTR_STATUS_L0_INT_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
	}

	return IRQ_HANDLED;
}
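/*
 * Called when a hot reset completes in endpoint mode: clear all latched
 * interrupt statuses and re-enable the LTSSM so the link can retrain.
 */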
static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}
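/*
 * Threaded endpoint interrupt handler, woken when Bus Master Enable changes:
 * scales the core clock to the negotiated link speed and, if the function
 * advertises L1 Substates and bus mastering is enabled, sends an LTR message
 * upstream.
 */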
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	ktime_t timeout;
	u32 val, speed;

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	/* If EP doesn't advertise L1SS, just return */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		/* 110us for both snoop and no-snoop */
		val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
		val |= (val << LTR_MST_NO_SNOOP_SHIFT);
		appl_writel(pcie, val, APPL_LTR_MSG_1);

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	int spurious = 1;
	u32 val, tmp;

	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);

		if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			tmp = appl_readl(pcie, APPL_LINK_STATUS);
			if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				dw_pcie_ep_linkup(ep);
			}
		}

		spurious = 0;
	}

	if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);

		if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 val);
		appl_writel(pcie, val, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}
static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
				     u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * This is an endpoint-mode-specific register that happens to be
	 * visible even when the controller operates in root port mode, and
	 * the system hangs if it is accessed while the link is in the
	 * ASPM-L1 state. So skip accessing it altogether.
	 */
	if (where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size,
				     u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * This is an endpoint-mode-specific register that happens to be
	 * visible even when the controller operates in root port mode, and
	 * the system hangs if it is accessed while the link is in the
	 * ASPM-L1 state. So skip accessing it altogether.
	 */
	if (where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return dw_pcie_write(pci->dbi_base + where, size, val);
}
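/*
 * ASPM support: helpers to strip L1.1/L1.2 capability advertisement when
 * CLKREQ# is not routed, program L1SS timing parameters, and expose ASPM
 * state entry counters (collected via the DWC event counters) through
 * debugfs.
 */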
#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);

	return val;
}

static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);

	return 0;
}

static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
	val &= ~L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT);
	val |= ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
}

static int init_debugfs(struct tegra_pcie_dw *pcie)
{
	struct dentry *d;

	d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
					pcie->debugfs, aspm_state_cnt);
	if (IS_ERR_OR_NULL(d))
		dev_err(pcie->dev,
			"Failed to create debugfs file \"aspm_state_cnt\"\n");

	return 0;
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
#endif
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
				PCI_EXP_LNKSTA_NLW_SHIFT;

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}
static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable legacy interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}
static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	dw_pcie_msi_init(pp);

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}
static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/* Clear interrupt statuses before enabling interrupts */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	tegra_pcie_enable_system_interrupts(pp);
	tegra_pcie_enable_legacy_interrupts(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi_interrupts(pp);
}
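/*
 * Program the per-lane Gen3/Gen4 equalization presets and the preset request
 * vectors used during link equalization for the 8 GT/s and 16 GT/s data
 * rates.
 */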
static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF
				 + (i * 2), 2, &val);
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF
				 + (i * 2), 2, val);

		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		dw_pcie_read(pci->dbi_base + offset + i, 1, &val);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_write(pci->dbi_base + offset + i, 1, val);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
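/*
 * One-time core configuration performed before attempting link up: disable
 * I/O decoding, set the CRS response, apply FTS/equalization/ASPM settings
 * and the DT-provided speed and lane limits, then toggle PERST# and enable
 * the LTSSM.
 */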
static void tegra_pcie_prepare_host(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
	val &= ~(N_FTS_MASK << N_FTS_SHIFT);
	val |= N_FTS_VAL << N_FTS_SHIFT;
	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);

	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);

	/* Enable as 0xFFFF0001 response for CRS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Configure Max Speed from DT */
	if (pcie->max_speed && pcie->max_speed != -EINVAL) {
		val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
					PCI_EXP_LNKCAP);
		val &= ~PCI_EXP_LNKCAP_SLS;
		val |= pcie->max_speed;
		dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
				   val);
	}

	/* Configure Max lane width from DT */
	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	dw_pcie_setup_rc(pp);

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);
}
static int tegra_pcie_dw_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val, tmp, offset, speed;

	tegra_pcie_prepare_host(pp);

	if (dw_pcie_wait_for_link(pci)) {
		/*
		 * There are some endpoints which can't get the link up if
		 * the root port has the Data Link Feature (DLF) enabled.
		 * Refer to Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more
		 * info on Scaled Flow Control and DLF.
		 * So confirm that is indeed the case here and attempt link
		 * up once again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for all good reasons */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL\n");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");

		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		dw_pcie_writel_dbi(pci, offset, val);

		tegra_pcie_prepare_host(pp);

		if (dw_pcie_wait_for_link(pci))
			return 0;
	}

	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
		PCI_EXP_LNKSTA_CLS;
	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}
static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
{
	pp->num_vectors = MAX_MSI_IRQS;
}

static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	enable_irq(pcie->pex_rst_irq);

	return 0;
}

static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};

static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.rd_own_conf = tegra_pcie_dw_rd_own_conf,
	.wr_own_conf = tegra_pcie_dw_wr_own_conf,
	.host_init = tegra_pcie_dw_host_init,
	.set_num_vectors = tegra_pcie_set_msi_vec_num,
};
static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int phy_count = pcie->phy_count;

	while (phy_count--) {
		phy_power_off(pcie->phys[phy_count]);
		phy_exit(pcie->phys[phy_count]);
	}
}

static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phys[i]);
		if (ret < 0)
			goto phy_power_off;

		ret = phy_power_on(pcie->phys[i]);
		if (ret < 0)
			goto phy_exit;
	}

	return 0;

phy_power_off:
	while (i--) {
		phy_power_off(pcie->phys[i]);
phy_exit:
		phy_exit(pcie->phys[i]);
	}

	return ret;
}
static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
	struct device_node *np = pcie->dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
	if (ret < 0) {
		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
				   &pcie->aspm_pwr_on_t);
	if (ret < 0)
		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
			 ret);

	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
				   &pcie->aspm_l0s_enter_lat);
	if (ret < 0)
		dev_info(pcie->dev,
			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);

	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
		return ret;
	}

	pcie->max_speed = of_pci_get_max_link_speed(np);

	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
	if (ret) {
		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
		return ret;
	}

	ret = of_property_count_strings(np, "phy-names");
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
			ret);
		return ret;
	}
	pcie->phy_count = ret;

	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
		pcie->update_fc_fixup = true;

	pcie->supports_clkreq =
		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}
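/*
 * The BPMP firmware owns the UPHY and per-controller power state; these
 * helpers send MRQ_UPHY requests to enable or disable a controller and to
 * initialize or turn off the endpoint controller PLL.
 */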
static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
					  bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	/* Controller-5 doesn't need to have its state set by BPMP-FW */
	if (pcie->cid == 5)
		return 0;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
	req.controller_state.pcie_controller = pcie->cid;
	req.controller_state.enable = enable;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
					 bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	if (enable) {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
	} else {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
	}

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct pcie_port *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;

	/*
	 * The link doesn't go into the L2 state with some endpoints if they
	 * are not in D0. So make sure that the immediate downstream devices
	 * are in D0 before sending PME_TurnOff to put the link into L2.
	 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
	 * 5.2 Link State Power Management (Page #428).
	 */
	list_for_each_entry(child, &pp->root_bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->root_bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}
static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
	if (IS_ERR(pcie->slot_ctl_3v3)) {
		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_3v3);

		pcie->slot_ctl_3v3 = NULL;
	}

	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
	if (IS_ERR(pcie->slot_ctl_12v)) {
		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_12v);

		pcie->slot_ctl_12v = NULL;
	}

	return 0;
}
static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	int ret;

	if (pcie->slot_ctl_3v3) {
		ret = regulator_enable(pcie->slot_ctl_3v3);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 3.3V slot supply: %d\n", ret);
			return ret;
		}
	}

	if (pcie->slot_ctl_12v) {
		ret = regulator_enable(pcie->slot_ctl_12v);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 12V slot supply: %d\n", ret);
			goto fail_12v_enable;
		}
	}

	/*
	 * According to PCI Express Card Electromechanical Specification
	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
	 * should be a minimum of 100ms.
	 */
	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
		msleep(100);

	return 0;

fail_12v_enable:
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);

	return ret;
}
static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}
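/*
 * Bring the controller out of reset and configure it for root port
 * operation: BPMP controller state, slot and pad regulators, core clock,
 * APB/core resets, PHYs and the APPL base address/mode registers.
 */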
static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	u32 val;
	int ret;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	if (!pcie->supports_clkreq) {
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	/* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	return 0;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}
static int __deinit_controller(struct tegra_pcie_dw *pcie)
{
	int ret;

	ret = reset_control_assert(pcie->core_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
			ret);
		return ret;
	}

	tegra_pcie_disable_phy(pcie);

	ret = reset_control_assert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
		return ret;
	}

	clk_disable_unprepare(pcie->core_clk);

	ret = regulator_disable(pcie->pex_ctl_supply);
	if (ret) {
		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
		return ret;
	}

	tegra_pcie_disable_slot_regulators(pcie);

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
	if (ret) {
		dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
			pcie->cid, ret);
		return ret;
	}

	return 0;
}
static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	int ret;

	ret = tegra_pcie_config_controller(pcie, false);
	if (ret < 0)
		return ret;

	pp->ops = &tegra_pcie_dw_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
		goto fail_host_init;
	}

	return 0;

fail_host_init:
	return __deinit_controller(pcie);
}
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
	u32 val;

	if (!tegra_pcie_dw_link_up(&pcie->pci))
		return 0;

	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
					 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
					 1, PME_ACK_TIMEOUT);
}
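/*
 * Attempt a clean L2 entry via PME_Turn_Off; if the link refuses to enter
 * L2, assert PERST# to force the LTSSM back to the detect state, then gate
 * CLKREQ# and the reference clock to the slot.
 */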
static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
	u32 data;
	int err;

	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
		dev_dbg(pcie->dev, "PCIe link is not up...!\n");
		return;
	}

	if (tegra_pcie_try_link_l2(pcie)) {
		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
		/*
		 * TX lane clock freq will reset to Gen1 only if link is in L2
		 * or detect state.
		 * So apply pex_rst to end point to force RP to go into detect
		 * state
		 */
		data = appl_readl(pcie, APPL_PINMUX);
		data &= ~APPL_PINMUX_PEX_RST;
		appl_writel(pcie, data, APPL_PINMUX);

		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
						data,
						((data &
						  APPL_DEBUG_LTSSM_STATE_MASK) >>
						 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
						LTSSM_STATE_PRE_DETECT,
						1, LTSSM_TIMEOUT);
		if (err)
			dev_info(pcie->dev, "Link didn't go to detect state\n");

		/* Disable LTSSM after link is in detect state */
		data = appl_readl(pcie, APPL_CTRL);
		data &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, data, APPL_CTRL);
	}

	/*
	 * DBI registers may not be accessible after this as PLL-E would be
	 * down depending on how CLKREQ is pulled by end point
	 */
	data = appl_readl(pcie, APPL_PINMUX);
	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
	/* Cut REFCLK to slot */
	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, data, APPL_PINMUX);
}
static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra_pcie_dw_pme_turnoff(pcie);

	return __deinit_controller(pcie);
}
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
	struct pcie_port *pp = &pcie->pci.pp;
	struct device *dev = pcie->dev;
	char *name;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
		if (!pp->msi_irq) {
			dev_err(dev, "Failed to get MSI interrupt\n");
			return -ENODEV;
		}
	}

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		goto fail_pm_get_sync;
	}

	ret = pinctrl_pm_select_default_state(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
		goto fail_pm_get_sync;
	}

	tegra_pcie_init_controller(pcie);

	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
	if (!pcie->link_state) {
		ret = -ENOMEDIUM;
		goto fail_host_init;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name) {
		ret = -ENOMEM;
		goto fail_host_init;
	}

	pcie->debugfs = debugfs_create_dir(name, NULL);
	if (!pcie->debugfs)
		dev_err(dev, "Failed to create debugfs\n");
	else
		init_debugfs(pcie);

	return 0;

fail_host_init:
	tegra_pcie_deinit_controller(pcie);
fail_pm_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return ret;
}
static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
{
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_DISABLED)
		return;

	/* Disable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
				 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
				  APPL_DEBUG_LTSSM_STATE_SHIFT) ==
				 LTSSM_STATE_PRE_DETECT,
				 1, LTSSM_TIMEOUT);
	if (ret)
		dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);

	reset_control_assert(pcie->core_rst);

	tegra_pcie_disable_phy(pcie);

	reset_control_assert(pcie->core_apb_rst);

	clk_disable_unprepare(pcie->core_clk);

	pm_runtime_put_sync(pcie->dev);

	ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);

	pcie->ep_state = EP_STATE_DISABLED;
	dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}
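/*
 * Full endpoint-mode bring-up, run when the host deasserts PERST#: power up
 * the UPHY PLL, clocks, resets and PHYs, program the APPL and DBI registers
 * for EP operation, complete the DWC endpoint initialization and finally
 * enable the LTSSM.
 */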
static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_ep *ep = &pci->ep;
	struct device *dev = pcie->dev;
	u32 val;
	int ret;

	if (pcie->ep_state == EP_STATE_ENABLED)
		return;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		return;
	}

	ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
	if (ret) {
		dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
		goto fail_pll_init;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk_enable;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
		goto fail_core_apb_rst;
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Clear any stale interrupt statuses */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	/* Configure N_FTS & FTS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
	val &= ~(N_FTS_MASK << N_FTS_SHIFT);
	val |= N_FTS_VAL << N_FTS_SHIFT;
	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);

	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);

	/* Configure Max Speed from DT */
	if (pcie->max_speed && pcie->max_speed != -EINVAL) {
		val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
					PCI_EXP_LNKCAP);
		val &= ~PCI_EXP_LNKCAP_SLS;
		val |= pcie->max_speed;
		dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
				   val);
	}

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);
	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

	ret = dw_pcie_ep_init_complete(ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto fail_init_complete;
	}

	dw_pcie_ep_init_notify(ep);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Initialization of endpoint is completed\n");

	return;

fail_init_complete:
	reset_control_assert(pcie->core_rst);
	tegra_pcie_disable_phy(pcie);
fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk_enable:
	tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	pm_runtime_put_sync(dev);
}
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;

	if (gpiod_get_value(pcie->pex_rst_gpiod))
		pex_ep_event_pex_rst_assert(pcie);
	else
		pex_ep_event_pex_rst_deassert(pcie);

	return IRQ_HANDLED;
}
static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	/* Tegra194 supports only INTA */
	if (irq > 1)
		return -EINVAL;

	appl_writel(pcie, 1, APPL_LEGACY_INTX);
	usleep_range(1000, 2000);
	appl_writel(pcie, 0, APPL_LEGACY_INTX);

	return 0;
}
static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	if (unlikely(irq > 31))
		return -EINVAL;

	appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1);

	return 0;
}
static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	struct dw_pcie_ep *ep = &pcie->pci.ep;

	writel(irq, ep->msi_mem);

	return 0;
}
static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   enum pci_epc_irq_type type,
				   u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSI:
		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);

	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EPERM;
	}

	return 0;
}
static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.core_init_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	.reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0,
	.bar_fixed_size[0] = SZ_1M,
};

static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};
static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	struct resource *res;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);
	ep->page_size = SZ_64K;

	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
			ret);
		return ret;
	}

	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
	if (ret < 0) {
		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
		return ret;
	}
	pcie->pex_rst_irq = (unsigned int)ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PERST IRQ string\n");
		return -ENOMEM;
	}

	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);

	pcie->ep_state = EP_STATE_DISABLED;

	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
					tegra_pcie_ep_pex_rst_irq,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					name, (void *)pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
		return ret;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PCIe EP work thread string\n");
		return -ENOMEM;
	}

	pm_runtime_enable(dev);

	ret = dw_pcie_ep_init(ep);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
			ret);
		return ret;
	}

	return 0;
}
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
        const struct tegra_pcie_dw_of_data *data;
        struct device *dev = &pdev->dev;
        struct resource *atu_dma_res;
        struct tegra_pcie_dw *pcie;
        struct resource *dbi_res;
        struct pcie_port *pp;
        struct dw_pcie *pci;
        struct phy **phys;
        char *name;
        int ret;
        u32 i;

        data = of_device_get_match_data(dev);

        pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
        if (!pcie)
                return -ENOMEM;

        pci = &pcie->pci;
        pci->dev = &pdev->dev;
        pci->ops = &tegra_dw_pcie_ops;
        pp = &pci->pp;
        pcie->dev = &pdev->dev;
        pcie->mode = (enum dw_pcie_device_mode)data->mode;

        ret = tegra_pcie_dw_parse_dt(pcie);
        if (ret < 0) {
                const char *level = KERN_ERR;

                if (ret == -EPROBE_DEFER)
                        level = KERN_DEBUG;

                dev_printk(level, dev,
                           dev_fmt("Failed to parse device tree: %d\n"),
                           ret);
                return ret;
        }

        ret = tegra_pcie_get_slot_regulators(pcie);
        if (ret < 0) {
                const char *level = KERN_ERR;

                if (ret == -EPROBE_DEFER)
                        level = KERN_DEBUG;

                dev_printk(level, dev,
                           dev_fmt("Failed to get slot regulators: %d\n"),
                           ret);
                return ret;
        }

        if (pcie->pex_refclk_sel_gpiod)
                gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);

        pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
        if (IS_ERR(pcie->pex_ctl_supply)) {
                ret = PTR_ERR(pcie->pex_ctl_supply);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "Failed to get regulator: %ld\n",
                                PTR_ERR(pcie->pex_ctl_supply));
                return ret;
        }

        pcie->core_clk = devm_clk_get(dev, "core");
        if (IS_ERR(pcie->core_clk)) {
                dev_err(dev, "Failed to get core clock: %ld\n",
                        PTR_ERR(pcie->core_clk));
                return PTR_ERR(pcie->core_clk);
        }

        pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                      "appl");
        if (!pcie->appl_res) {
                dev_err(dev, "Failed to find \"appl\" region\n");
                return -ENODEV;
        }

        pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
        if (IS_ERR(pcie->appl_base))
                return PTR_ERR(pcie->appl_base);

        pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
        if (IS_ERR(pcie->core_apb_rst)) {
                dev_err(dev, "Failed to get APB reset: %ld\n",
                        PTR_ERR(pcie->core_apb_rst));
                return PTR_ERR(pcie->core_apb_rst);
        }

        phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
        if (!phys)
                return -ENOMEM;

        for (i = 0; i < pcie->phy_count; i++) {
                name = kasprintf(GFP_KERNEL, "p2u-%u", i);
                if (!name) {
                        dev_err(dev, "Failed to create P2U string\n");
                        return -ENOMEM;
                }
                phys[i] = devm_phy_get(dev, name);
                kfree(name);
                if (IS_ERR(phys[i])) {
                        ret = PTR_ERR(phys[i]);
                        if (ret != -EPROBE_DEFER)
                                dev_err(dev, "Failed to get PHY: %d\n", ret);
                        return ret;
                }
        }

        pcie->phys = phys;

        dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
        if (!dbi_res) {
                dev_err(dev, "Failed to find \"dbi\" region\n");
                return -ENODEV;
        }
        pcie->dbi_res = dbi_res;

        pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
        if (IS_ERR(pci->dbi_base))
                return PTR_ERR(pci->dbi_base);

        /* Tegra HW locates DBI2 at a fixed offset from DBI */
        pci->dbi_base2 = pci->dbi_base + 0x1000;

        atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   "atu_dma");
        if (!atu_dma_res) {
                dev_err(dev, "Failed to find \"atu_dma\" region\n");
                return -ENODEV;
        }
        pcie->atu_dma_res = atu_dma_res;

        pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
        if (IS_ERR(pci->atu_base))
                return PTR_ERR(pci->atu_base);

        pcie->core_rst = devm_reset_control_get(dev, "core");
        if (IS_ERR(pcie->core_rst)) {
                dev_err(dev, "Failed to get core reset: %ld\n",
                        PTR_ERR(pcie->core_rst));
                return PTR_ERR(pcie->core_rst);
        }

        pp->irq = platform_get_irq_byname(pdev, "intr");
        if (!pp->irq) {
                dev_err(dev, "Failed to get \"intr\" interrupt\n");
                return -ENODEV;
        }

        pcie->bpmp = tegra_bpmp_get(dev);
        if (IS_ERR(pcie->bpmp))
                return PTR_ERR(pcie->bpmp);

        platform_set_drvdata(pdev, pcie);

        switch (pcie->mode) {
        case DW_PCIE_RC_TYPE:
                ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
                                       IRQF_SHARED, "tegra-pcie-intr", pcie);
                if (ret) {
                        dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
                                ret);
                        goto fail;
                }

                ret = tegra_pcie_config_rp(pcie);
                if (ret && ret != -ENOMEDIUM)
                        goto fail;
                else
                        return 0;
                break;

        case DW_PCIE_EP_TYPE:
                ret = devm_request_threaded_irq(dev, pp->irq,
                                                tegra_pcie_ep_hard_irq,
                                                tegra_pcie_ep_irq_thread,
                                                IRQF_SHARED | IRQF_ONESHOT,
                                                "tegra-pcie-ep-intr", pcie);
                if (ret) {
                        dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
                                ret);
                        goto fail;
                }

                ret = tegra_pcie_config_ep(pcie, pdev);
                if (ret < 0)
                        goto fail;
                break;

        default:
                dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
        }

fail:
        tegra_bpmp_put(pcie->bpmp);
        return ret;
}

static int tegra_pcie_dw_remove(struct platform_device *pdev)
{
        struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

        if (!pcie->link_state)
                return 0;

        debugfs_remove_recursive(pcie->debugfs);
        tegra_pcie_deinit_controller(pcie);
        pm_runtime_put_sync(pcie->dev);
        pm_runtime_disable(pcie->dev);
        tegra_bpmp_put(pcie->bpmp);
        if (pcie->pex_refclk_sel_gpiod)
                gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);

        return 0;
}

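/* Enable hardware-controlled hot reset (HW_HOT_RST) during late suspend. */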
static int tegra_pcie_dw_suspend_late(struct device *dev)
{
        struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
        u32 val;

        if (!pcie->link_state)
                return 0;

        /* Enable HW_HOT_RST mode */
        val = appl_readl(pcie, APPL_CTRL);
        val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
                 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
        val |= APPL_CTRL_HW_HOT_RST_EN;
        appl_writel(pcie, val, APPL_CTRL);

        return 0;
}

static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
        struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);

        if (!pcie->link_state)
                return 0;

        /* Save MSI interrupt vector */
        pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
                                               PORT_LOGIC_MSI_CTRL_INT_0_EN);
        tegra_pcie_downstream_dev_to_D0(pcie);
        tegra_pcie_dw_pme_turnoff(pcie);

        return __deinit_controller(pcie);
}

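/*
 * Re-enable the controller, re-run host initialization and restore the saved
 * MSI interrupt enable state.
 */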
static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
        struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
        int ret;

        if (!pcie->link_state)
                return 0;

        ret = tegra_pcie_config_controller(pcie, true);
        if (ret < 0)
                return ret;

        ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
        if (ret < 0) {
                dev_err(dev, "Failed to init host: %d\n", ret);
                goto fail_host_init;
        }

        /* Restore MSI interrupt vector */
        dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
                           pcie->msi_ctrl_int);

        return 0;

fail_host_init:
        return __deinit_controller(pcie);
}

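/*
 * Restore the hot reset configuration: select the immediate reset mode and
 * disable hardware-controlled hot reset again.
 */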
static int tegra_pcie_dw_resume_early(struct device *dev)
{
        struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
        u32 val;

        if (!pcie->link_state)
                return 0;

        /* Disable HW_HOT_RST mode */
        val = appl_readl(pcie, APPL_CTRL);
        val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
                 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
        val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
               APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
        val &= ~APPL_CTRL_HW_HOT_RST_EN;
        appl_writel(pcie, val, APPL_CTRL);

        return 0;
}

static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
        struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

        if (!pcie->link_state)
                return;

        debugfs_remove_recursive(pcie->debugfs);
        tegra_pcie_downstream_dev_to_D0(pcie);

        disable_irq(pcie->pci.pp.irq);
        if (IS_ENABLED(CONFIG_PCI_MSI))
                disable_irq(pcie->pci.pp.msi_irq);

        tegra_pcie_dw_pme_turnoff(pcie);
        __deinit_controller(pcie);
}

static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
        .mode = DW_PCIE_RC_TYPE,
};

static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
        .mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id tegra_pcie_dw_of_match[] = {
        {
                .compatible = "nvidia,tegra194-pcie",
                .data = &tegra_pcie_dw_rc_of_data,
        },
        {
                .compatible = "nvidia,tegra194-pcie-ep",
                .data = &tegra_pcie_dw_ep_of_data,
        },
        {},
};

static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
        .suspend_late = tegra_pcie_dw_suspend_late,
        .suspend_noirq = tegra_pcie_dw_suspend_noirq,
        .resume_noirq = tegra_pcie_dw_resume_noirq,
        .resume_early = tegra_pcie_dw_resume_early,
};

static struct platform_driver tegra_pcie_dw_driver = {
        .probe = tegra_pcie_dw_probe,
        .remove = tegra_pcie_dw_remove,
        .shutdown = tegra_pcie_dw_shutdown,
        .driver = {
                .name = "tegra194-pcie",
                .pm = &tegra_pcie_dw_pm_ops,
                .of_match_table = tegra_pcie_dw_of_match,
        },
};
module_platform_driver(tegra_pcie_dw_driver);

MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");