// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for the following SoCs
 * Tegra194
 * Tegra234
 *
 * Copyright (C) 2019-2022 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"
#define TEGRA194_DWC_IP_VER			0x490A
#define TEGRA234_DWC_IP_VER			0x562A

#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN	0x2

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88

#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_NOSNOOP_MSG_REQ			BIT(31)

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_PRE_DETECT			5

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD				0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

#define IO_BASE_IO_DECODE			BIT(0)
#define IO_BASE_IO_DECODE_BIT8			BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_RRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_RRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_RRS_OKAY		0
#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001	2

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL		0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

#define PME_ACK_TIMEOUT 10000

#define LTSSM_TIMEOUT 50000	/* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT	5

#define GEN1_CORE_CLK_FREQ	62500000
#define GEN2_CORE_CLK_FREQ	125000000
#define GEN3_CORE_CLK_FREQ	250000000
#define GEN4_CORE_CLK_FREQ	500000000

#define LTR_MSG_TIMEOUT (100 * 1000)

#define PERST_DEBOUNCE_TIME (5 * 1000)

#define EP_STATE_DISABLED	0
#define EP_STATE_ENABLED	1
219 static const unsigned int pcie_gen_freq
[] = {
220 GEN1_CORE_CLK_FREQ
, /* PCI_EXP_LNKSTA_CLS == 0; undefined */
227 struct tegra_pcie_dw_of_data
{
229 enum dw_pcie_device_mode mode
;
230 bool has_msix_doorbell_access_fix
;
231 bool has_sbr_reset_fix
;
232 bool has_l1ss_exit_fix
;
233 bool has_ltr_req_fix
;
234 u32 cdm_chk_int_en_bit
;
239 struct tegra_pcie_dw
{
241 struct resource
*appl_res
;
242 struct resource
*dbi_res
;
243 struct resource
*atu_dma_res
;
244 void __iomem
*appl_base
;
245 struct clk
*core_clk
;
246 struct reset_control
*core_apb_rst
;
247 struct reset_control
*core_rst
;
249 struct tegra_bpmp
*bpmp
;
251 struct tegra_pcie_dw_of_data
*of_data
;
253 bool supports_clkreq
;
254 bool enable_cdm_check
;
257 bool update_fc_fixup
;
258 bool enable_ext_refclk
;
263 u32 cfg_link_cap_l1sub
;
268 u32 aspm_l0s_enter_lat
;
270 struct regulator
*pex_ctl_supply
;
271 struct regulator
*slot_ctl_3v3
;
272 struct regulator
*slot_ctl_12v
;
274 unsigned int phy_count
;
277 struct dentry
*debugfs
;
279 /* Endpoint mode specific */
280 struct gpio_desc
*pex_rst_gpiod
;
281 struct gpio_desc
*pex_refclk_sel_gpiod
;
282 unsigned int pex_rst_irq
;
285 struct icc_path
*icc_path
;
288 static inline struct tegra_pcie_dw
*to_tegra_pcie(struct dw_pcie
*pci
)
290 return container_of(pci
, struct tegra_pcie_dw
, pci
);
293 static inline void appl_writel(struct tegra_pcie_dw
*pcie
, const u32 value
,
296 writel_relaxed(value
, pcie
->appl_base
+ reg
);
299 static inline u32
appl_readl(struct tegra_pcie_dw
*pcie
, const u32 reg
)
301 return readl_relaxed(pcie
->appl_base
+ reg
);
304 static void tegra_pcie_icc_set(struct tegra_pcie_dw
*pcie
)
306 struct dw_pcie
*pci
= &pcie
->pci
;
307 u32 val
, speed
, width
;
309 val
= dw_pcie_readw_dbi(pci
, pcie
->pcie_cap_base
+ PCI_EXP_LNKSTA
);
311 speed
= FIELD_GET(PCI_EXP_LNKSTA_CLS
, val
);
312 width
= FIELD_GET(PCI_EXP_LNKSTA_NLW
, val
);
314 val
= width
* PCIE_SPEED2MBS_ENC(pcie_link_speed
[speed
]);
316 if (icc_set_bw(pcie
->icc_path
, Mbps_to_icc(val
), 0))
317 dev_err(pcie
->dev
, "can't set bw[%u]\n", val
);
319 if (speed
>= ARRAY_SIZE(pcie_gen_freq
))
322 clk_set_rate(pcie
->core_clk
, pcie_gen_freq
[speed
]);
325 static void apply_bad_link_workaround(struct dw_pcie_rp
*pp
)
327 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
328 struct tegra_pcie_dw
*pcie
= to_tegra_pcie(pci
);
329 u32 current_link_width
;
333 * NOTE:- Since this scenario is uncommon and link as such is not
334 * stable anyway, not waiting to confirm if link is really
335 * transitioning to Gen-2 speed
337 val
= dw_pcie_readw_dbi(pci
, pcie
->pcie_cap_base
+ PCI_EXP_LNKSTA
);
338 if (val
& PCI_EXP_LNKSTA_LBMS
) {
339 current_link_width
= FIELD_GET(PCI_EXP_LNKSTA_NLW
, val
);
340 if (pcie
->init_link_width
> current_link_width
) {
341 dev_warn(pci
->dev
, "PCIe link is bad, width reduced\n");
342 val
= dw_pcie_readw_dbi(pci
, pcie
->pcie_cap_base
+
344 val
&= ~PCI_EXP_LNKCTL2_TLS
;
345 val
|= PCI_EXP_LNKCTL2_TLS_2_5GT
;
346 dw_pcie_writew_dbi(pci
, pcie
->pcie_cap_base
+
347 PCI_EXP_LNKCTL2
, val
);
349 val
= dw_pcie_readw_dbi(pci
, pcie
->pcie_cap_base
+
351 val
|= PCI_EXP_LNKCTL_RL
;
352 dw_pcie_writew_dbi(pci
, pcie
->pcie_cap_base
+
353 PCI_EXP_LNKCTL
, val
);
358 static irqreturn_t
tegra_pcie_rp_irq_handler(int irq
, void *arg
)
360 struct tegra_pcie_dw
*pcie
= arg
;
361 struct dw_pcie
*pci
= &pcie
->pci
;
362 struct dw_pcie_rp
*pp
= &pci
->pp
;
363 u32 val
, status_l0
, status_l1
;
366 status_l0
= appl_readl(pcie
, APPL_INTR_STATUS_L0
);
367 if (status_l0
& APPL_INTR_STATUS_L0_LINK_STATE_INT
) {
368 status_l1
= appl_readl(pcie
, APPL_INTR_STATUS_L1_0_0
);
369 appl_writel(pcie
, status_l1
, APPL_INTR_STATUS_L1_0_0
);
370 if (!pcie
->of_data
->has_sbr_reset_fix
&&
371 status_l1
& APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED
) {
372 /* SBR & Surprise Link Down WAR */
373 val
= appl_readl(pcie
, APPL_CAR_RESET_OVRD
);
374 val
&= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N
;
375 appl_writel(pcie
, val
, APPL_CAR_RESET_OVRD
);
377 val
= appl_readl(pcie
, APPL_CAR_RESET_OVRD
);
378 val
|= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N
;
379 appl_writel(pcie
, val
, APPL_CAR_RESET_OVRD
);
381 val
= dw_pcie_readl_dbi(pci
, PCIE_LINK_WIDTH_SPEED_CONTROL
);
382 val
|= PORT_LOGIC_SPEED_CHANGE
;
383 dw_pcie_writel_dbi(pci
, PCIE_LINK_WIDTH_SPEED_CONTROL
, val
);
387 if (status_l0
& APPL_INTR_STATUS_L0_INT_INT
) {
388 status_l1
= appl_readl(pcie
, APPL_INTR_STATUS_L1_8_0
);
389 if (status_l1
& APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS
) {
391 APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS
,
392 APPL_INTR_STATUS_L1_8_0
);
393 apply_bad_link_workaround(pp
);
395 if (status_l1
& APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS
) {
396 val_w
= dw_pcie_readw_dbi(pci
, pcie
->pcie_cap_base
+
398 val_w
|= PCI_EXP_LNKSTA_LBMS
;
399 dw_pcie_writew_dbi(pci
, pcie
->pcie_cap_base
+
400 PCI_EXP_LNKSTA
, val_w
);
403 APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS
,
404 APPL_INTR_STATUS_L1_8_0
);
406 val_w
= dw_pcie_readw_dbi(pci
, pcie
->pcie_cap_base
+
408 dev_dbg(pci
->dev
, "Link Speed : Gen-%u\n", val_w
&
413 if (status_l0
& APPL_INTR_STATUS_L0_CDM_REG_CHK_INT
) {
414 status_l1
= appl_readl(pcie
, APPL_INTR_STATUS_L1_18
);
415 val
= dw_pcie_readl_dbi(pci
, PCIE_PL_CHK_REG_CONTROL_STATUS
);
416 if (status_l1
& APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT
) {
417 dev_info(pci
->dev
, "CDM check complete\n");
418 val
|= PCIE_PL_CHK_REG_CHK_REG_COMPLETE
;
420 if (status_l1
& APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR
) {
421 dev_err(pci
->dev
, "CDM comparison mismatch\n");
422 val
|= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR
;
424 if (status_l1
& APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR
) {
425 dev_err(pci
->dev
, "CDM Logic error\n");
426 val
|= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR
;
428 dw_pcie_writel_dbi(pci
, PCIE_PL_CHK_REG_CONTROL_STATUS
, val
);
429 val
= dw_pcie_readl_dbi(pci
, PCIE_PL_CHK_REG_ERR_ADDR
);
430 dev_err(pci
->dev
, "CDM Error Address Offset = 0x%08X\n", val
);
436 static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw
*pcie
)
440 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L0
);
441 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0
);
442 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1
);
443 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2
);
444 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3
);
445 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6
);
446 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7
);
447 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0
);
448 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9
);
449 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10
);
450 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11
);
451 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13
);
452 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14
);
453 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15
);
454 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17
);
455 appl_writel(pcie
, 0xFFFFFFFF, APPL_MSI_CTRL_2
);
457 val
= appl_readl(pcie
, APPL_CTRL
);
458 val
|= APPL_CTRL_LTSSM_EN
;
459 appl_writel(pcie
, val
, APPL_CTRL
);
462 static irqreturn_t
tegra_pcie_ep_irq_thread(int irq
, void *arg
)
464 struct tegra_pcie_dw
*pcie
= arg
;
465 struct dw_pcie_ep
*ep
= &pcie
->pci
.ep
;
466 struct dw_pcie
*pci
= &pcie
->pci
;
469 if (test_and_clear_bit(0, &pcie
->link_status
))
470 dw_pcie_ep_linkup(ep
);
472 tegra_pcie_icc_set(pcie
);
474 if (pcie
->of_data
->has_ltr_req_fix
)
477 /* If EP doesn't advertise L1SS, just return */
478 val
= dw_pcie_readl_dbi(pci
, pcie
->cfg_link_cap_l1sub
);
479 if (!(val
& (PCI_L1SS_CAP_ASPM_L1_1
| PCI_L1SS_CAP_ASPM_L1_2
)))
482 /* Check if BME is set to '1' */
483 val
= dw_pcie_readl_dbi(pci
, PCI_COMMAND
);
484 if (val
& PCI_COMMAND_MASTER
) {
487 /* 110us for both snoop and no-snoop */
488 val
= FIELD_PREP(PCI_LTR_VALUE_MASK
, 110) |
489 FIELD_PREP(PCI_LTR_SCALE_MASK
, 2) |
491 FIELD_PREP(PCI_LTR_NOSNOOP_VALUE
, 110) |
492 FIELD_PREP(PCI_LTR_NOSNOOP_SCALE
, 2) |
494 appl_writel(pcie
, val
, APPL_LTR_MSG_1
);
496 /* Send LTR upstream */
497 val
= appl_readl(pcie
, APPL_LTR_MSG_2
);
498 val
|= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE
;
499 appl_writel(pcie
, val
, APPL_LTR_MSG_2
);
501 timeout
= ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT
);
503 val
= appl_readl(pcie
, APPL_LTR_MSG_2
);
504 if (!(val
& APPL_LTR_MSG_2_LTR_MSG_REQ_STATE
))
506 if (ktime_after(ktime_get(), timeout
))
508 usleep_range(1000, 1100);
510 if (val
& APPL_LTR_MSG_2_LTR_MSG_REQ_STATE
)
511 dev_err(pcie
->dev
, "Failed to send LTR message\n");
517 static irqreturn_t
tegra_pcie_ep_hard_irq(int irq
, void *arg
)
519 struct tegra_pcie_dw
*pcie
= arg
;
521 u32 status_l0
, status_l1
, link_status
;
523 status_l0
= appl_readl(pcie
, APPL_INTR_STATUS_L0
);
524 if (status_l0
& APPL_INTR_STATUS_L0_LINK_STATE_INT
) {
525 status_l1
= appl_readl(pcie
, APPL_INTR_STATUS_L1_0_0
);
526 appl_writel(pcie
, status_l1
, APPL_INTR_STATUS_L1_0_0
);
528 if (status_l1
& APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE
)
529 pex_ep_event_hot_rst_done(pcie
);
531 if (status_l1
& APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED
) {
532 link_status
= appl_readl(pcie
, APPL_LINK_STATUS
);
533 if (link_status
& APPL_LINK_STATUS_RDLH_LINK_UP
) {
534 dev_dbg(pcie
->dev
, "Link is up with Host\n");
535 set_bit(0, &pcie
->link_status
);
536 return IRQ_WAKE_THREAD
;
543 if (status_l0
& APPL_INTR_STATUS_L0_PCI_CMD_EN_INT
) {
544 status_l1
= appl_readl(pcie
, APPL_INTR_STATUS_L1_15
);
545 appl_writel(pcie
, status_l1
, APPL_INTR_STATUS_L1_15
);
547 if (status_l1
& APPL_INTR_STATUS_L1_15_CFG_BME_CHGED
)
548 return IRQ_WAKE_THREAD
;
554 dev_warn(pcie
->dev
, "Random interrupt (STATUS = 0x%08X)\n",
556 appl_writel(pcie
, status_l0
, APPL_INTR_STATUS_L0
);
562 static int tegra_pcie_dw_rd_own_conf(struct pci_bus
*bus
, u32 devfn
, int where
,
565 struct dw_pcie_rp
*pp
= bus
->sysdata
;
566 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
567 struct tegra_pcie_dw
*pcie
= to_tegra_pcie(pci
);
570 * This is an endpoint mode specific register happen to appear even
571 * when controller is operating in root port mode and system hangs
572 * when it is accessed with link being in ASPM-L1 state.
573 * So skip accessing it altogether
575 if (!pcie
->of_data
->has_msix_doorbell_access_fix
&&
576 !PCI_SLOT(devfn
) && where
== PORT_LOGIC_MSIX_DOORBELL
) {
578 return PCIBIOS_SUCCESSFUL
;
581 return pci_generic_config_read(bus
, devfn
, where
, size
, val
);
584 static int tegra_pcie_dw_wr_own_conf(struct pci_bus
*bus
, u32 devfn
, int where
,
587 struct dw_pcie_rp
*pp
= bus
->sysdata
;
588 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
589 struct tegra_pcie_dw
*pcie
= to_tegra_pcie(pci
);
592 * This is an endpoint mode specific register happen to appear even
593 * when controller is operating in root port mode and system hangs
594 * when it is accessed with link being in ASPM-L1 state.
595 * So skip accessing it altogether
597 if (!pcie
->of_data
->has_msix_doorbell_access_fix
&&
598 !PCI_SLOT(devfn
) && where
== PORT_LOGIC_MSIX_DOORBELL
)
599 return PCIBIOS_SUCCESSFUL
;
601 return pci_generic_config_write(bus
, devfn
, where
, size
, val
);
604 static struct pci_ops tegra_pci_ops
= {
605 .map_bus
= dw_pcie_own_conf_map_bus
,
606 .read
= tegra_pcie_dw_rd_own_conf
,
607 .write
= tegra_pcie_dw_wr_own_conf
,
610 #if defined(CONFIG_PCIEASPM)
611 static void disable_aspm_l11(struct tegra_pcie_dw
*pcie
)
615 val
= dw_pcie_readl_dbi(&pcie
->pci
, pcie
->cfg_link_cap_l1sub
);
616 val
&= ~PCI_L1SS_CAP_ASPM_L1_1
;
617 dw_pcie_writel_dbi(&pcie
->pci
, pcie
->cfg_link_cap_l1sub
, val
);
620 static void disable_aspm_l12(struct tegra_pcie_dw
*pcie
)
624 val
= dw_pcie_readl_dbi(&pcie
->pci
, pcie
->cfg_link_cap_l1sub
);
625 val
&= ~PCI_L1SS_CAP_ASPM_L1_2
;
626 dw_pcie_writel_dbi(&pcie
->pci
, pcie
->cfg_link_cap_l1sub
, val
);
629 static inline u32
event_counter_prog(struct tegra_pcie_dw
*pcie
, u32 event
)
633 val
= dw_pcie_readl_dbi(&pcie
->pci
, pcie
->ras_des_cap
+
634 PCIE_RAS_DES_EVENT_COUNTER_CONTROL
);
635 val
&= ~(EVENT_COUNTER_EVENT_SEL_MASK
<< EVENT_COUNTER_EVENT_SEL_SHIFT
);
636 val
|= EVENT_COUNTER_GROUP_5
<< EVENT_COUNTER_GROUP_SEL_SHIFT
;
637 val
|= event
<< EVENT_COUNTER_EVENT_SEL_SHIFT
;
638 val
|= EVENT_COUNTER_ENABLE_ALL
<< EVENT_COUNTER_ENABLE_SHIFT
;
639 dw_pcie_writel_dbi(&pcie
->pci
, pcie
->ras_des_cap
+
640 PCIE_RAS_DES_EVENT_COUNTER_CONTROL
, val
);
641 val
= dw_pcie_readl_dbi(&pcie
->pci
, pcie
->ras_des_cap
+
642 PCIE_RAS_DES_EVENT_COUNTER_DATA
);
647 static int aspm_state_cnt(struct seq_file
*s
, void *data
)
649 struct tegra_pcie_dw
*pcie
= (struct tegra_pcie_dw
*)
650 dev_get_drvdata(s
->private);
653 seq_printf(s
, "Tx L0s entry count : %u\n",
654 event_counter_prog(pcie
, EVENT_COUNTER_EVENT_Tx_L0S
));
656 seq_printf(s
, "Rx L0s entry count : %u\n",
657 event_counter_prog(pcie
, EVENT_COUNTER_EVENT_Rx_L0S
));
659 seq_printf(s
, "Link L1 entry count : %u\n",
660 event_counter_prog(pcie
, EVENT_COUNTER_EVENT_L1
));
662 seq_printf(s
, "Link L1.1 entry count : %u\n",
663 event_counter_prog(pcie
, EVENT_COUNTER_EVENT_L1_1
));
665 seq_printf(s
, "Link L1.2 entry count : %u\n",
666 event_counter_prog(pcie
, EVENT_COUNTER_EVENT_L1_2
));
668 /* Clear all counters */
669 dw_pcie_writel_dbi(&pcie
->pci
, pcie
->ras_des_cap
+
670 PCIE_RAS_DES_EVENT_COUNTER_CONTROL
,
671 EVENT_COUNTER_ALL_CLEAR
);
673 /* Re-enable counting */
674 val
= EVENT_COUNTER_ENABLE_ALL
<< EVENT_COUNTER_ENABLE_SHIFT
;
675 val
|= EVENT_COUNTER_GROUP_5
<< EVENT_COUNTER_GROUP_SEL_SHIFT
;
676 dw_pcie_writel_dbi(&pcie
->pci
, pcie
->ras_des_cap
+
677 PCIE_RAS_DES_EVENT_COUNTER_CONTROL
, val
);
682 static void init_host_aspm(struct tegra_pcie_dw
*pcie
)
684 struct dw_pcie
*pci
= &pcie
->pci
;
687 val
= dw_pcie_find_ext_capability(pci
, PCI_EXT_CAP_ID_L1SS
);
688 pcie
->cfg_link_cap_l1sub
= val
+ PCI_L1SS_CAP
;
690 pcie
->ras_des_cap
= dw_pcie_find_ext_capability(&pcie
->pci
,
691 PCI_EXT_CAP_ID_VNDR
);
693 /* Enable ASPM counters */
694 val
= EVENT_COUNTER_ENABLE_ALL
<< EVENT_COUNTER_ENABLE_SHIFT
;
695 val
|= EVENT_COUNTER_GROUP_5
<< EVENT_COUNTER_GROUP_SEL_SHIFT
;
696 dw_pcie_writel_dbi(pci
, pcie
->ras_des_cap
+
697 PCIE_RAS_DES_EVENT_COUNTER_CONTROL
, val
);
699 /* Program T_cmrt and T_pwr_on values */
700 val
= dw_pcie_readl_dbi(pci
, pcie
->cfg_link_cap_l1sub
);
701 val
&= ~(PCI_L1SS_CAP_CM_RESTORE_TIME
| PCI_L1SS_CAP_P_PWR_ON_VALUE
);
702 val
|= (pcie
->aspm_cmrt
<< 8);
703 val
|= (pcie
->aspm_pwr_on_t
<< 19);
704 dw_pcie_writel_dbi(pci
, pcie
->cfg_link_cap_l1sub
, val
);
706 /* Program L0s and L1 entrance latencies */
707 val
= dw_pcie_readl_dbi(pci
, PCIE_PORT_AFR
);
708 val
&= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK
;
709 val
|= (pcie
->aspm_l0s_enter_lat
<< PORT_AFR_L0S_ENTRANCE_LAT_SHIFT
);
710 val
|= PORT_AFR_ENTER_ASPM
;
711 dw_pcie_writel_dbi(pci
, PCIE_PORT_AFR
, val
);
714 static void init_debugfs(struct tegra_pcie_dw
*pcie
)
716 debugfs_create_devm_seqfile(pcie
->dev
, "aspm_state_cnt", pcie
->debugfs
,
720 static inline void disable_aspm_l12(struct tegra_pcie_dw
*pcie
) { return; }
721 static inline void disable_aspm_l11(struct tegra_pcie_dw
*pcie
) { return; }
722 static inline void init_host_aspm(struct tegra_pcie_dw
*pcie
) { return; }
723 static inline void init_debugfs(struct tegra_pcie_dw
*pcie
) { return; }
726 static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp
*pp
)
728 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
729 struct tegra_pcie_dw
*pcie
= to_tegra_pcie(pci
);
733 val
= appl_readl(pcie
, APPL_INTR_EN_L0_0
);
734 val
|= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN
;
735 appl_writel(pcie
, val
, APPL_INTR_EN_L0_0
);
737 if (!pcie
->of_data
->has_sbr_reset_fix
) {
738 val
= appl_readl(pcie
, APPL_INTR_EN_L1_0_0
);
739 val
|= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN
;
740 appl_writel(pcie
, val
, APPL_INTR_EN_L1_0_0
);
743 if (pcie
->enable_cdm_check
) {
744 val
= appl_readl(pcie
, APPL_INTR_EN_L0_0
);
745 val
|= pcie
->of_data
->cdm_chk_int_en_bit
;
746 appl_writel(pcie
, val
, APPL_INTR_EN_L0_0
);
748 val
= appl_readl(pcie
, APPL_INTR_EN_L1_18
);
749 val
|= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR
;
750 val
|= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR
;
751 appl_writel(pcie
, val
, APPL_INTR_EN_L1_18
);
754 val_w
= dw_pcie_readw_dbi(&pcie
->pci
, pcie
->pcie_cap_base
+
756 pcie
->init_link_width
= FIELD_GET(PCI_EXP_LNKSTA_NLW
, val_w
);
758 val_w
= dw_pcie_readw_dbi(&pcie
->pci
, pcie
->pcie_cap_base
+
760 val_w
|= PCI_EXP_LNKCTL_LBMIE
;
761 dw_pcie_writew_dbi(&pcie
->pci
, pcie
->pcie_cap_base
+ PCI_EXP_LNKCTL
,
765 static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp
*pp
)
767 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
768 struct tegra_pcie_dw
*pcie
= to_tegra_pcie(pci
);
771 /* Enable INTX interrupt generation */
772 val
= appl_readl(pcie
, APPL_INTR_EN_L0_0
);
773 val
|= APPL_INTR_EN_L0_0_SYS_INTR_EN
;
774 val
|= APPL_INTR_EN_L0_0_INT_INT_EN
;
775 appl_writel(pcie
, val
, APPL_INTR_EN_L0_0
);
777 val
= appl_readl(pcie
, APPL_INTR_EN_L1_8_0
);
778 val
|= APPL_INTR_EN_L1_8_INTX_EN
;
779 val
|= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN
;
780 val
|= APPL_INTR_EN_L1_8_BW_MGT_INT_EN
;
781 if (IS_ENABLED(CONFIG_PCIEAER
))
782 val
|= APPL_INTR_EN_L1_8_AER_INT_EN
;
783 appl_writel(pcie
, val
, APPL_INTR_EN_L1_8_0
);
786 static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp
*pp
)
788 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
789 struct tegra_pcie_dw
*pcie
= to_tegra_pcie(pci
);
792 /* Enable MSI interrupt generation */
793 val
= appl_readl(pcie
, APPL_INTR_EN_L0_0
);
794 val
|= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN
;
795 val
|= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN
;
796 appl_writel(pcie
, val
, APPL_INTR_EN_L0_0
);
799 static void tegra_pcie_enable_interrupts(struct dw_pcie_rp
*pp
)
801 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
802 struct tegra_pcie_dw
*pcie
= to_tegra_pcie(pci
);
804 /* Clear interrupt statuses before enabling interrupts */
805 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L0
);
806 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0
);
807 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1
);
808 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2
);
809 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3
);
810 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6
);
811 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7
);
812 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0
);
813 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9
);
814 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10
);
815 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11
);
816 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13
);
817 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14
);
818 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15
);
819 appl_writel(pcie
, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17
);
821 tegra_pcie_enable_system_interrupts(pp
);
822 tegra_pcie_enable_intx_interrupts(pp
);
823 if (IS_ENABLED(CONFIG_PCI_MSI
))
824 tegra_pcie_enable_msi_interrupts(pp
);
827 static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw
*pcie
)
829 struct dw_pcie
*pci
= &pcie
->pci
;
832 /* Program init preset */
833 for (i
= 0; i
< pcie
->num_lanes
; i
++) {
834 val
= dw_pcie_readw_dbi(pci
, CAP_SPCIE_CAP_OFF
+ (i
* 2));
835 val
&= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK
;
836 val
|= GEN3_GEN4_EQ_PRESET_INIT
;
837 val
&= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK
;
838 val
|= (GEN3_GEN4_EQ_PRESET_INIT
<<
839 CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT
);
840 dw_pcie_writew_dbi(pci
, CAP_SPCIE_CAP_OFF
+ (i
* 2), val
);
842 offset
= dw_pcie_find_ext_capability(pci
,
843 PCI_EXT_CAP_ID_PL_16GT
) +
845 val
= dw_pcie_readb_dbi(pci
, offset
+ i
);
846 val
&= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK
;
847 val
|= GEN3_GEN4_EQ_PRESET_INIT
;
848 val
&= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK
;
849 val
|= (GEN3_GEN4_EQ_PRESET_INIT
<<
850 PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT
);
851 dw_pcie_writeb_dbi(pci
, offset
+ i
, val
);
854 val
= dw_pcie_readl_dbi(pci
, GEN3_RELATED_OFF
);
855 val
&= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK
;
856 dw_pcie_writel_dbi(pci
, GEN3_RELATED_OFF
, val
);
858 val
= dw_pcie_readl_dbi(pci
, GEN3_EQ_CONTROL_OFF
);
859 val
&= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC
;
860 val
|= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC
, 0x3ff);
861 val
&= ~GEN3_EQ_CONTROL_OFF_FB_MODE
;
862 dw_pcie_writel_dbi(pci
, GEN3_EQ_CONTROL_OFF
, val
);
864 val
= dw_pcie_readl_dbi(pci
, GEN3_RELATED_OFF
);
865 val
&= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK
;
866 val
|= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT
);
867 dw_pcie_writel_dbi(pci
, GEN3_RELATED_OFF
, val
);
869 val
= dw_pcie_readl_dbi(pci
, GEN3_EQ_CONTROL_OFF
);
870 val
&= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC
;
871 val
|= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC
,
872 pcie
->of_data
->gen4_preset_vec
);
873 val
&= ~GEN3_EQ_CONTROL_OFF_FB_MODE
;
874 dw_pcie_writel_dbi(pci
, GEN3_EQ_CONTROL_OFF
, val
);
876 val
= dw_pcie_readl_dbi(pci
, GEN3_RELATED_OFF
);
877 val
&= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK
;
878 dw_pcie_writel_dbi(pci
, GEN3_RELATED_OFF
, val
);
881 static int tegra_pcie_dw_host_init(struct dw_pcie_rp
*pp
)
883 struct dw_pcie
*pci
= to_dw_pcie_from_pp(pp
);
884 struct tegra_pcie_dw
*pcie
= to_tegra_pcie(pci
);
888 pp
->bridge
->ops
= &tegra_pci_ops
;
890 if (!pcie
->pcie_cap_base
)
891 pcie
->pcie_cap_base
= dw_pcie_find_capability(&pcie
->pci
,
894 val
= dw_pcie_readl_dbi(pci
, PCI_IO_BASE
);
895 val
&= ~(IO_BASE_IO_DECODE
| IO_BASE_IO_DECODE_BIT8
);
896 dw_pcie_writel_dbi(pci
, PCI_IO_BASE
, val
);
898 val
= dw_pcie_readl_dbi(pci
, PCI_PREF_MEMORY_BASE
);
899 val
|= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE
;
900 val
|= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE
;
901 dw_pcie_writel_dbi(pci
, PCI_PREF_MEMORY_BASE
, val
);
903 dw_pcie_writel_dbi(pci
, PCI_BASE_ADDRESS_0
, 0);
905 /* Enable as 0xFFFF0001 response for RRS */
906 val
= dw_pcie_readl_dbi(pci
, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT
);
907 val
&= ~(AMBA_ERROR_RESPONSE_RRS_MASK
<< AMBA_ERROR_RESPONSE_RRS_SHIFT
);
908 val
|= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001
<<
909 AMBA_ERROR_RESPONSE_RRS_SHIFT
);
910 dw_pcie_writel_dbi(pci
, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT
, val
);
912 /* Clear Slot Clock Configuration bit if SRNS configuration */
913 if (pcie
->enable_srns
) {
914 val_16
= dw_pcie_readw_dbi(pci
, pcie
->pcie_cap_base
+
916 val_16
&= ~PCI_EXP_LNKSTA_SLC
;
917 dw_pcie_writew_dbi(pci
, pcie
->pcie_cap_base
+ PCI_EXP_LNKSTA
,
921 config_gen3_gen4_eq_presets(pcie
);
923 init_host_aspm(pcie
);
925 /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
926 if (!pcie
->supports_clkreq
) {
927 disable_aspm_l11(pcie
);
928 disable_aspm_l12(pcie
);
931 if (!pcie
->of_data
->has_l1ss_exit_fix
) {
932 val
= dw_pcie_readl_dbi(pci
, GEN3_RELATED_OFF
);
933 val
&= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL
;
934 dw_pcie_writel_dbi(pci
, GEN3_RELATED_OFF
, val
);
937 if (pcie
->update_fc_fixup
) {
938 val
= dw_pcie_readl_dbi(pci
, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF
);
939 val
|= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT
;
940 dw_pcie_writel_dbi(pci
, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF
, val
);
943 clk_set_rate(pcie
->core_clk
, GEN4_CORE_CLK_FREQ
);
/*
 * Bring up the PCIe link.
 *
 * In endpoint mode this only arms the PERST# IRQ (link-up is driven by the
 * host de-asserting PERST#).  In root-port mode it sequences PERST# and the
 * LTSSM, and if training fails in a way that matches the known Data Link
 * Feature (DLF) incompatibility, retries once with DLF exchange disabled.
 */
static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	struct dw_pcie_rp *pp = &pci->pp;
	u32 val, offset, tmp;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		enable_irq(pcie->pex_rst_irq);
		return 0;
	}

	/* Assert PERST# to hold the endpoint in reset */
	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	/* Enable LTSSM to start link training */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert PERST# to release the endpoint */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	if (dw_pcie_wait_for_link(pci)) {
		/*
		 * There are some endpoints which can't get the link up if
		 * root port has Data Link Feature (DLF) enabled.
		 * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info
		 * on Scaled Flow Control and DLF.
		 * So, need to confirm that is indeed the case here and attempt
		 * link up once again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		/* LTSSM 0x11 with RDLH down is the DLF failure signature */
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for all good reasons */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");
		/* Disable LTSSM */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		/* Pulse the core reset before reprogramming the DBI space */
		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		/* Clear the DLF exchange-enable bit in the DLF capability */
		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);

		tegra_pcie_dw_host_init(pp);
		dw_pcie_setup_rc(pp);

		/* Single-level recursion: retry training with DLF disabled */
		return tegra_pcie_dw_start_link(pci);
	}

	tegra_pcie_icc_set(pcie);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}
1030 static int tegra_pcie_dw_link_up(struct dw_pcie
*pci
)
1032 struct tegra_pcie_dw
*pcie
= to_tegra_pcie(pci
);
1033 u32 val
= dw_pcie_readw_dbi(pci
, pcie
->pcie_cap_base
+ PCI_EXP_LNKSTA
);
1035 return !!(val
& PCI_EXP_LNKSTA_DLLLA
);
/*
 * Stop the link: mask the PERST# IRQ so endpoint (de)initialization is no
 * longer triggered by PERST# transitions.
 */
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	disable_irq(pcie->pex_rst_irq);
}
/* DesignWare core callbacks implemented by this driver */
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};
/* Root-port host callbacks for the DesignWare host core */
static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.init = tegra_pcie_dw_host_init,
};
1055 static void tegra_pcie_disable_phy(struct tegra_pcie_dw
*pcie
)
1057 unsigned int phy_count
= pcie
->phy_count
;
1059 while (phy_count
--) {
1060 phy_power_off(pcie
->phys
[phy_count
]);
1061 phy_exit(pcie
->phys
[phy_count
]);
/*
 * Initialize and power on all P2U PHYs.  On failure, unwind the PHYs that
 * were already brought up and return the first error.
 */
static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phys[i]);
		if (ret < 0)
			goto phy_power_off;

		ret = phy_power_on(pcie->phys[i]);
		if (ret < 0) {
			/* This PHY was init'ed but not powered: exit it now */
			phy_exit(pcie->phys[i]);
			goto phy_power_off;
		}
	}

	return 0;

phy_power_off:
	/* Unwind PHYs [0, i) that were fully enabled */
	while (i--) {
		phy_power_off(pcie->phys[i]);
		phy_exit(pcie->phys[i]);
	}

	return ret;
}
/*
 * Parse device-tree properties common to RP and EP modes, then the
 * endpoint-mode-only GPIOs.  Mandatory properties ("dbi" region,
 * num-lanes, nvidia,bpmp, phy-names) fail the probe; optional ones only
 * log and continue.
 */
static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct device_node *np = pcie->dev->of_node;
	int ret;

	pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	if (!pcie->dbi_res) {
		dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
	if (ret < 0) {
		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
				   &pcie->aspm_pwr_on_t);
	if (ret < 0)
		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
			 ret);

	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
				   &pcie->aspm_l0s_enter_lat);
	if (ret < 0)
		dev_info(pcie->dev,
			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);

	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
		return ret;
	}

	/* Second cell of "nvidia,bpmp" is the controller ID used in MRQs */
	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
	if (ret) {
		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
		return ret;
	}

	ret = of_property_count_strings(np, "phy-names");
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
			ret);
		return ret;
	}
	pcie->phy_count = ret;

	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
		pcie->update_fc_fixup = true;

	/* RP using an external REFCLK is supported only in Tegra234 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
		if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
			pcie->enable_ext_refclk = true;
	} else {
		pcie->enable_ext_refclk =
			of_property_read_bool(pcie->dev->of_node,
					      "nvidia,enable-ext-refclk");
	}

	pcie->supports_clkreq =
		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

	pcie->enable_cdm_check =
		of_property_read_bool(np, "snps,enable-cdm-check");

	if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
		pcie->enable_srns =
			of_property_read_bool(np, "nvidia,enable-srns");

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
		return 0;

	/* Endpoint mode specific DT entries */
	pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
	if (IS_ERR(pcie->pex_rst_gpiod)) {
		int err = PTR_ERR(pcie->pex_rst_gpiod);
		const char *level = KERN_ERR;

		/* Probe deferral is expected; don't log it as an error */
		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get PERST GPIO: %d\n"),
			   err);
		return err;
	}

	pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
						    "nvidia,refclk-select",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
		int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, pcie->dev,
			   dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
			   err);
		/* REFCLK select GPIO is optional: continue without it */
		pcie->pex_refclk_sel_gpiod = NULL;
	}

	return 0;
}
/*
 * Ask BPMP firmware to enable/disable this PCIe controller via the
 * MRQ_UPHY CMD_UPHY_PCIE_CONTROLLER_STATE command.
 */
static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
					  bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	/*
	 * Controller-5 doesn't need to have its state set by BPMP-FW in
	 * Tegra194
	 */
	if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
		return 0;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
	req.controller_state.pcie_controller = pcie->cid;
	req.controller_state.enable = enable;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	/* Blocking request/response exchange with BPMP firmware */
	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
/*
 * Ask BPMP firmware to init or turn off the UPHY PLL for this controller
 * (used when an external REFCLK is in play) via MRQ_UPHY.
 */
static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
					 bool enable)
{
	struct mrq_uphy_response resp;
	struct tegra_bpmp_message msg;
	struct mrq_uphy_request req;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	if (enable) {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
		req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
	} else {
		req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
		req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
	}

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_UPHY;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
/*
 * Force immediate downstream devices (slot 0 on the root bus's child bus)
 * into D0 before attempting L2 entry.
 */
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;

	/*
	 * link doesn't go into L2 state with some of the endpoints with Tegra
	 * if they are not in D0 state. So, need to make sure that immediate
	 * downstream devices are in D0 state before sending PME_TurnOff to put
	 * link into L2 state.
	 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
	 * 5.2 Link State Power Management (Page #428).
	 */

	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}
/*
 * Look up the optional 3.3V and 12V slot supplies.  -ENODEV (supply not
 * described in DT) is not an error: the pointer is simply cleared.
 */
static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
	if (IS_ERR(pcie->slot_ctl_3v3)) {
		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_3v3);

		pcie->slot_ctl_3v3 = NULL;
	}

	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
	if (IS_ERR(pcie->slot_ctl_12v)) {
		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
			return PTR_ERR(pcie->slot_ctl_12v);

		pcie->slot_ctl_12v = NULL;
	}

	return 0;
}
/*
 * Enable whichever slot supplies exist, then honour the T_PVPERL power-
 * stable-to-PERST#-inactive delay.  Unwinds the 3.3V supply if enabling
 * the 12V supply fails.
 */
static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	int ret;

	if (pcie->slot_ctl_3v3) {
		ret = regulator_enable(pcie->slot_ctl_3v3);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 3.3V slot supply: %d\n", ret);
			return ret;
		}
	}

	if (pcie->slot_ctl_12v) {
		ret = regulator_enable(pcie->slot_ctl_12v);
		if (ret < 0) {
			dev_err(pcie->dev,
				"Failed to enable 12V slot supply: %d\n", ret);
			goto fail_12v_enable;
		}
	}

	/*
	 * According to PCI Express Card Electromechanical Specification
	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
	 * should be a minimum of 100ms.
	 */
	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
		msleep(100);

	return 0;

fail_12v_enable:
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
	return ret;
}
/* Disable slot supplies in the reverse order they were enabled. */
static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}
/*
 * Bring the controller hardware up for root-port operation: BPMP enable,
 * optional UPHY PLL init, slot/pad supplies, clocks, resets, PHYs and the
 * APPL register programming.  Uses goto-based unwinding: each failure
 * label tears down everything acquired before it.
 */
static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	u32 val;
	int ret;

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
		if (ret) {
			dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
			goto fail_pll_init;
		}
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
			APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	if (pcie->enable_srns || pcie->enable_ext_refclk) {
		/*
		 * When Tegra PCIe RP is using external clock, it cannot supply
		 * same clock to its downstream hierarchy. Hence, gate PCIe RP
		 * REFCLK out pads when RP & EP are using separate clocks or RP
		 * is using an external REFCLK.
		 */
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	if (!pcie->supports_clkreq) {
		/* No CLKREQ routing: force the override so REFCLK stays on */
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
		val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	reset_control_deassert(pcie->core_rst);

	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	if (pcie->enable_ext_refclk)
		tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}
/*
 * Tear down everything tegra_pcie_config_controller() set up, in reverse
 * order.  Errors are logged but teardown always runs to completion.
 */
static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
	int ret;

	ret = reset_control_assert(pcie->core_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);

	tegra_pcie_disable_phy(pcie);

	ret = reset_control_assert(pcie->core_apb_rst);
	if (ret)
		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);

	clk_disable_unprepare(pcie->core_clk);

	ret = regulator_disable(pcie->pex_ctl_supply);
	if (ret)
		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);

	tegra_pcie_disable_slot_regulators(pcie);

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
		if (ret)
			dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
	}

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
	if (ret)
		dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
			pcie->cid, ret);
}
/*
 * Configure the controller hardware and register the DesignWare host
 * bridge; unconfigures the hardware again if host init fails.
 */
static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	int ret;

	ret = tegra_pcie_config_controller(pcie, false);
	if (ret < 0)
		return ret;

	pp->ops = &tegra_pcie_dw_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
		goto fail_host_init;
	}

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}
/*
 * Attempt to put the link into L2 by broadcasting PME_Turn_Off, then poll
 * APPL_DEBUG for the L2 indication.  Returns 0 on success (or if the link
 * is already down), non-zero on timeout.
 */
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
	u32 val;

	if (!tegra_pcie_dw_link_up(&pcie->pci))
		return 0;

	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
					 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
					 1, PME_ACK_TIMEOUT);
}
/*
 * Send PME_Turn_Off and drive the link into L2 (or failing that, into
 * detect state via PERST# + LTSSM disable), then gate CLKREQ and REFCLK
 * to the slot.  Called from suspend_noirq context.
 */
static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
	u32 data;
	int err;

	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
		dev_dbg(pcie->dev, "PCIe link is not up...!\n");
		return;
	}

	/*
	 * PCIe controller exits from L2 only if reset is applied, so
	 * controller doesn't handle interrupts. But in cases where
	 * L2 entry fails, PERST# is asserted which can trigger surprise
	 * link down AER. However this function call happens in
	 * suspend_noirq(), so AER interrupt will not be processed.
	 * Disable all interrupts to avoid such a scenario.
	 */
	appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);

	if (tegra_pcie_try_link_l2(pcie)) {
		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
		/*
		 * TX lane clock freq will reset to Gen1 only if link is in L2
		 * or detect state.
		 * So apply pex_rst to end point to force RP to go into detect
		 * state
		 */
		data = appl_readl(pcie, APPL_PINMUX);
		data &= ~APPL_PINMUX_PEX_RST;
		appl_writel(pcie, data, APPL_PINMUX);

		/*
		 * Some cards do not go to detect state even after de-asserting
		 * PERST#. So, de-assert LTSSM to bring link to detect state.
		 */
		data = readl(pcie->appl_base + APPL_CTRL);
		data &= ~APPL_CTRL_LTSSM_EN;
		writel(data, pcie->appl_base + APPL_CTRL);

		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
						data,
						((data &
						APPL_DEBUG_LTSSM_STATE_MASK) >>
						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
						LTSSM_STATE_PRE_DETECT,
						1, LTSSM_TIMEOUT);
		if (err)
			dev_info(pcie->dev, "Link didn't go to detect state\n");
	}
	/*
	 * DBI registers may not be accessible after this as PLL-E would be
	 * down depending on how CLKREQ is pulled by end point
	 */
	data = appl_readl(pcie, APPL_PINMUX);
	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
	/* Cut REFCLK to slot */
	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, data, APPL_PINMUX);
}
/*
 * Full root-port teardown: wake downstream devices, unregister the host
 * bridge, park the link, then power the controller down.  Order matters.
 */
static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);
}
/*
 * Root-port probe path: runtime-PM bring-up, sideband pinmux, controller
 * init, then link check and debugfs.  Returns -ENOMEDIUM when no device
 * is present behind the port (probe treats that as non-fatal).
 */
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
	struct device *dev = pcie->dev;
	char *name;
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		goto fail_pm_get_sync;
	}

	ret = pinctrl_pm_select_default_state(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
		goto fail_pm_get_sync;
	}

	ret = tegra_pcie_init_controller(pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize controller: %d\n", ret);
		goto fail_pm_get_sync;
	}

	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
	if (!pcie->link_state) {
		/* Empty slot: report "no medium" so probe can ignore it */
		ret = -ENOMEDIUM;
		goto fail_host_init;
	}

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name) {
		ret = -ENOMEM;
		goto fail_host_init;
	}

	pcie->debugfs = debugfs_create_dir(name, NULL);
	init_debugfs(pcie);

	return ret;

fail_host_init:
	tegra_pcie_deinit_controller(pcie);
fail_pm_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
1686 static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw
*pcie
)
1691 if (pcie
->ep_state
== EP_STATE_DISABLED
)
1695 val
= appl_readl(pcie
, APPL_CTRL
);
1696 val
&= ~APPL_CTRL_LTSSM_EN
;
1697 appl_writel(pcie
, val
, APPL_CTRL
);
1699 ret
= readl_poll_timeout(pcie
->appl_base
+ APPL_DEBUG
, val
,
1700 ((val
& APPL_DEBUG_LTSSM_STATE_MASK
) >>
1701 APPL_DEBUG_LTSSM_STATE_SHIFT
) ==
1702 LTSSM_STATE_PRE_DETECT
,
1705 dev_err(pcie
->dev
, "Failed to go Detect state: %d\n", ret
);
1707 reset_control_assert(pcie
->core_rst
);
1709 tegra_pcie_disable_phy(pcie
);
1711 reset_control_assert(pcie
->core_apb_rst
);
1713 clk_disable_unprepare(pcie
->core_clk
);
1715 pm_runtime_put_sync(pcie
->dev
);
1717 if (pcie
->enable_ext_refclk
) {
1718 ret
= tegra_pcie_bpmp_set_pll_state(pcie
, false);
1720 dev_err(pcie
->dev
, "Failed to turn off UPHY: %d\n",
1724 ret
= tegra_pcie_bpmp_set_pll_state(pcie
, false);
1726 dev_err(pcie
->dev
, "Failed to turn off UPHY: %d\n", ret
);
1728 pcie
->ep_state
= EP_STATE_DISABLED
;
1729 dev_dbg(pcie
->dev
, "Uninitialization of endpoint is completed\n");
/*
 * PERST# de-assert handler (endpoint mode): full controller bring-up for
 * EP operation — BPMP enable, clocks/resets/PHYs, APPL programming, DBI
 * fixups, MSI-X address-match setup — ending with LTSSM enable.  The
 * register programming order is hardware-mandated; unwinding uses gotos.
 */
static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_ep *ep = &pci->ep;
	struct device *dev = pcie->dev;
	u32 val;
	u16 val_16;
	int ret;

	if (pcie->ep_state == EP_STATE_ENABLED)
		return;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
			ret);
		return;
	}

	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
			pcie->cid, ret);
		goto fail_set_ctrl_state;
	}

	if (pcie->enable_ext_refclk) {
		ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
		if (ret) {
			dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
				ret);
			goto fail_pll_init;
		}
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk_enable;
	}

	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
		goto fail_core_apb_rst;
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Perform cleanup that requires refclk */
	pci_epc_deinit_notify(pcie->pci.ep.epc);
	dw_pcie_ep_cleanup(&pcie->pci.ep);

	/* Clear any stale interrupt statuses */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	/* configure this core for EP mode operation */
	val = appl_readl(pcie, APPL_DM_TYPE);
	val &= ~APPL_DM_TYPE_MASK;
	val |= APPL_DM_TYPE_EP;
	appl_writel(pcie, val, APPL_DM_TYPE);

	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_SYS_PRE_DET_STATE;
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= APPL_CFG_MISC_SLV_EP_MODE;
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, val, APPL_PINMUX);

	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	appl_writel(pcie, pcie->atu_dma_res->start &
		    APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
	val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	reset_control_deassert(pcie->core_rst);

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	/* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	if (!pcie->of_data->has_l1ss_exit_fix) {
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
	}

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	/* Clear Slot Clock Configuration bit if SRNS configuration */
	if (pcie->enable_srns) {
		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA);
		val_16 &= ~PCI_EXP_LNKSTA_SLC;
		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
				   val_16);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	/* Point the MSI-X address-match window at the EP's MSI memory */
	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);

	ret = dw_pcie_ep_init_registers(ep);
	if (ret) {
		dev_err(dev, "Failed to complete initialization: %d\n", ret);
		goto fail_init_complete;
	}

	pci_epc_init_notify(ep->epc);

	/* Program the private control to allow sending LTR upstream */
	if (pcie->of_data->has_ltr_req_fix) {
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);
	}

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	pcie->ep_state = EP_STATE_ENABLED;
	dev_dbg(dev, "Initialization of endpoint is completed\n");

	return;

fail_init_complete:
	reset_control_assert(pcie->core_rst);
	tegra_pcie_disable_phy(pcie);
fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk_enable:
	tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
fail_set_ctrl_state:
	pm_runtime_put_sync(dev);
}
/*
 * Threaded IRQ handler for the PERST# GPIO: dispatch to the assert or
 * de-assert path based on the current pin level.
 */
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;

	if (gpiod_get_value(pcie->pex_rst_gpiod))
		pex_ep_event_pex_rst_assert(pcie);
	else
		pex_ep_event_pex_rst_deassert(pcie);

	return IRQ_HANDLED;
}
/*
 * Raise a legacy interrupt towards the host by pulsing the APPL
 * LEGACY_INTX register.
 */
static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	/* Tegra194 supports only INTA */
	if (irq > 1)
		return -EINVAL;

	appl_writel(pcie, 1, APPL_LEGACY_INTX);
	usleep_range(1000, 2000);
	appl_writel(pcie, 0, APPL_LEGACY_INTX);
	return 0;
}
/* Raise MSI vector 'irq' (1-based, max 32 vectors) via APPL_MSI_CTRL_1. */
static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	if (unlikely(irq > 31))
		return -EINVAL;

	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);

	return 0;
}
/*
 * Raise an MSI-X interrupt: write the vector number into the EP's MSI
 * memory, which the address-match window translates into an MSI-X TLP.
 */
static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
	struct dw_pcie_ep *ep = &pcie->pci.ep;

	writel(irq, ep->msi_mem);

	return 0;
}
/* Dispatch an EP framework raise-IRQ request to the proper mechanism. */
static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				   unsigned int type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	switch (type) {
	case PCI_IRQ_INTX:
		return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num);

	case PCI_IRQ_MSI:
		return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);

	case PCI_IRQ_MSIX:
		return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);

	default:
		dev_err(pci->dev, "Unknown IRQ type\n");
		return -EPERM;
	}

	return 0;
}
/* EPC capabilities: only BAR0 is usable (fixed 1 MiB, 64-bit). */
static const struct pci_epc_features tegra_pcie_epc_features = {
	.linkup_notifier = true,
	.msi_capable = false,
	.msix_capable = false,
	.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
			.only_64bit = true, },
	.bar[BAR_1] = { .type = BAR_RESERVED, },
	.bar[BAR_2] = { .type = BAR_RESERVED, },
	.bar[BAR_3] = { .type = BAR_RESERVED, },
	.bar[BAR_4] = { .type = BAR_RESERVED, },
	.bar[BAR_5] = { .type = BAR_RESERVED, },
};
/* Return the static EPC feature table for this controller. */
static const struct pci_epc_features *
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}
/* DesignWare endpoint callbacks implemented by this driver */
static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};
/*
 * Endpoint-mode probe path: configure the DWC EP core, wire the PERST#
 * GPIO up as a threaded IRQ (kept disabled until start_link), and
 * register with the endpoint framework.
 */
static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	ep->page_size = SZ_64K;

	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
			ret);
		return ret;
	}

	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
	if (ret < 0) {
		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
		return ret;
	}
	pcie->pex_rst_irq = (unsigned int)ret;

	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PERST IRQ string\n");
		return -ENOMEM;
	}

	/* Keep the IRQ masked until tegra_pcie_dw_start_link() enables it */
	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);

	pcie->ep_state = EP_STATE_DISABLED;

	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
					tegra_pcie_ep_pex_rst_irq,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					name, (void *)pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
		return ret;
	}

	pm_runtime_enable(dev);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
			ret);
		pm_runtime_disable(dev);
		return ret;
	}

	return 0;
}
/*
 * Probe: acquire all resources (DT data, regulators, clocks, resets,
 * MMIO regions, PHYs, BPMP handle, interconnect path), then branch into
 * the RP or EP configuration path based on the matched of_data mode.
 */
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
	const struct tegra_pcie_dw_of_data *data;
	struct device *dev = &pdev->dev;
	struct resource *atu_dma_res;
	struct tegra_pcie_dw *pcie;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	struct phy **phys;
	char *name;
	int ret;
	u32 i;

	data = of_device_get_match_data(dev);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = &pcie->pci;
	pci->dev = &pdev->dev;
	pci->ops = &tegra_dw_pcie_ops;
	pcie->dev = &pdev->dev;
	pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
	pci->n_fts[0] = pcie->of_data->n_fts[0];
	pci->n_fts[1] = pcie->of_data->n_fts[1];
	pp = &pci->pp;
	pp->num_vectors = MAX_MSI_IRQS;

	ret = tegra_pcie_dw_parse_dt(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		/* Demote expected probe-deferral to debug level */
		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to parse device tree: %d\n"),
			   ret);
		return ret;
	}

	ret = tegra_pcie_get_slot_regulators(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to get slot regulators: %d\n"),
			   ret);
		return ret;
	}

	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);

	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
	if (IS_ERR(pcie->pex_ctl_supply)) {
		ret = PTR_ERR(pcie->pex_ctl_supply);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get regulator: %ld\n",
				PTR_ERR(pcie->pex_ctl_supply));
		return ret;
	}

	pcie->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(pcie->core_clk)) {
		dev_err(dev, "Failed to get core clock: %ld\n",
			PTR_ERR(pcie->core_clk));
		return PTR_ERR(pcie->core_clk);
	}

	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						      "appl");
	if (!pcie->appl_res) {
		dev_err(dev, "Failed to find \"appl\" region\n");
		return -ENODEV;
	}

	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
	if (IS_ERR(pcie->appl_base))
		return PTR_ERR(pcie->appl_base);

	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(pcie->core_apb_rst)) {
		dev_err(dev, "Failed to get APB reset: %ld\n",
			PTR_ERR(pcie->core_apb_rst));
		return PTR_ERR(pcie->core_apb_rst);
	}

	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
	if (!phys)
		return -ENOMEM;

	for (i = 0; i < pcie->phy_count; i++) {
		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
		if (!name) {
			dev_err(dev, "Failed to create P2U string\n");
			return -ENOMEM;
		}
		phys[i] = devm_phy_get(dev, name);
		kfree(name);
		if (IS_ERR(phys[i])) {
			ret = PTR_ERR(phys[i]);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get PHY: %d\n", ret);
			return ret;
		}
	}

	pcie->phys = phys;

	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "atu_dma");
	if (!atu_dma_res) {
		dev_err(dev, "Failed to find \"atu_dma\" region\n");
		return -ENODEV;
	}
	pcie->atu_dma_res = atu_dma_res;

	pci->atu_size = resource_size(atu_dma_res);
	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
	if (IS_ERR(pci->atu_base))
		return PTR_ERR(pci->atu_base);

	pcie->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(pcie->core_rst)) {
		dev_err(dev, "Failed to get core reset: %ld\n",
			PTR_ERR(pcie->core_rst));
		return PTR_ERR(pcie->core_rst);
	}

	pp->irq = platform_get_irq_byname(pdev, "intr");
	if (pp->irq < 0)
		return pp->irq;

	pcie->bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(pcie->bpmp))
		return PTR_ERR(pcie->bpmp);

	platform_set_drvdata(pdev, pcie);

	pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
	ret = PTR_ERR_OR_ZERO(pcie->icc_path);
	if (ret) {
		tegra_bpmp_put(pcie->bpmp);
		dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
		return ret;
	}

	switch (pcie->of_data->mode) {
	case DW_PCIE_RC_TYPE:
		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
				       IRQF_SHARED, "tegra-pcie-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		/* -ENOMEDIUM (empty slot) is not a probe failure */
		ret = tegra_pcie_config_rp(pcie);
		if (ret && ret != -ENOMEDIUM)
			goto fail;
		else
			return 0;

	case DW_PCIE_EP_TYPE:
		ret = devm_request_threaded_irq(dev, pp->irq,
						tegra_pcie_ep_hard_irq,
						tegra_pcie_ep_irq_thread,
						IRQF_SHARED | IRQF_ONESHOT,
						"tegra-pcie-ep-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_ep(pcie, pdev);
		if (ret < 0)
			goto fail;
		else
			return 0;

	default:
		dev_err(dev, "Invalid PCIe device type %d\n",
			pcie->of_data->mode);
		ret = -EINVAL;
	}

fail:
	tegra_bpmp_put(pcie->bpmp);
	return ret;
}
/*
 * Remove: tear down the RP controller (only if a link ever came up) or
 * force the EP into its PERST#-asserted state, then release BPMP and the
 * REFCLK-select GPIO.
 */
static void tegra_pcie_dw_remove(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_deinit_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}

	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
}
/*
 * suspend_late: refuse in EP mode; otherwise enable HW_HOT_RST (on SoCs
 * without the SBR reset fix) so a hot reset during suspend is handled in
 * hardware.
 */
static int tegra_pcie_dw_suspend_late(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n");
		return -EPERM;
	}

	if (!pcie->link_state)
		return 0;

	/* Enable HW_HOT_RST mode */
	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	return 0;
}
2331 static int tegra_pcie_dw_suspend_noirq(struct device
*dev
)
2333 struct tegra_pcie_dw
*pcie
= dev_get_drvdata(dev
);
2335 if (!pcie
->link_state
)
2338 tegra_pcie_downstream_dev_to_D0(pcie
);
2339 tegra_pcie_dw_pme_turnoff(pcie
);
2340 tegra_pcie_unconfig_controller(pcie
);
2345 static int tegra_pcie_dw_resume_noirq(struct device
*dev
)
2347 struct tegra_pcie_dw
*pcie
= dev_get_drvdata(dev
);
2350 if (!pcie
->link_state
)
2353 ret
= tegra_pcie_config_controller(pcie
, true);
2357 ret
= tegra_pcie_dw_host_init(&pcie
->pci
.pp
);
2359 dev_err(dev
, "Failed to init host: %d\n", ret
);
2360 goto fail_host_init
;
2363 dw_pcie_setup_rc(&pcie
->pci
.pp
);
2365 ret
= tegra_pcie_dw_start_link(&pcie
->pci
);
2367 goto fail_host_init
;
2372 tegra_pcie_unconfig_controller(pcie
);
2376 static int tegra_pcie_dw_resume_early(struct device
*dev
)
2378 struct tegra_pcie_dw
*pcie
= dev_get_drvdata(dev
);
2381 if (pcie
->of_data
->mode
== DW_PCIE_EP_TYPE
) {
2382 dev_err(dev
, "Suspend is not supported in EP mode");
2386 if (!pcie
->link_state
)
2389 /* Disable HW_HOT_RST mode */
2390 if (!pcie
->of_data
->has_sbr_reset_fix
) {
2391 val
= appl_readl(pcie
, APPL_CTRL
);
2392 val
&= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK
<<
2393 APPL_CTRL_HW_HOT_RST_MODE_SHIFT
);
2394 val
|= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST
<<
2395 APPL_CTRL_HW_HOT_RST_MODE_SHIFT
;
2396 val
&= ~APPL_CTRL_HW_HOT_RST_EN
;
2397 appl_writel(pcie
, val
, APPL_CTRL
);
2403 static void tegra_pcie_dw_shutdown(struct platform_device
*pdev
)
2405 struct tegra_pcie_dw
*pcie
= platform_get_drvdata(pdev
);
2407 if (pcie
->of_data
->mode
== DW_PCIE_RC_TYPE
) {
2408 if (!pcie
->link_state
)
2411 debugfs_remove_recursive(pcie
->debugfs
);
2412 tegra_pcie_downstream_dev_to_D0(pcie
);
2414 disable_irq(pcie
->pci
.pp
.irq
);
2415 if (IS_ENABLED(CONFIG_PCI_MSI
))
2416 disable_irq(pcie
->pci
.pp
.msi_irq
[0]);
2418 tegra_pcie_dw_pme_turnoff(pcie
);
2419 tegra_pcie_unconfig_controller(pcie
);
2420 pm_runtime_put_sync(pcie
->dev
);
2422 disable_irq(pcie
->pex_rst_irq
);
2423 pex_ep_event_pex_rst_assert(pcie
);
2427 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data
= {
2428 .version
= TEGRA194_DWC_IP_VER
,
2429 .mode
= DW_PCIE_RC_TYPE
,
2430 .cdm_chk_int_en_bit
= BIT(19),
2431 /* Gen4 - 5, 6, 8 and 9 presets enabled */
2432 .gen4_preset_vec
= 0x360,
2433 .n_fts
= { 52, 52 },
2436 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data
= {
2437 .version
= TEGRA194_DWC_IP_VER
,
2438 .mode
= DW_PCIE_EP_TYPE
,
2439 .cdm_chk_int_en_bit
= BIT(19),
2440 /* Gen4 - 5, 6, 8 and 9 presets enabled */
2441 .gen4_preset_vec
= 0x360,
2442 .n_fts
= { 52, 52 },
2445 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data
= {
2446 .version
= TEGRA234_DWC_IP_VER
,
2447 .mode
= DW_PCIE_RC_TYPE
,
2448 .has_msix_doorbell_access_fix
= true,
2449 .has_sbr_reset_fix
= true,
2450 .has_l1ss_exit_fix
= true,
2451 .cdm_chk_int_en_bit
= BIT(18),
2452 /* Gen4 - 6, 8 and 9 presets enabled */
2453 .gen4_preset_vec
= 0x340,
2454 .n_fts
= { 52, 80 },
2457 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data
= {
2458 .version
= TEGRA234_DWC_IP_VER
,
2459 .mode
= DW_PCIE_EP_TYPE
,
2460 .has_l1ss_exit_fix
= true,
2461 .has_ltr_req_fix
= true,
2462 .cdm_chk_int_en_bit
= BIT(18),
2463 /* Gen4 - 6, 8 and 9 presets enabled */
2464 .gen4_preset_vec
= 0x340,
2465 .n_fts
= { 52, 80 },
2468 static const struct of_device_id tegra_pcie_dw_of_match
[] = {
2470 .compatible
= "nvidia,tegra194-pcie",
2471 .data
= &tegra194_pcie_dw_rc_of_data
,
2474 .compatible
= "nvidia,tegra194-pcie-ep",
2475 .data
= &tegra194_pcie_dw_ep_of_data
,
2478 .compatible
= "nvidia,tegra234-pcie",
2479 .data
= &tegra234_pcie_dw_rc_of_data
,
2482 .compatible
= "nvidia,tegra234-pcie-ep",
2483 .data
= &tegra234_pcie_dw_ep_of_data
,
2488 static const struct dev_pm_ops tegra_pcie_dw_pm_ops
= {
2489 .suspend_late
= tegra_pcie_dw_suspend_late
,
2490 .suspend_noirq
= tegra_pcie_dw_suspend_noirq
,
2491 .resume_noirq
= tegra_pcie_dw_resume_noirq
,
2492 .resume_early
= tegra_pcie_dw_resume_early
,
2495 static struct platform_driver tegra_pcie_dw_driver
= {
2496 .probe
= tegra_pcie_dw_probe
,
2497 .remove
= tegra_pcie_dw_remove
,
2498 .shutdown
= tegra_pcie_dw_shutdown
,
2500 .name
= "tegra194-pcie",
2501 .pm
= &tegra_pcie_dw_pm_ops
,
2502 .of_match_table
= tegra_pcie_dw_of_match
,
/* Registers the driver and exports the OF match table for autoloading */
module_platform_driver(tegra_pcie_dw_driver);

MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");