/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
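/*
 * The AFI exposes eight 32-bit MSI vector registers (AFI_MSI_VEC0..7
 * below), so this controller can decode up to 8 * 32 = 256 distinct MSIs.
 */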
#define INT_PCI_MSI_NR (8 * 32)
/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0		0x6c
#define AFI_MSI_VEC1		0x70
#define AFI_MSI_VEC2		0x74
#define AFI_MSI_VEC3		0x78
#define AFI_MSI_VEC4		0x7c
#define AFI_MSI_VEC5		0x80
#define AFI_MSI_VEC6		0x84
#define AFI_MSI_VEC7		0x88

#define AFI_MSI_EN_VEC0		0x8c
#define AFI_MSI_EN_VEC1		0x90
#define AFI_MSI_EN_VEC2		0x94
#define AFI_MSI_EN_VEC3		0x98
#define AFI_MSI_EN_VEC4		0x9c
#define AFI_MSI_EN_VEC5		0xa0
#define AFI_MSI_EN_VEC6		0xa4
#define AFI_MSI_EN_VEC7		0xa8

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX2_CTRL			0x128
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP	(1 << 30)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
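/*
 * Illustrative sketch (field values come from NVIDIA, not the TRM): a
 * 16-bit PADS_REFCLK_CFG* entry for one port would be composed roughly as
 *
 *	u16 cfg = (drvi << PADS_REFCLK_CFG_DRVI_SHIFT) |
 *		  (predi << PADS_REFCLK_CFG_PREDI_SHIFT) |
 *		  (e_term << PADS_REFCLK_CFG_E_TERM_SHIFT) |
 *		  (term << PADS_REFCLK_CFG_TERM_SHIFT);
 *
 * The per-SoC pads_refclk_cfg0/cfg1 values further below pack two such
 * entries into each 32-bit register.
 */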
struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	unsigned long pages;
	struct mutex lock;
	int irq;
};
/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc {
	unsigned int num_ports;
	unsigned int msi_base_shift;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	int irq;

	struct list_head buses;
	struct resource *cs;

	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource prefetch;
	struct resource busn;

	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};
struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;
};
struct tegra_pcie_bus {
	struct vm_struct *area;
	struct list_head list;
	unsigned int nr;
};
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 * To work around this, 1 MiB of virtual addresses is allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * bus number bits are hidden so that the extended register number bits
 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 *
 *    [19:16] extended register number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 * address space via the MMU.
 */
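/*
 * Worked example for the mapping above: extended register offset 0x104 of
 * device 2, function 1 maps to
 *
 *	((0x104 & 0xf00) << 8) | (2 << 11) | (1 << 8) | (0x104 & 0xfc)
 *	= 0x10000 | 0x1000 | 0x100 | 0x04 = 0x11104
 *
 * within the per-bus 1 MiB window. The stitching is what makes the bus
 * number disappear: chunk i of bus busnr is backed by physical address
 * cs + i * SZ_16M + busnr * SZ_64K, i.e. the 64 KiB page selected by the
 * bus number bits [23:16] inside each 16 MiB extended-register stripe.
 */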
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xfc);
}
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	struct device *dev = pcie->dev;
	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				 L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(dev, "ioremap_page_range() failed: %d\n", err);
			goto unmap;
		}
	}

	return bus;

unmap:
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
static int tegra_pcie_add_bus(struct pci_bus *bus)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	struct tegra_pcie_bus *b;

	b = tegra_pcie_bus_alloc(pcie, bus->number);
	if (IS_ERR(b))
		return PTR_ERR(b);

	list_add_tail(&b->list, &pcie->buses);

	return 0;
}
static void tegra_pcie_remove_bus(struct pci_bus *child)
{
	struct tegra_pcie *pcie = sys_to_pcie(child->sysdata);
	struct tegra_pcie_bus *bus, *tmp;

	list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
		if (bus->nr == child->number) {
			vunmap(bus->area->addr);
			list_del(&bus->list);
			kfree(bus);
			break;
		}
	}
}
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	struct device *dev = pcie->dev;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		struct tegra_pcie_bus *b;

		list_for_each_entry(b, &pcie->buses, list)
			if (b->nr == bus->number)
				addr = (void __iomem *)b->area->addr;

		if (!addr) {
			dev_err(dev, "failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}
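/*
 * Note that .read and .write below use the generic 32-bit config accessors,
 * which emulate narrower accesses with aligned 32-bit (read-modify-write)
 * cycles; map_bus therefore only ever returns word-aligned addresses
 * (where & ~3 above).
 */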
static struct pci_ops tegra_pcie_ops = {
	.add_bus = tegra_pcie_add_bus,
	.remove_bus = tegra_pcie_remove_bus,
	.map_bus = tegra_pcie_map_bus,
	.read = pci_generic_config_read32,
	.write = pci_generic_config_write32,
};
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = AFI_PEX2_CTRL;
		break;
	}

	return ret;
}
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
/* Tegra20 and Tegra30 PCIE require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	struct device *dev = pcie->dev;
	int err;

	sys->mem_offset = pcie->offset.mem;
	sys->io_offset = pcie->offset.io;

	err = devm_request_resource(dev, &iomem_resource, &pcie->io);
	if (err < 0)
		return err;

	err = pci_remap_iospace(&pcie->pio, pcie->io.start);
	if (!err)
		pci_add_resource_offset(&sys->resources, &pcie->pio,
					sys->io_offset);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, &sys->resources);
	if (err < 0)
		return err;

	return 1;
}
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}
static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, " FPCI address: %10llx\n", address);
		else
			dev_err(dev, " FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}
/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
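/*
 * In the function below, the memory BAR values pack bits [39:12] of the
 * FPCI base address into register bits [31:4] with bit 0 as an enable
 * (the "((addr >> 12) & 0x0fffffff) << 4 | 0x1" arithmetic), and all
 * AFI_AXI_BAR*_SZ registers are programmed in 4 KiB units (size >> 12).
 */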
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock, set TX
	 * ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}
static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	u32 value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	err = tegra_pcie_phy_power_on(pcie);
	if (err < 0) {
		dev_err(dev, "failed to power on PHY(s): %d\n", err);
		return err;
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* TODO: disable and unprepare clocks? */

	err = tegra_pcie_phy_power_off(pcie);
	if (err < 0)
		dev_err(dev, "failed to power off PHY(s): %d\n", err);

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}
static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}
static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_get(dev, np, name);
	kfree(name);

	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	struct phy *phy;
	unsigned int i;
	int err;

	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
	if (!port->phys)
		return -ENOMEM;

	for (i = 0; i < port->lanes; i++) {
		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
		if (IS_ERR(phy)) {
			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
				PTR_ERR(phy));
			return PTR_ERR(phy);
		}

		err = phy_init(phy);
		if (err < 0) {
			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
				err);
			return err;
		}

		port->phys[i] = phy;
	}

	return 0;
}
static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	err = tegra_pcie_phys_get(pcie);
	if (err < 0) {
		dev_err(dev, "failed to get PHYs: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(dev, "failed to power up: %d\n", err);
		return err;
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);

	err = phy_exit(pcie->phy);
	if (err < 0)
		dev_err(dev, "failed to teardown PHY: %d\n", err);

	return 0;
}
static int tegra_msi_alloc(struct tegra_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
{
	struct device *dev = chip->chip.dev;

	mutex_lock(&chip->lock);

	if (!test_bit(irq, chip->used))
		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
	else
		clear_bit(irq, chip->used);

	mutex_unlock(&chip->lock);
}
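/*
 * The handler below demultiplexes the eight AFI_MSI_VEC* registers: each
 * set bit corresponds to one pending MSI, is acknowledged by writing it
 * back, and is then dispatched through the IRQ domain mapping.
 */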
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct device *dev = pcie->dev;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
static int tegra_msi_setup_irq(struct msi_controller *chip,
			       struct pci_dev *pdev, struct msi_desc *desc)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = tegra_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		tegra_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = virt_to_phys((void *)msi->pages);
	/* 32 bit address only */
	msg.address_hi = 0;
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}
static void tegra_msi_teardown_irq(struct msi_controller *chip,
				   unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	irq_dispose_mapping(irq);
	tegra_msi_free(msi, hwirq);
}
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
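/*
 * A single page is reserved below as the MSI write target: devices post
 * their MSI data to its physical address, and the AFI latches the vector
 * number into AFI_MSI_VEC* instead of forwarding the write to memory. The
 * FPCI view of that address is programmed via AFI_MSI_FPCI_BAR_ST using
 * the SoC-specific msi_base_shift.
 */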
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_msi *msi = &pcie->msi;
	unsigned long base;
	int err;
	u32 reg;

	mutex_init(&msi->lock);

	msi->chip.dev = dev;
	msi->chip.setup_irq = tegra_msi_setup_irq;
	msi->chip.teardown_irq = tegra_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto err;
	}

	msi->irq = err;

	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
			  tegra_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup AFI/FPCI range */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	base = virt_to_phys((void *)msi->pages);

	afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
	/* this register is in 4K increments */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* enable all MSI vectors */
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

	/* and unmask the MSI interrupt */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
		switch (lanes) {
		case 0x0000104:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
			return 0;

		case 0x0000102:
			dev_info(dev, "2x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			dev_info(dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			dev_info(dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			dev_info(dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			dev_info(dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			dev_info(dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
/*
 * Check whether a given set of supplies is available in a device tree node.
 * This is used to check whether the new or the legacy device tree bindings
 * should be used.
 */
static bool of_regulator_bulk_available(struct device_node *np,
					struct regulator_bulk_data *supplies,
					unsigned int num_supplies)
{
	char property[32];
	unsigned int i;

	for (i = 0; i < num_supplies; i++) {
		snprintf(property, 32, "%s-supply", supplies[i].supply);

		if (of_find_property(np, property, NULL) == NULL)
			return false;
	}

	return true;
}
/*
 * Old versions of the device tree binding for this device used a set of power
 * supplies that didn't match the hardware inputs. This happened to work for a
 * number of cases but is not future proof. However to preserve backwards-
 * compatibility with old device trees, this function will try to use the old
 * set of supplies.
 */
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
		pcie->num_supplies = 3;
	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
		pcie->num_supplies = 2;

	if (pcie->num_supplies == 0) {
		dev_err(dev, "device %s not supported in legacy mode\n",
			np->full_name);
		return -ENODEV;
	}

	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
				      sizeof(*pcie->supplies),
				      GFP_KERNEL);
	if (!pcie->supplies)
		return -ENOMEM;

	pcie->supplies[0].supply = "pex-clk";
	pcie->supplies[1].supply = "vdd";

	if (pcie->num_supplies > 2)
		pcie->supplies[2].supply = "avdd";

	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
}
/*
 * Obtains the list of regulators required for a particular generation of the
 * IP block.
 *
 * This would've been nice to do simply by providing static tables for use
 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
 * and either seems to be optional depending on which ports are being used.
 */
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	unsigned int i = 0;

	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
		pcie->num_supplies = 7;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-pll-erefe";
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		bool need_pexa = false, need_pexb = false;

		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
		if (lane_mask & 0x0f)
			need_pexa = true;

		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
		if (lane_mask & 0x30)
			need_pexb = true;

		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
					 (need_pexb ? 2 : 0);

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-plle";

		if (need_pexa) {
			pcie->supplies[i++].supply = "avdd-pexa";
			pcie->supplies[i++].supply = "vdd-pexa";
		}

		if (need_pexb) {
			pcie->supplies[i++].supply = "avdd-pexb";
			pcie->supplies[i++].supply = "vdd-pexb";
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		pcie->num_supplies = 5;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[0].supply = "avdd-pex";
		pcie->supplies[1].supply = "vdd-pex";
		pcie->supplies[2].supply = "avdd-pex-pll";
		pcie->supplies[3].supply = "avdd-plle";
		pcie->supplies[4].supply = "vddio-pex-clk";
	}

	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
					pcie->num_supplies))
		return devm_regulator_bulk_get(dev, pcie->num_supplies,
					       pcie->supplies);

	/*
	 * If not all regulators are available for this new scheme, assume
	 * that the device tree complies with an older version of the device
	 * tree binding.
	 */
	dev_info(dev, "using legacy DT binding for power supplies\n");

	devm_kfree(dev, pcie->supplies);
	pcie->num_supplies = 0;

	return tegra_pcie_get_legacy_regulators(pcie);
}
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node, *port;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 lanes = 0, mask = 0;
	unsigned int lane = 0;
	struct resource res;
	int err;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, np, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			/* Track the bus -> CPU I/O mapping offset. */
			pcie->offset.io = res.start - range.pci_addr;

			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = np->full_name;

			/*
			 * The Tegra PCIe host bridge uses this to program the
			 * mapping of the I/O space to the physical address,
			 * so we override the .start and .end fields here that
			 * of_pci_range_to_resource() converted to I/O space.
			 * We also set the IORESOURCE_MEM type to clarify that
			 * the resource is in the physical memory space.
			 */
			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			/*
			 * Track the bus -> CPU memory mapping offset. This
			 * assumes that the prefetchable and non-prefetchable
			 * regions will be the last of type IORESOURCE_MEM in
			 * the ranges property.
			 */
			pcie->offset.mem = res.start - range.pci_addr;

			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "prefetchable";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "non-prefetchable";
			}
			break;
		}
	}

	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(dev, "failed to parse ranges property: %d\n", err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(dev, "failed to parse address: %d\n", err);
			return err;
		}

		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		lanes |= value << (index << 3);

		if (!of_device_is_available(port)) {
			lane += value;
			continue;
		}

		mask |= ((1 << value) - 1) << lane;
		lane += value;

		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(dev, "failed to parse address: %d\n", err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;
		rp->np = port;

		rp->base = devm_ioremap_resource(dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(dev, "invalid lane configuration\n");
		return err;
	}

	err = tegra_pcie_get_regulators(pcie, mask);
	if (err < 0)
		return err;

	return 0;
}
/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
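/*
 * Each polling loop below runs up to 200 iterations of
 * usleep_range(1000, 2000), i.e. roughly 0.2-0.4 s per attempt; with three
 * retries this is where the "up to 1.2 seconds" figure comes from.
 */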
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int retries = 3;
	unsigned long value;

	/* override presence detection */
	value = readl(port->base + RP_PRIV_MISC);
	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
	writel(value, port->base + RP_PRIV_MISC);

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(dev, "link %u down, retrying\n", port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port, *tmp;
	struct hw_pci hw;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}

	memset(&hw, 0, sizeof(hw));

#ifdef CONFIG_PCI_MSI
	hw.msi_ctrl = &pcie->msi.chip;
#endif

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = tegra_pcie_setup;
	hw.map_irq = tegra_pcie_map_irq;
	hw.ops = &tegra_pcie_ops;

	pci_common_init_dev(dev, &hw);

	return 0;
}
static const struct tegra_pcie_soc tegra20_pcie = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
};

static const struct tegra_pcie_soc tegra30_pcie = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.pads_refclk_cfg1 = 0xfa5cfa5c,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
};

static const struct tegra_pcie_soc tegra124_pcie = {
	.num_ports = 2,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x44ac44ac,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
};

static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
	{ },
};
static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	if (list_empty(&pcie->ports))
		return NULL;

	seq_printf(s, "Index  Status\n");

	return seq_list_start(&pcie->ports, *pos);
}

static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}

static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}

static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
{
	bool up = false, active = false;
	struct tegra_pcie_port *port;
	unsigned int value;

	port = list_entry(v, struct tegra_pcie_port, list);

	value = readl(port->base + RP_VEND_XP);

	if (value & RP_VEND_XP_DL_UP)
		up = true;

	value = readl(port->base + RP_LINK_CONTROL_STATUS);

	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
		active = true;

	seq_printf(s, "%2u     ", port->index);

	if (up)
		seq_printf(s, "up");

	if (active) {
		if (up)
			seq_printf(s, ", ");

		seq_printf(s, "active");
	}

	seq_printf(s, "\n");
	return 0;
}

static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};

static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
{
	struct tegra_pcie *pcie = inode->i_private;
	struct seq_file *s;
	int err;

	err = seq_open(file, &tegra_pcie_ports_seq_ops);
	if (err)
		return err;

	s = file->private_data;
	s->private = pcie;

	return 0;
}
= {
2200 .owner
= THIS_MODULE
,
2201 .open
= tegra_pcie_ports_open
,
2203 .llseek
= seq_lseek
,
2204 .release
= seq_release
,
static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
{
	struct dentry *file;

	pcie->debugfs = debugfs_create_dir("pcie", NULL);
	if (!pcie->debugfs)
		return -ENOMEM;

	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
				   pcie, &tegra_pcie_ports_ops);
	if (!file)
		goto remove;

	return 0;

remove:
	debugfs_remove_recursive(pcie->debugfs);
	pcie->debugfs = NULL;
	return -ENOMEM;
}
static int tegra_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_pcie *pcie;
	int err;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->soc = of_device_get_match_data(dev);
	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->dev = dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev, "failed to enable MSI support: %d\n", err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(dev, "failed to setup debugfs: %d\n", err);
	}

	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
builtin_platform_driver(tegra_pcie_driver);