/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/tegra-cpuidle.h>
#include <linux/tegra-powergate.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

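/*
 * The AFI exposes eight 32-bit MSI vector registers (AFI_MSI_VEC0..7 below),
 * so 8 * 32 = 256 distinct MSI vectors can be decoded.
 */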
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ 0x00
#define AFI_AXI_BAR1_SZ 0x04
#define AFI_AXI_BAR2_SZ 0x08
#define AFI_AXI_BAR3_SZ 0x0c
#define AFI_AXI_BAR4_SZ 0x10
#define AFI_AXI_BAR5_SZ 0x14

#define AFI_AXI_BAR0_START 0x18
#define AFI_AXI_BAR1_START 0x1c
#define AFI_AXI_BAR2_START 0x20
#define AFI_AXI_BAR3_START 0x24
#define AFI_AXI_BAR4_START 0x28
#define AFI_AXI_BAR5_START 0x2c

#define AFI_FPCI_BAR0 0x30
#define AFI_FPCI_BAR1 0x34
#define AFI_FPCI_BAR2 0x38
#define AFI_FPCI_BAR3 0x3c
#define AFI_FPCI_BAR4 0x40
#define AFI_FPCI_BAR5 0x44

#define AFI_CACHE_BAR0_SZ 0x48
#define AFI_CACHE_BAR0_ST 0x4c
#define AFI_CACHE_BAR1_SZ 0x50
#define AFI_CACHE_BAR1_ST 0x54

#define AFI_MSI_BAR_SZ 0x60
#define AFI_MSI_FPCI_BAR_ST 0x64
#define AFI_MSI_AXI_BAR_ST 0x68

#define AFI_MSI_VEC0 0x6c
#define AFI_MSI_VEC1 0x70
#define AFI_MSI_VEC2 0x74
#define AFI_MSI_VEC3 0x78
#define AFI_MSI_VEC4 0x7c
#define AFI_MSI_VEC5 0x80
#define AFI_MSI_VEC6 0x84
#define AFI_MSI_VEC7 0x88

#define AFI_MSI_EN_VEC0 0x8c
#define AFI_MSI_EN_VEC1 0x90
#define AFI_MSI_EN_VEC2 0x94
#define AFI_MSI_EN_VEC3 0x98
#define AFI_MSI_EN_VEC4 0x9c
#define AFI_MSI_EN_VEC5 0xa0
#define AFI_MSI_EN_VEC6 0xa4
#define AFI_MSI_EN_VEC7 0xa8

#define AFI_CONFIGURATION 0xac
#define AFI_CONFIGURATION_EN_FPCI (1 << 0)

#define AFI_FPCI_ERROR_MASKS 0xb0

#define AFI_INTR_MASK 0xb4
#define AFI_INTR_MASK_INT_MASK (1 << 0)
#define AFI_INTR_MASK_MSI_MASK (1 << 8)

#define AFI_INTR_CODE 0xb8
#define AFI_INTR_CODE_MASK 0xf
#define AFI_INTR_AXI_SLAVE_ERROR 1
#define AFI_INTR_AXI_DECODE_ERROR 2
#define AFI_INTR_TARGET_ABORT 3
#define AFI_INTR_MASTER_ABORT 4
#define AFI_INTR_INVALID_WRITE 5
#define AFI_INTR_LEGACY 6
#define AFI_INTR_FPCI_DECODE_ERROR 7

#define AFI_INTR_SIGNATURE 0xbc
#define AFI_UPPER_FPCI_ADDRESS 0xc0
#define AFI_SM_INTR_ENABLE 0xc4
#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)

#define AFI_AFI_INTR_ENABLE 0xc8
#define AFI_INTR_EN_INI_SLVERR (1 << 0)
#define AFI_INTR_EN_INI_DECERR (1 << 1)
#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
#define AFI_INTR_EN_TGT_DECERR (1 << 3)
#define AFI_INTR_EN_TGT_WRERR (1 << 4)
#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
#define AFI_INTR_EN_AXI_DECERR (1 << 6)
#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)

#define AFI_PCIE_CONFIG 0x0f8
#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)

#define AFI_FUSE 0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)

#define AFI_PEX0_CTRL 0x110
#define AFI_PEX1_CTRL 0x118
#define AFI_PEX2_CTRL 0x128
#define AFI_PEX_CTRL_RST (1 << 0)
#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)

#define AFI_PEXBIAS_CTRL_0 0x168

#define RP_VEND_XP 0x00000F00
#define RP_VEND_XP_DL_UP (1 << 30)

#define RP_LINK_CONTROL_STATUS 0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000

#define PADS_CTL_SEL 0x0000009C

#define PADS_CTL 0x000000A0
#define PADS_CTL_IDDQ_1L (1 << 0)
#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
#define PADS_CTL_RX_DATA_EN_1L (1 << 10)

#define PADS_PLL_CTL_TEGRA20 0x000000B8
#define PADS_PLL_CTL_TEGRA30 0x000000B4
#define PADS_PLL_CTL_RST_B4SM (1 << 1)
#define PADS_PLL_CTL_LOCKDET (1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)

#define PADS_REFCLK_CFG0 0x000000C8
#define PADS_REFCLK_CFG1 0x000000CC

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT 2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
#define PADS_REFCLK_CFG_PREDI_SHIFT 8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */

/* Default value provided by HW engineering is 0xfa5c */
#define PADS_REFCLK_CFG_VALUE \
	( \
		(0x17 << PADS_REFCLK_CFG_TERM_SHIFT)   | \
		(0    << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
		(0xa  << PADS_REFCLK_CFG_PREDI_SHIFT)  | \
		(0xf  << PADS_REFCLK_CFG_DRVI_SHIFT)   \
	)
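/*
 * Sanity check of the composition above: (0xf << 12) | (0xa << 8) |
 * (0 << 7) | (0x17 << 2) = 0xf000 | 0xa00 | 0x0 | 0x5c = 0xfa5c, which
 * matches the default value quoted by HW engineering.
 */
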
struct tegra_msi {
        struct msi_chip chip;
        DECLARE_BITMAP(used, INT_PCI_MSI_NR);
        struct irq_domain *domain;
        unsigned long pages;
        struct mutex lock;
        int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc_data {
        unsigned int num_ports;
        unsigned int msi_base_shift;
        u32 pads_pll_ctl;
        u32 tx_ref_sel;
        bool has_pex_clkreq_en;
        bool has_pex_bias_ctrl;
        bool has_intr_prsnt_sense;
        bool has_avdd_supply;
        bool has_cml_clk;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
{
        return container_of(chip, struct tegra_msi, chip);
}

struct tegra_pcie {
        struct device *dev;

        void __iomem *pads;
        void __iomem *afi;
        int irq;

        struct list_head buses;
        struct resource *cs;

        struct resource io;
        struct resource mem;
        struct resource prefetch;
        struct resource busn;

        struct clk *pex_clk;
        struct clk *afi_clk;
        struct clk *pll_e;
        struct clk *cml_clk;

        struct reset_control *pex_rst;
        struct reset_control *afi_rst;
        struct reset_control *pcie_xrst;

        struct tegra_msi msi;

        struct list_head ports;
        unsigned int num_ports;
        u32 xbar_config;

        struct regulator *pex_clk_supply;
        struct regulator *vdd_supply;
        struct regulator *avdd_supply;

        const struct tegra_pcie_soc_data *soc_data;
};

struct tegra_pcie_port {
        struct tegra_pcie *pcie;
        struct list_head list;
        struct resource regs;
        void __iomem *base;
        unsigned int index;
        unsigned int lanes;
};

struct tegra_pcie_bus {
        struct vm_struct *area;
        struct list_head list;
        unsigned int nr;
};

static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
        return sys->private_data;
}

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
                              unsigned long offset)
{
        writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
        return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
                               unsigned long offset)
{
        writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
        return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 * To work around this, 1 MiB of virtual address space is allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * bus number bits are hidden so that the extended register number bits
 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 *
 *    [19:16] extended register number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 * address space via the MMU.
 */

static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
        return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
               (PCI_FUNC(devfn) << 8) | (where & 0xfc);
}

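/*
 * Example (illustrative only): for devfn = PCI_DEVFN(2, 1) and an extended
 * register at where = 0x144, the offset above works out to
 * (0x100 << 8) | (2 << 11) | (1 << 8) | 0x44 = 0x11144, i.e. the extended
 * register nibble lands in [19:16], device in [15:11], function in [10:8].
 */
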
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
                                                   unsigned int busnr)
{
        pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
                        L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
        phys_addr_t cs = pcie->cs->start;
        struct tegra_pcie_bus *bus;
        unsigned int i;
        int err;

        bus = kzalloc(sizeof(*bus), GFP_KERNEL);
        if (!bus)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&bus->list);
        bus->nr = busnr;

        /* allocate 1 MiB of virtual addresses */
        bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
        if (!bus->area) {
                err = -ENOMEM;
                goto free;
        }

        /* map each of the 16 chunks of 64 KiB each */
        for (i = 0; i < 16; i++) {
                unsigned long virt = (unsigned long)bus->area->addr +
                                     i * SZ_64K;
                phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;

                err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
                if (err < 0) {
                        dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
                                err);
                        goto unmap;
                }
        }

        return bus;

unmap:
        vunmap(bus->area->addr);
free:
        kfree(bus);
        return ERR_PTR(err);
}

/*
 * Look up a virtual address mapping for the specified bus number. If no such
 * mapping exists, try to create one.
 */
static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
                                        unsigned int busnr)
{
        struct tegra_pcie_bus *bus;

        list_for_each_entry(bus, &pcie->buses, list)
                if (bus->nr == busnr)
                        return (void __iomem *)bus->area->addr;

        bus = tegra_pcie_bus_alloc(pcie, busnr);
        if (IS_ERR(bus))
                return NULL;

        list_add_tail(&bus->list, &pcie->buses);

        return (void __iomem *)bus->area->addr;
}

static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
                                             unsigned int devfn,
                                             int where)
{
        struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
        void __iomem *addr = NULL;

        if (bus->number == 0) {
                unsigned int slot = PCI_SLOT(devfn);
                struct tegra_pcie_port *port;

                list_for_each_entry(port, &pcie->ports, list) {
                        if (port->index + 1 == slot) {
                                addr = port->base + (where & ~3);
                                break;
                        }
                }
        } else {
                addr = tegra_pcie_bus_map(pcie, bus->number);
                if (!addr) {
                        dev_err(pcie->dev,
                                "failed to map cfg. space for bus %u\n",
                                bus->number);
                        return NULL;
                }

                addr += tegra_pcie_conf_offset(devfn, where);
        }

        return addr;
}

static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
                                int where, int size, u32 *value)
{
        void __iomem *addr;

        addr = tegra_pcie_conf_address(bus, devfn, where);
        if (!addr) {
                *value = 0xffffffff;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        *value = readl(addr);

        if (size == 1)
                *value = (*value >> (8 * (where & 3))) & 0xff;
        else if (size == 2)
                *value = (*value >> (8 * (where & 3))) & 0xffff;

        return PCIBIOS_SUCCESSFUL;
}

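/*
 * Example (illustrative only): a 2-byte read at where = 0x06 hits the
 * aligned word at offset 0x04; the (*value >> (8 * (where & 3))) & 0xffff
 * step above then extracts bits [31:16] of that word.
 */
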
static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 value)
{
        void __iomem *addr;
        u32 mask, tmp;

        addr = tegra_pcie_conf_address(bus, devfn, where);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 4) {
                writel(value, addr);
                return PCIBIOS_SUCCESSFUL;
        }

        if (size == 2)
                mask = ~(0xffff << ((where & 0x3) * 8));
        else if (size == 1)
                mask = ~(0xff << ((where & 0x3) * 8));
        else
                return PCIBIOS_BAD_REGISTER_NUMBER;

        tmp = readl(addr) & mask;
        tmp |= value << ((where & 0x3) * 8);
        writel(tmp, addr);

        return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops tegra_pcie_ops = {
        .read = tegra_pcie_read_conf,
        .write = tegra_pcie_write_conf,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
        unsigned long ret = 0;

        switch (port->index) {
        case 0:
                ret = AFI_PEX0_CTRL;
                break;
        case 1:
                ret = AFI_PEX1_CTRL;
                break;
        case 2:
                ret = AFI_PEX2_CTRL;
                break;
        }

        return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
        unsigned long value;

        /* pulse reset signal */
        value = afi_readl(port->pcie, ctrl);
        value &= ~AFI_PEX_CTRL_RST;
        afi_writel(port->pcie, value, ctrl);

        usleep_range(1000, 2000);

        value = afi_readl(port->pcie, ctrl);
        value |= AFI_PEX_CTRL_RST;
        afi_writel(port->pcie, value, ctrl);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
        const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
        unsigned long value;

        /* enable reference clock */
        value = afi_readl(port->pcie, ctrl);
        value |= AFI_PEX_CTRL_REFCLK_EN;

        if (soc->has_pex_clkreq_en)
                value |= AFI_PEX_CTRL_CLKREQ_EN;

        afi_writel(port->pcie, value, ctrl);

        tegra_pcie_port_reset(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
        unsigned long value;

        /* assert port reset */
        value = afi_readl(port->pcie, ctrl);
        value &= ~AFI_PEX_CTRL_RST;
        afi_writel(port->pcie, value, ctrl);

        /* disable reference clock */
        value = afi_readl(port->pcie, ctrl);
        value &= ~AFI_PEX_CTRL_REFCLK_EN;
        afi_writel(port->pcie, value, ctrl);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
        struct tegra_pcie *pcie = port->pcie;

        devm_iounmap(pcie->dev, port->base);
        devm_release_mem_region(pcie->dev, port->regs.start,
                                resource_size(&port->regs));
        list_del(&port->list);
        devm_kfree(pcie->dev, port);
}

static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
        u16 reg;

        if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
                pci_read_config_word(dev, PCI_COMMAND, &reg);
                reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
                        PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
                pci_write_config_word(dev, PCI_COMMAND, reg);
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);

static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
        struct tegra_pcie *pcie = sys_to_pcie(sys);

        pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
        pci_add_resource_offset(&sys->resources, &pcie->prefetch,
                                sys->mem_offset);
        pci_add_resource(&sys->resources, &pcie->busn);

        pci_ioremap_io(nr * SZ_64K, pcie->io.start);

        return 1;
}

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
        struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
        int irq;

        tegra_cpuidle_pcie_irqs_in_use();

        irq = of_irq_parse_and_map_pci(pdev, slot, pin);
        if (!irq)
                irq = pcie->irq;

        return irq;
}

static void tegra_pcie_add_bus(struct pci_bus *bus)
{
        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);

                bus->msi = &pcie->msi.chip;
        }
}

static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
        struct tegra_pcie *pcie = sys_to_pcie(sys);
        struct pci_bus *bus;

        bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
                                  &sys->resources);
        if (!bus)
                return NULL;

        pci_scan_child_bus(bus);

        return bus;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
        const char *err_msg[] = {
                "Unknown",
                "AXI slave error",
                "AXI decode error",
                "Target abort",
                "Master abort",
                "Invalid write",
                "Legacy interrupt",
                "Response decoding error",
                "AXI response decoding error",
                "Transaction timeout",
        };
        struct tegra_pcie *pcie = arg;
        u32 code, signature;

        code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
        signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
        afi_writel(pcie, 0, AFI_INTR_CODE);

        if (code == AFI_INTR_LEGACY)
                return IRQ_NONE;

        if (code >= ARRAY_SIZE(err_msg))
                code = 0;

        /*
         * do not pollute kernel log with master abort reports since they
         * happen a lot during enumeration
         */
        if (code == AFI_INTR_MASTER_ABORT)
                dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
                        signature);
        else
                dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
                        signature);

        if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
            code == AFI_INTR_FPCI_DECODE_ERROR) {
                u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
                u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

                if (code == AFI_INTR_MASTER_ABORT)
                        dev_dbg(pcie->dev, "  FPCI address: %10llx\n", address);
                else
                        dev_err(pcie->dev, "  FPCI address: %10llx\n", address);
        }

        return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */

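/*
 * Note: judging by the values programmed below, the AFI_FPCI_BARn registers
 * appear to hold bits [39:8] of the FPCI base address, e.g. 0xfe100000
 * corresponds to the 0xfe10000000 type 1 extended configuration space and
 * 0xfdfc0000 to the 0xfdfc000000 I/O space.
 */
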
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
        u32 fpci_bar, size, axi_address;

        /* Bar 0: type 1 extended configuration space */
        fpci_bar = 0xfe100000;
        size = resource_size(pcie->cs);
        axi_address = pcie->cs->start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

        /* Bar 1: downstream IO bar */
        fpci_bar = 0xfdfc0000;
        size = resource_size(&pcie->io);
        axi_address = pcie->io.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

        /* Bar 2: prefetchable memory BAR */
        fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
        size = resource_size(&pcie->prefetch);
        axi_address = pcie->prefetch.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

        /* Bar 3: non prefetchable memory BAR */
        fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
        size = resource_size(&pcie->mem);
        axi_address = pcie->mem.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

        /* NULL out the remaining BARs as they are not used */
        afi_writel(pcie, 0, AFI_AXI_BAR4_START);
        afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
        afi_writel(pcie, 0, AFI_FPCI_BAR4);

        afi_writel(pcie, 0, AFI_AXI_BAR5_START);
        afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
        afi_writel(pcie, 0, AFI_FPCI_BAR5);

        /* map all upstream transactions as uncached */
        afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
        afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
        afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
        afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

        /* MSI translations are setup only when needed */
        afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
        afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
        afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
        afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
        struct tegra_pcie_port *port;
        unsigned int timeout;
        unsigned long value;

        /* power down PCIe slot clock bias pad */
        if (soc->has_pex_bias_ctrl)
                afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

        /* configure mode and disable all ports */
        value = afi_readl(pcie, AFI_PCIE_CONFIG);
        value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
        value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

        list_for_each_entry(port, &pcie->ports, list)
                value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

        afi_writel(pcie, value, AFI_PCIE_CONFIG);

        value = afi_readl(pcie, AFI_FUSE);
        value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
        afi_writel(pcie, value, AFI_FUSE);

        /* initialize internal PHY, enable up to 16 PCIE lanes */
        pads_writel(pcie, 0x0, PADS_CTL_SEL);

        /* override IDDQ to 1 on all 4 lanes */
        value = pads_readl(pcie, PADS_CTL);
        value |= PADS_CTL_IDDQ_1L;
        pads_writel(pcie, value, PADS_CTL);

        /*
         * Set up PHY PLL inputs select PLLE output as refclock,
         * set TX ref sel to div10 (not div5).
         */
        value = pads_readl(pcie, soc->pads_pll_ctl);
        value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
        value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
        pads_writel(pcie, value, soc->pads_pll_ctl);

        /* take PLL out of reset */
        value = pads_readl(pcie, soc->pads_pll_ctl);
        value |= PADS_PLL_CTL_RST_B4SM;
        pads_writel(pcie, value, soc->pads_pll_ctl);

        /* Configure the reference clock driver */
        value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
        pads_writel(pcie, value, PADS_REFCLK_CFG0);
        if (soc->num_ports > 2)
                pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

        /* wait for the PLL to lock */
        timeout = 300;
        do {
                value = pads_readl(pcie, soc->pads_pll_ctl);
                usleep_range(1000, 2000);
                if (--timeout == 0) {
                        pr_err("Tegra PCIe error: timeout waiting for PLL\n");
                        return -EBUSY;
                }
        } while (!(value & PADS_PLL_CTL_LOCKDET));

        /* turn off IDDQ override */
        value = pads_readl(pcie, PADS_CTL);
        value &= ~PADS_CTL_IDDQ_1L;
        pads_writel(pcie, value, PADS_CTL);

        /* enable TX/RX data */
        value = pads_readl(pcie, PADS_CTL);
        value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
        pads_writel(pcie, value, PADS_CTL);

        /* take the PCIe interface module out of reset */
        reset_control_deassert(pcie->pcie_xrst);

        /* finally enable PCIe */
        value = afi_readl(pcie, AFI_CONFIGURATION);
        value |= AFI_CONFIGURATION_EN_FPCI;
        afi_writel(pcie, value, AFI_CONFIGURATION);

        value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
                AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
                AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

        if (soc->has_intr_prsnt_sense)
                value |= AFI_INTR_EN_PRSNT_SENSE;

        afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
        afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

        /* don't enable MSI for now, only when needed */
        afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

        /* disable all exceptions */
        afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

        return 0;
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
        int err;

        /* TODO: disable and unprepare clocks? */

        reset_control_assert(pcie->pcie_xrst);
        reset_control_assert(pcie->afi_rst);
        reset_control_assert(pcie->pex_rst);

        tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

        if (soc->has_avdd_supply) {
                err = regulator_disable(pcie->avdd_supply);
                if (err < 0)
                        dev_warn(pcie->dev,
                                 "failed to disable AVDD regulator: %d\n",
                                 err);
        }

        err = regulator_disable(pcie->pex_clk_supply);
        if (err < 0)
                dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
                         err);

        err = regulator_disable(pcie->vdd_supply);
        if (err < 0)
                dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
                         err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
        int err;

        reset_control_assert(pcie->pcie_xrst);
        reset_control_assert(pcie->afi_rst);
        reset_control_assert(pcie->pex_rst);

        tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

        /* enable regulators */
        err = regulator_enable(pcie->vdd_supply);
        if (err < 0) {
                dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
                return err;
        }

        err = regulator_enable(pcie->pex_clk_supply);
        if (err < 0) {
                dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
                        err);
                return err;
        }

        if (soc->has_avdd_supply) {
                err = regulator_enable(pcie->avdd_supply);
                if (err < 0) {
                        dev_err(pcie->dev,
                                "failed to enable AVDD regulator: %d\n",
                                err);
                        return err;
                }
        }

        err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
                                                pcie->pex_clk,
                                                pcie->pex_rst);
        if (err) {
                dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
                return err;
        }

        reset_control_deassert(pcie->afi_rst);

        err = clk_prepare_enable(pcie->afi_clk);
        if (err < 0) {
                dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
                return err;
        }

        if (soc->has_cml_clk) {
                err = clk_prepare_enable(pcie->cml_clk);
                if (err < 0) {
                        dev_err(pcie->dev, "failed to enable CML clock: %d\n",
                                err);
                        return err;
                }
        }

        err = clk_prepare_enable(pcie->pll_e);
        if (err < 0) {
                dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
                return err;
        }

        return 0;
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc_data *soc = pcie->soc_data;

        pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
        if (IS_ERR(pcie->pex_clk))
                return PTR_ERR(pcie->pex_clk);

        pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
        if (IS_ERR(pcie->afi_clk))
                return PTR_ERR(pcie->afi_clk);

        pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
        if (IS_ERR(pcie->pll_e))
                return PTR_ERR(pcie->pll_e);

        if (soc->has_cml_clk) {
                pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
                if (IS_ERR(pcie->cml_clk))
                        return PTR_ERR(pcie->cml_clk);
        }

        return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
        pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
        if (IS_ERR(pcie->pex_rst))
                return PTR_ERR(pcie->pex_rst);

        pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
        if (IS_ERR(pcie->afi_rst))
                return PTR_ERR(pcie->afi_rst);

        pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
        if (IS_ERR(pcie->pcie_xrst))
                return PTR_ERR(pcie->pcie_xrst);

        return 0;
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
        struct platform_device *pdev = to_platform_device(pcie->dev);
        struct resource *pads, *afi, *res;
        int err;

        err = tegra_pcie_clocks_get(pcie);
        if (err) {
                dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
                return err;
        }

        err = tegra_pcie_resets_get(pcie);
        if (err) {
                dev_err(&pdev->dev, "failed to get resets: %d\n", err);
                return err;
        }

        err = tegra_pcie_power_on(pcie);
        if (err) {
                dev_err(&pdev->dev, "failed to power up: %d\n", err);
                return err;
        }

        pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
        pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
        if (IS_ERR(pcie->pads)) {
                err = PTR_ERR(pcie->pads);
                goto poweroff;
        }

        afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
        pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
        if (IS_ERR(pcie->afi)) {
                err = PTR_ERR(pcie->afi);
                goto poweroff;
        }

        /* request configuration space, but remap later, on demand */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
        if (!res) {
                err = -EADDRNOTAVAIL;
                goto poweroff;
        }

        pcie->cs = devm_request_mem_region(pcie->dev, res->start,
                                           resource_size(res), res->name);
        if (!pcie->cs) {
                err = -EADDRNOTAVAIL;
                goto poweroff;
        }

        /* request interrupt */
        err = platform_get_irq_byname(pdev, "intr");
        if (err < 0) {
                dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
                goto poweroff;
        }

        pcie->irq = err;

        err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
        if (err) {
                dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
                goto poweroff;
        }

        return 0;

poweroff:
        tegra_pcie_power_off(pcie);
        return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
        if (pcie->irq > 0)
                free_irq(pcie->irq, pcie);

        tegra_pcie_power_off(pcie);
        return 0;
}

static int tegra_msi_alloc(struct tegra_msi *chip)
{
        int msi;

        mutex_lock(&chip->lock);

        msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
        if (msi < INT_PCI_MSI_NR)
                set_bit(msi, chip->used);
        else
                msi = -ENOSPC;

        mutex_unlock(&chip->lock);

        return msi;
}

static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
{
        struct device *dev = chip->chip.dev;

        mutex_lock(&chip->lock);

        if (!test_bit(irq, chip->used))
                dev_err(dev, "trying to free unused MSI#%lu\n", irq);
        else
                clear_bit(irq, chip->used);

        mutex_unlock(&chip->lock);
}

static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
        struct tegra_pcie *pcie = data;
        struct tegra_msi *msi = &pcie->msi;
        unsigned int i, processed = 0;

        for (i = 0; i < 8; i++) {
                unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

                while (reg) {
                        unsigned int offset = find_first_bit(&reg, 32);
                        unsigned int index = i * 32 + offset;
                        unsigned int irq;

                        /* clear the interrupt */
                        afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

                        irq = irq_find_mapping(msi->domain, index);
                        if (irq) {
                                if (test_bit(index, msi->used))
                                        generic_handle_irq(irq);
                                else
                                        dev_info(pcie->dev, "unhandled MSI\n");
                        } else {
                                /*
                                 * that's weird who triggered this?
                                 * just clear it
                                 */
                                dev_info(pcie->dev, "unexpected MSI\n");
                        }

                        /* see if there's any more pending in this vector */
                        reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

                        processed++;
                }
        }

        return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}

static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
                               struct msi_desc *desc)
{
        struct tegra_msi *msi = to_tegra_msi(chip);
        struct msi_msg msg;
        unsigned int irq;
        int hwirq;

        hwirq = tegra_msi_alloc(msi);
        if (hwirq < 0)
                return hwirq;

        irq = irq_create_mapping(msi->domain, hwirq);
        if (!irq)
                return -EINVAL;

        irq_set_msi_desc(irq, desc);

        msg.address_lo = virt_to_phys((void *)msi->pages);
        /* 32 bit address only */
        msg.address_hi = 0;
        msg.data = hwirq;

        write_msi_msg(irq, &msg);

        return 0;
}

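/*
 * The MSI target address here is simply the physical address of the page
 * allocated in tegra_pcie_enable_msi() (also programmed into
 * AFI_MSI_AXI_BAR_ST), and the message data is the hardware vector number;
 * the AFI presumably latches such writes into the AFI_MSI_VECn registers
 * that tegra_pcie_msi_irq() scans.
 */
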
static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
        struct tegra_msi *msi = to_tegra_msi(chip);
        struct irq_data *d = irq_get_irq_data(irq);

        tegra_msi_free(msi, d->hwirq);
}

static struct irq_chip tegra_msi_irq_chip = {
        .name = "Tegra PCIe MSI",
        .irq_enable = unmask_msi_irq,
        .irq_disable = mask_msi_irq,
        .irq_mask = mask_msi_irq,
        .irq_unmask = unmask_msi_irq,
};

static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
                         irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
        irq_set_chip_data(irq, domain->host_data);
        set_irq_flags(irq, IRQF_VALID);

        tegra_cpuidle_pcie_irqs_in_use();

        return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
        .map = tegra_msi_map,
};

static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
        struct platform_device *pdev = to_platform_device(pcie->dev);
        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
        struct tegra_msi *msi = &pcie->msi;
        unsigned long base;
        int err;
        u32 reg;

        mutex_init(&msi->lock);

        msi->chip.dev = pcie->dev;
        msi->chip.setup_irq = tegra_msi_setup_irq;
        msi->chip.teardown_irq = tegra_msi_teardown_irq;

        msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
                                            &msi_domain_ops, &msi->chip);
        if (!msi->domain) {
                dev_err(&pdev->dev, "failed to create IRQ domain\n");
                return -ENOMEM;
        }

        err = platform_get_irq_byname(pdev, "msi");
        if (err < 0) {
                dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
                goto err;
        }

        msi->irq = err;

        err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
                          tegra_msi_irq_chip.name, pcie);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
                goto err;
        }

        /* setup AFI/FPCI range */
        msi->pages = __get_free_pages(GFP_KERNEL, 0);
        base = virt_to_phys((void *)msi->pages);

        afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
        afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
        /* this register is in 4K increments */
        afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

        /* enable all MSI vectors */
        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

        /* and unmask the MSI interrupt */
        reg = afi_readl(pcie, AFI_INTR_MASK);
        reg |= AFI_INTR_MASK_MSI_MASK;
        afi_writel(pcie, reg, AFI_INTR_MASK);

        return 0;

err:
        irq_domain_remove(msi->domain);
        return err;
}

static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
        struct tegra_msi *msi = &pcie->msi;
        unsigned int i, irq;
        u32 value;

        /* mask the MSI interrupt */
        value = afi_readl(pcie, AFI_INTR_MASK);
        value &= ~AFI_INTR_MASK_MSI_MASK;
        afi_writel(pcie, value, AFI_INTR_MASK);

        /* disable all MSI vectors */
        afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
        afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
        afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
        afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
        afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
        afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
        afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
        afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

        free_pages(msi->pages, 0);

        if (msi->irq > 0)
                free_irq(msi->irq, pcie);

        for (i = 0; i < INT_PCI_MSI_NR; i++) {
                irq = irq_find_mapping(msi->domain, i);
                if (irq > 0)
                        irq_dispose_mapping(irq);
        }

        irq_domain_remove(msi->domain);

        return 0;
}

static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
                                      u32 *xbar)
{
        struct device_node *np = pcie->dev->of_node;

        if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
                switch (lanes) {
                case 0x00000204:
                        dev_info(pcie->dev, "4x1, 2x1 configuration\n");
                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
                        return 0;

                case 0x00020202:
                        dev_info(pcie->dev, "2x3 configuration\n");
                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
                        return 0;

                case 0x00010104:
                        dev_info(pcie->dev, "4x1, 1x2 configuration\n");
                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
                        return 0;
                }
        } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
                switch (lanes) {
                case 0x00000004:
                        dev_info(pcie->dev, "single-mode configuration\n");
                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
                        return 0;

                case 0x00000104:
                        dev_info(pcie->dev, "dual-mode configuration\n");
                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
                        return 0;
                }
        }

        return -EINVAL;
}

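/*
 * The "lanes" argument packs one byte per root port (see the
 * "lanes |= value << (index << 3)" accumulation in tegra_pcie_parse_dt()
 * below): for example, two ports with 2 lanes each would yield 0x00000202.
 */
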
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
        struct device_node *np = pcie->dev->of_node, *port;
        struct of_pci_range_parser parser;
        struct of_pci_range range;
        struct resource res;
        u32 lanes = 0;
        int err;

        if (of_pci_range_parser_init(&parser, np)) {
                dev_err(pcie->dev, "missing \"ranges\" property\n");
                return -EINVAL;
        }

        pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
        if (IS_ERR(pcie->vdd_supply))
                return PTR_ERR(pcie->vdd_supply);

        pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
        if (IS_ERR(pcie->pex_clk_supply))
                return PTR_ERR(pcie->pex_clk_supply);

        if (soc->has_avdd_supply) {
                pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
                if (IS_ERR(pcie->avdd_supply))
                        return PTR_ERR(pcie->avdd_supply);
        }

        for_each_of_pci_range(&parser, &range) {
                of_pci_range_to_resource(&range, np, &res);

                switch (res.flags & IORESOURCE_TYPE_BITS) {
                case IORESOURCE_IO:
                        memcpy(&pcie->io, &res, sizeof(res));
                        pcie->io.name = "I/O";
                        break;

                case IORESOURCE_MEM:
                        if (res.flags & IORESOURCE_PREFETCH) {
                                memcpy(&pcie->prefetch, &res, sizeof(res));
                                pcie->prefetch.name = "PREFETCH";
                        } else {
                                memcpy(&pcie->mem, &res, sizeof(res));
                                pcie->mem.name = "MEM";
                        }
                        break;
                }
        }

        err = of_pci_parse_bus_range(np, &pcie->busn);
        if (err < 0) {
                dev_err(pcie->dev, "failed to parse ranges property: %d\n",
                        err);
                pcie->busn.name = np->name;
                pcie->busn.start = 0;
                pcie->busn.end = 0xff;
                pcie->busn.flags = IORESOURCE_BUS;
        }

        /* parse root ports */
        for_each_child_of_node(np, port) {
                struct tegra_pcie_port *rp;
                unsigned int index;
                u32 value;

                err = of_pci_get_devfn(port);
                if (err < 0) {
                        dev_err(pcie->dev, "failed to parse address: %d\n",
                                err);
                        return err;
                }

                index = PCI_SLOT(err);

                if (index < 1 || index > soc->num_ports) {
                        dev_err(pcie->dev, "invalid port number: %d\n", index);
                        return -EINVAL;
                }

                index--;

                err = of_property_read_u32(port, "nvidia,num-lanes", &value);
                if (err < 0) {
                        dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
                                err);
                        return err;
                }

                if (value > 16) {
                        dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
                        return -EINVAL;
                }

                lanes |= value << (index << 3);

                if (!of_device_is_available(port))
                        continue;

                rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
                if (!rp)
                        return -ENOMEM;

                err = of_address_to_resource(port, 0, &rp->regs);
                if (err < 0) {
                        dev_err(pcie->dev, "failed to parse address: %d\n",
                                err);
                        return err;
                }

                INIT_LIST_HEAD(&rp->list);
                rp->index = index;
                rp->lanes = value;
                rp->pcie = pcie;

                rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
                if (IS_ERR(rp->base))
                        return PTR_ERR(rp->base);

                list_add_tail(&rp->list, &pcie->ports);
        }

        err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
        if (err < 0) {
                dev_err(pcie->dev, "invalid lane configuration\n");
                return err;
        }

        return 0;
}

/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */

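/*
 * 200 polls at 1-2 ms each give roughly 0.2-0.4 s per polling loop; with the
 * three retries in tegra_pcie_port_check_link() below, that appears to be
 * where the "up to 1.2 seconds" figure comes from (200 * 2 ms * 3).
 */
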
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
        unsigned int retries = 3;
        unsigned long value;

        do {
                unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

                do {
                        value = readl(port->base + RP_VEND_XP);

                        if (value & RP_VEND_XP_DL_UP)
                                break;

                        usleep_range(1000, 2000);
                } while (--timeout);

                if (!timeout) {
                        dev_err(port->pcie->dev, "link %u down, retrying\n",
                                port->index);
                        goto retry;
                }

                timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

                do {
                        value = readl(port->base + RP_LINK_CONTROL_STATUS);

                        if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
                                return true;

                        usleep_range(1000, 2000);
                } while (--timeout);

retry:
                tegra_pcie_port_reset(port);
        } while (--retries);

        return false;
}

static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
        struct tegra_pcie_port *port, *tmp;
        struct hw_pci hw;

        list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
                dev_info(pcie->dev, "probing port %u, using %u lanes\n",
                         port->index, port->lanes);

                tegra_pcie_port_enable(port);

                if (tegra_pcie_port_check_link(port))
                        continue;

                dev_info(pcie->dev, "link %u down, ignoring\n", port->index);

                tegra_pcie_port_disable(port);
                tegra_pcie_port_free(port);
        }

        memset(&hw, 0, sizeof(hw));

        hw.nr_controllers = 1;
        hw.private_data = (void **)&pcie;
        hw.setup = tegra_pcie_setup;
        hw.map_irq = tegra_pcie_map_irq;
        hw.add_bus = tegra_pcie_add_bus;
        hw.scan = tegra_pcie_scan_bus;
        hw.ops = &tegra_pcie_ops;

        pci_common_init_dev(pcie->dev, &hw);

        return 0;
}

static const struct tegra_pcie_soc_data tegra20_pcie_data = {
        .num_ports = 2,
        .msi_base_shift = 0,
        .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
        .has_pex_clkreq_en = false,
        .has_pex_bias_ctrl = false,
        .has_intr_prsnt_sense = false,
        .has_avdd_supply = false,
        .has_cml_clk = false,
};

static const struct tegra_pcie_soc_data tegra30_pcie_data = {
        .num_ports = 3,
        .msi_base_shift = 8,
        .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
        .has_pex_clkreq_en = true,
        .has_pex_bias_ctrl = true,
        .has_intr_prsnt_sense = true,
        .has_avdd_supply = true,
        .has_cml_clk = true,
};

static const struct of_device_id tegra_pcie_of_match[] = {
        { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
        { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
        { },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);

static int tegra_pcie_probe(struct platform_device *pdev)
{
        const struct of_device_id *match;
        struct tegra_pcie *pcie;
        int err;

        match = of_match_device(tegra_pcie_of_match, &pdev->dev);
        if (!match)
                return -ENODEV;

        pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
        if (!pcie)
                return -ENOMEM;

        INIT_LIST_HEAD(&pcie->buses);
        INIT_LIST_HEAD(&pcie->ports);
        pcie->soc_data = match->data;
        pcie->dev = &pdev->dev;

        err = tegra_pcie_parse_dt(pcie);
        if (err < 0)
                return err;

        pcibios_min_mem = 0;

        err = tegra_pcie_get_resources(pcie);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to request resources: %d\n", err);
                return err;
        }

        err = tegra_pcie_enable_controller(pcie);
        if (err)
                goto put_resources;

        /* setup the AFI address translations */
        tegra_pcie_setup_translations(pcie);

        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                err = tegra_pcie_enable_msi(pcie);
                if (err < 0) {
                        dev_err(&pdev->dev,
                                "failed to enable MSI support: %d\n",
                                err);
                        goto put_resources;
                }
        }

        err = tegra_pcie_enable(pcie);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
                goto disable_msi;
        }

        platform_set_drvdata(pdev, pcie);
        return 0;

disable_msi:
        if (IS_ENABLED(CONFIG_PCI_MSI))
                tegra_pcie_disable_msi(pcie);
put_resources:
        tegra_pcie_put_resources(pcie);
        return err;
}

static struct platform_driver tegra_pcie_driver = {
        .driver = {
                .name = "tegra-pcie",
                .owner = THIS_MODULE,
                .of_match_table = tegra_pcie_of_match,
                .suppress_bind_attrs = true,
        },
        .probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);

MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
MODULE_LICENSE("GPL v2");