powercap: restrict energy meter to root access
[linux/fpc-iii.git] / drivers / pci / host / pci-tegra.c
blob90be00c1bab995e2dbcf2a3760bf37866455dd0b
1 /*
2 * PCIe host controller driver for Tegra SoCs
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
10 * Bits taken from arch/arm/mach-dove/pcie.c
12 * Author: Thierry Reding <treding@nvidia.com>
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
19 * This program is distributed in the hope that it will be useful, but WITHOUT
20 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22 * more details.
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
29 #include <linux/clk.h>
30 #include <linux/debugfs.h>
31 #include <linux/delay.h>
32 #include <linux/export.h>
33 #include <linux/interrupt.h>
34 #include <linux/irq.h>
35 #include <linux/irqdomain.h>
36 #include <linux/kernel.h>
37 #include <linux/init.h>
38 #include <linux/msi.h>
39 #include <linux/of_address.h>
40 #include <linux/of_pci.h>
41 #include <linux/of_platform.h>
42 #include <linux/pci.h>
43 #include <linux/phy/phy.h>
44 #include <linux/platform_device.h>
45 #include <linux/reset.h>
46 #include <linux/sizes.h>
47 #include <linux/slab.h>
48 #include <linux/vmalloc.h>
49 #include <linux/regulator/consumer.h>
51 #include <soc/tegra/cpuidle.h>
52 #include <soc/tegra/pmc.h>
54 #include <asm/mach/irq.h>
55 #include <asm/mach/map.h>
56 #include <asm/mach/pci.h>
/* 8 MSI registers of 32 vectors each */
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

/* AFI AXI-side BAR sizes and start addresses */
#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

/* FPCI-side BAR addresses paired with the AXI BARs above */
#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

/* cacheable upstream window configuration */
#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

/* MSI target window (FPCI and AXI sides) */
#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

/* per-bank MSI vector status registers */
#define AFI_MSI_VEC0	0x6c
#define AFI_MSI_VEC1	0x70
#define AFI_MSI_VEC2	0x74
#define AFI_MSI_VEC3	0x78
#define AFI_MSI_VEC4	0x7c
#define AFI_MSI_VEC5	0x80
#define AFI_MSI_VEC6	0x84
#define AFI_MSI_VEC7	0x88

/* per-bank MSI vector enable registers */
#define AFI_MSI_EN_VEC0	0x8c
#define AFI_MSI_EN_VEC1	0x90
#define AFI_MSI_EN_VEC2	0x94
#define AFI_MSI_EN_VEC3	0x98
#define AFI_MSI_EN_VEC4	0x9c
#define AFI_MSI_EN_VEC5	0xa0
#define AFI_MSI_EN_VEC6	0xa4
#define AFI_MSI_EN_VEC7	0xa8

#define AFI_CONFIGURATION		0xac
#define AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define AFI_INTR_MASK_INT_MASK	(1 << 0)
#define AFI_INTR_MASK_MSI_MASK	(1 << 8)

/* AFI_INTR_CODE values identify the cause of an AFI interrupt */
#define AFI_INTR_CODE			0xb8
#define AFI_INTR_CODE_MASK		0xf
#define AFI_INTR_INI_SLAVE_ERROR	1
#define AFI_INTR_INI_DECODE_ERROR	2
#define AFI_INTR_TARGET_ABORT		3
#define AFI_INTR_MASTER_ABORT		4
#define AFI_INTR_INVALID_WRITE		5
#define AFI_INTR_LEGACY			6
#define AFI_INTR_FPCI_DECODE_ERROR	7
#define AFI_INTR_AXI_DECODE_ERROR	8
#define AFI_INTR_FPCI_TIMEOUT		9
#define AFI_INTR_PE_PRSNT_SENSE		10
#define AFI_INTR_PE_CLKREQ_SENSE	11
#define AFI_INTR_CLKCLAMP_SENSE		12
#define AFI_INTR_RDY4PD_SENSE		13
#define AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define AFI_SM_INTR_INTA_ASSERT		(1 << 0)
#define AFI_SM_INTR_INTB_ASSERT		(1 << 1)
#define AFI_SM_INTR_INTC_ASSERT		(1 << 2)
#define AFI_SM_INTR_INTD_ASSERT		(1 << 3)
#define AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define AFI_INTR_EN_INI_DECERR		(1 << 1)
#define AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define AFI_INTR_EN_PRSNT_SENSE		(1 << 8)

/* per-SoC lane crossbar configuration lives in bits [23:20] */
#define AFI_PCIE_CONFIG					0x0f8
#define AFI_PCIE_CONFIG_PCIE_DISABLE(x)			(1 << ((x) + 1))
#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420		(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222		(0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411		(0x2 << 20)

#define AFI_FUSE			0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

/* per-port control registers (note: non-uniform stride) */
#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX2_CTRL			0x128
#define AFI_PEX_CTRL_RST		(1 << 0)
#define AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL				0x160
#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL	(1 << 9)
#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN		(1 << 1)

#define AFI_PEXBIAS_CTRL_0	0x168

/* root-port vendor registers (accessed through the port register space) */
#define RP_VEND_XP	0x00000f00
#define RP_VEND_XP_DL_UP	(1 << 30)

#define RP_PRIV_MISC	0x00000fe0
#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

/* PHY pads registers */
#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define PADS_CTL_IDDQ_1L	(1 << 0)
#define PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define PADS_CTL_RX_DATA_EN_1L	(1 << 10)

/* the PLL control register moved between Tegra20 and Tegra30 */
#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define PADS_PLL_CTL_LOCKDET			(1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0	0x000000c8
#define PADS_REFCLK_CFG1	0x000000cc
#define PADS_REFCLK_BIAS	0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
/* MSI controller state: one bitmap slot per supported vector */
struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated MSI vectors */
	struct irq_domain *domain;
	unsigned long pages;			/* backing pages for the MSI target address */
	struct mutex lock;			/* protects the "used" bitmap */
	int irq;				/* parent interrupt for all MSIs */
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc {
	unsigned int num_ports;		/* number of root ports on this SoC */
	unsigned int msi_base_shift;
	u32 pads_pll_ctl;		/* offset of the PADS PLL control register */
	u32 tx_ref_sel;			/* TX clock reference divider selection */
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;			/* SoC supports PCIe Gen2 signaling */
};

static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}

/* per-controller driver state */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;		/* PHY pads register space */
	void __iomem *afi;		/* AFI (AXI-to-FPCI bridge) register space */
	int irq;

	struct list_head buses;		/* cached per-bus config-space mappings */
	struct resource *cs;		/* physical configuration space aperture */

	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource prefetch;
	struct resource busn;

	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;		/* single PHY handle vs. per-lane PHYs */
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;		/* list of tegra_pcie_port */
	u32 xbar_config;		/* lane crossbar setting for AFI_PCIE_CONFIG */

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

/* one root port instance */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;		/* port number, selects AFI_PEXn_CTRL */
	unsigned int lanes;

	struct phy **phys;		/* one PHY per lane (non-legacy mode) */
};

/* cached virtual mapping of one bus' configuration space */
struct tegra_pcie_bus {
	struct vm_struct *area;
	struct list_head list;
	unsigned int nr;		/* bus number this mapping belongs to */
};
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

/* accessors for the AFI register space */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

/* accessors for the PHY pads register space */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
353 * The configuration space mapping on Tegra is somewhat similar to the ECAM
354 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
355 * register accesses are mapped:
357 * [27:24] extended register number
358 * [23:16] bus number
359 * [15:11] device number
360 * [10: 8] function number
361 * [ 7: 0] register number
363 * Mapping the whole extended configuration space would require 256 MiB of
364 * virtual address space, only a small part of which will actually be used.
365 * To work around this, 1 MiB of virtual address space is allocated per bus
366 * when the bus is first accessed. When the physical range is mapped, the
367 * bus number bits are hidden so that the extended register number bits
368 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
370 * [19:16] extended register number
371 * [15:11] device number
372 * [10: 8] function number
373 * [ 7: 0] register number
375 * This is achieved by stitching together 16 chunks of 64 KiB of physical
376 * address space via the MMU.
/*
 * Compute the offset into a per-bus virtual mapping for the given device,
 * function and register (see the layout description above).
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long ext = (where & 0xf00) << 8;	/* bits [19:16] */
	unsigned long slot = PCI_SLOT(devfn) << 11;	/* bits [15:11] */
	unsigned long func = PCI_FUNC(devfn) << 8;	/* bits [10: 8] */
	unsigned long reg = where & 0xfc;		/* dword-aligned register */

	return ext | slot | func | reg;
}
/*
 * Allocate and map the 1 MiB configuration-space window for bus @busnr.
 *
 * The window is stitched together from 16 chunks of 64 KiB of physical
 * address space so that the extended register bits line up as described
 * in the comment above tegra_pcie_conf_offset().
 *
 * Returns the new mapping on success or an ERR_PTR() on failure.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	struct device *dev = pcie->dev;
	/* strongly-ordered, non-executable device mapping */
	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				 L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		/*
		 * Each extended-register chunk is SZ_16M apart physically;
		 * the bus number selects a 64 KiB slice within a chunk.
		 */
		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(dev, "ioremap_page_range() failed: %d\n", err);
			goto unmap;
		}
	}

	return bus;

unmap:
	/* also tears down any chunks mapped before the failure */
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
431 static int tegra_pcie_add_bus(struct pci_bus *bus)
433 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
434 struct tegra_pcie_bus *b;
436 b = tegra_pcie_bus_alloc(pcie, bus->number);
437 if (IS_ERR(b))
438 return PTR_ERR(b);
440 list_add_tail(&b->list, &pcie->buses);
442 return 0;
445 static void tegra_pcie_remove_bus(struct pci_bus *child)
447 struct tegra_pcie *pcie = sys_to_pcie(child->sysdata);
448 struct tegra_pcie_bus *bus, *tmp;
450 list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
451 if (bus->nr == child->number) {
452 vunmap(bus->area->addr);
453 list_del(&bus->list);
454 kfree(bus);
455 break;
/*
 * pci_ops .map_bus callback: translate (bus, devfn, where) into a virtual
 * address for config-space access.
 *
 * Bus 0 accesses go directly to the root-port registers (slot N maps to
 * port index N - 1); downstream buses use the cached 1 MiB mappings set
 * up by tegra_pcie_add_bus().
 *
 * Returns NULL when no port/mapping matches, which makes the generic
 * accessors fail the access.
 */
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	struct device *dev = pcie->dev;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				/* dword-aligned access into the port registers */
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		struct tegra_pcie_bus *b;

		list_for_each_entry(b, &pcie->buses, list)
			if (b->nr == bus->number)
				addr = (void __iomem *)b->area->addr;

		if (!addr) {
			dev_err(dev, "failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}
/*
 * Config-space accessors. The hardware only supports 32-bit accesses,
 * hence the pci_generic_config_*32 read/write helpers.
 */
static struct pci_ops tegra_pcie_ops = {
	.add_bus = tegra_pcie_add_bus,
	.remove_bus = tegra_pcie_remove_bus,
	.map_bus = tegra_pcie_map_bus,
	.read = pci_generic_config_read32,
	.write = pci_generic_config_write32,
};
505 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
507 unsigned long ret = 0;
509 switch (port->index) {
510 case 0:
511 ret = AFI_PEX0_CTRL;
512 break;
514 case 1:
515 ret = AFI_PEX1_CTRL;
516 break;
518 case 2:
519 ret = AFI_PEX2_CTRL;
520 break;
523 return ret;
/* Pulse the per-port reset line (active low: clear, wait, set). */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}

/* Enable the port's reference clock and take it through a reset pulse. */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}

/* Assert the port's reset and gate its reference clock. */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}

/* Release all devm-managed resources associated with @port. */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
/* 0x0bf0/0x0bf1: Tegra20, 0x0e1c/0x0e1d: Tegra30 root ports */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
/*
 * hw_pci .setup callback: claim and register the controller's host-bridge
 * windows (I/O, memory, prefetchable memory, bus numbers).
 *
 * Returns 1 so the ARM PCI core proceeds with scanning this controller.
 */
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	struct device *dev = pcie->dev;
	int err;

	sys->mem_offset = pcie->offset.mem;
	sys->io_offset = pcie->offset.io;

	err = devm_request_resource(dev, &iomem_resource, &pcie->io);
	if (err < 0)
		return err;

	/*
	 * If the I/O space cannot be remapped the port I/O window is simply
	 * not registered; enumeration continues with memory resources only.
	 */
	err = pci_remap_iospace(&pcie->pio, pcie->io.start);
	if (!err)
		pci_add_resource_offset(&sys->resources, &pcie->pio,
					sys->io_offset);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, &sys->resources);
	if (err < 0)
		return err;

	return 1;
}
646 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
648 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
649 int irq;
651 tegra_cpuidle_pcie_irqs_in_use();
653 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
654 if (!irq)
655 irq = pcie->irq;
657 return irq;
660 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
662 const char *err_msg[] = {
663 "Unknown",
664 "AXI slave error",
665 "AXI decode error",
666 "Target abort",
667 "Master abort",
668 "Invalid write",
669 "Legacy interrupt",
670 "Response decoding error",
671 "AXI response decoding error",
672 "Transaction timeout",
673 "Slot present pin change",
674 "Slot clock request change",
675 "TMS clock ramp change",
676 "TMS ready for power down",
677 "Peer2Peer error",
679 struct tegra_pcie *pcie = arg;
680 struct device *dev = pcie->dev;
681 u32 code, signature;
683 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
684 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
685 afi_writel(pcie, 0, AFI_INTR_CODE);
687 if (code == AFI_INTR_LEGACY)
688 return IRQ_NONE;
690 if (code >= ARRAY_SIZE(err_msg))
691 code = 0;
694 * do not pollute kernel log with master abort reports since they
695 * happen a lot during enumeration
697 if (code == AFI_INTR_MASTER_ABORT)
698 dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
699 else
700 dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
702 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
703 code == AFI_INTR_FPCI_DECODE_ERROR) {
704 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
705 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
707 if (code == AFI_INTR_MASTER_ABORT)
708 dev_dbg(dev, " FPCI address: %10llx\n", address);
709 else
710 dev_err(dev, " FPCI address: %10llx\n", address);
713 return IRQ_HANDLED;
717 * FPCI map is as follows:
718 * - 0xfdfc000000: I/O space
719 * - 0xfdfe000000: type 0 configuration space
720 * - 0xfdff000000: type 1 configuration space
721 * - 0xfe00000000: type 0 extended configuration space
722 * - 0xfe10000000: type 1 extended configuration space
/*
 * Program the AFI address-translation BARs that map AXI windows onto the
 * FPCI address map (see the FPCI layout comment above). Sizes are written
 * in 4 KiB units (hence the >> 12), and the FPCI BAR values hold the
 * upper bits of the FPCI address.
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR (1:1 translation, bit 0 enables it) */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/* NOTE(review): AFI_MSI_BAR_SZ is written twice here — looks
	 * intentional in the original sequence, but worth confirming. */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
782 static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
784 const struct tegra_pcie_soc *soc = pcie->soc;
785 u32 value;
787 timeout = jiffies + msecs_to_jiffies(timeout);
789 while (time_before(jiffies, timeout)) {
790 value = pads_readl(pcie, soc->pads_pll_ctl);
791 if (value & PADS_PLL_CTL_LOCKDET)
792 return 0;
795 return -ETIMEDOUT;
/*
 * Bring up the legacy (built-in) PHY: select pads, configure and lock the
 * PLL, then enable the TX/RX data paths.
 *
 * Returns 0 on success or a negative error code if the PLL fails to lock.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

/* Reverse of tegra_pcie_phy_enable(): disable data paths, reset the PLL. */
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}
/*
 * Power on all per-lane PHYs of @port.
 *
 * NOTE(review): on failure, lanes powered on before the failing one are
 * left powered; the caller's error path is relied upon for cleanup —
 * confirm against the callers before adding local unwinding.
 */
static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

/* Power off all per-lane PHYs of @port; stops at the first failure. */
static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}
/*
 * Power on the controller's PHY(s).
 *
 * Legacy mode uses either a single PHY from the PHY framework or the
 * built-in pads programming (tegra_pcie_phy_enable()); otherwise each
 * port's per-lane PHYs are powered individually and the reference clock
 * driver is configured afterwards.
 */
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	/* CFG1 holds the entries for ports 2+ (16 bits per port) */
	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}

/* Counterpart of tegra_pcie_phy_power_on(). */
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}
/*
 * Initialize the AFI bridge and bring the PCIe controller out of reset:
 * configure the lane crossbar, enable only the ports that are in use,
 * power on the PHY(s), and unmask the error interrupts.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* then re-enable only the ports described in the device tree */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	err = tegra_pcie_phy_power_on(pcie);
	if (err < 0) {
		dev_err(dev, "failed to power on PHY(s): %d\n", err);
		return err;
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
/*
 * Power the controller down: PHYs off, all resets asserted, power domain
 * gated, regulators released. Errors are logged but not propagated since
 * power-off must run to completion regardless.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* TODO: disable and unprepare clocks? */

	err = tegra_pcie_phy_power_off(pcie);
	if (err < 0)
		dev_err(dev, "failed to power off PHY(s): %d\n", err);

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}
/*
 * Power the controller up: assert resets, enable supplies, run the
 * powergate power-up sequence, then enable the AFI/CML/PLLE clocks.
 *
 * NOTE(review): a regulator_bulk_enable() failure is only logged and
 * execution continues; later error returns also leave previously enabled
 * supplies/clocks on. Verify against the callers' cleanup paths before
 * changing this behavior.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
1128 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1130 struct device *dev = pcie->dev;
1131 const struct tegra_pcie_soc *soc = pcie->soc;
1133 pcie->pex_clk = devm_clk_get(dev, "pex");
1134 if (IS_ERR(pcie->pex_clk))
1135 return PTR_ERR(pcie->pex_clk);
1137 pcie->afi_clk = devm_clk_get(dev, "afi");
1138 if (IS_ERR(pcie->afi_clk))
1139 return PTR_ERR(pcie->afi_clk);
1141 pcie->pll_e = devm_clk_get(dev, "pll_e");
1142 if (IS_ERR(pcie->pll_e))
1143 return PTR_ERR(pcie->pll_e);
1145 if (soc->has_cml_clk) {
1146 pcie->cml_clk = devm_clk_get(dev, "cml");
1147 if (IS_ERR(pcie->cml_clk))
1148 return PTR_ERR(pcie->cml_clk);
1151 return 0;
1154 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1156 struct device *dev = pcie->dev;
1158 pcie->pex_rst = devm_reset_control_get(dev, "pex");
1159 if (IS_ERR(pcie->pex_rst))
1160 return PTR_ERR(pcie->pex_rst);
1162 pcie->afi_rst = devm_reset_control_get(dev, "afi");
1163 if (IS_ERR(pcie->afi_rst))
1164 return PTR_ERR(pcie->afi_rst);
1166 pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
1167 if (IS_ERR(pcie->pcie_xrst))
1168 return PTR_ERR(pcie->pcie_xrst);
1170 return 0;
1173 static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
1175 struct device *dev = pcie->dev;
1176 int err;
1178 pcie->phy = devm_phy_optional_get(dev, "pcie");
1179 if (IS_ERR(pcie->phy)) {
1180 err = PTR_ERR(pcie->phy);
1181 dev_err(dev, "failed to get PHY: %d\n", err);
1182 return err;
1185 err = phy_init(pcie->phy);
1186 if (err < 0) {
1187 dev_err(dev, "failed to initialize PHY: %d\n", err);
1188 return err;
1191 pcie->legacy_phy = true;
1193 return 0;
1196 static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1197 struct device_node *np,
1198 const char *consumer,
1199 unsigned int index)
1201 struct phy *phy;
1202 char *name;
1204 name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1205 if (!name)
1206 return ERR_PTR(-ENOMEM);
1208 phy = devm_of_phy_get(dev, np, name);
1209 kfree(name);
1211 if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
1212 phy = NULL;
1214 return phy;
1217 static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1219 struct device *dev = port->pcie->dev;
1220 struct phy *phy;
1221 unsigned int i;
1222 int err;
1224 port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1225 if (!port->phys)
1226 return -ENOMEM;
1228 for (i = 0; i < port->lanes; i++) {
1229 phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1230 if (IS_ERR(phy)) {
1231 dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1232 PTR_ERR(phy));
1233 return PTR_ERR(phy);
1236 err = phy_init(phy);
1237 if (err < 0) {
1238 dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1239 err);
1240 return err;
1243 port->phys[i] = phy;
1246 return 0;
1249 static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
1251 const struct tegra_pcie_soc *soc = pcie->soc;
1252 struct device_node *np = pcie->dev->of_node;
1253 struct tegra_pcie_port *port;
1254 int err;
1256 if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
1257 return tegra_pcie_phys_get_legacy(pcie);
1259 list_for_each_entry(port, &pcie->ports, list) {
1260 err = tegra_pcie_port_get_phys(port);
1261 if (err < 0)
1262 return err;
1265 return 0;
1268 static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1270 struct device *dev = pcie->dev;
1271 struct platform_device *pdev = to_platform_device(dev);
1272 struct resource *pads, *afi, *res;
1273 int err;
1275 err = tegra_pcie_clocks_get(pcie);
1276 if (err) {
1277 dev_err(dev, "failed to get clocks: %d\n", err);
1278 return err;
1281 err = tegra_pcie_resets_get(pcie);
1282 if (err) {
1283 dev_err(dev, "failed to get resets: %d\n", err);
1284 return err;
1287 err = tegra_pcie_phys_get(pcie);
1288 if (err < 0) {
1289 dev_err(dev, "failed to get PHYs: %d\n", err);
1290 return err;
1293 err = tegra_pcie_power_on(pcie);
1294 if (err) {
1295 dev_err(dev, "failed to power up: %d\n", err);
1296 return err;
1299 pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
1300 pcie->pads = devm_ioremap_resource(dev, pads);
1301 if (IS_ERR(pcie->pads)) {
1302 err = PTR_ERR(pcie->pads);
1303 goto poweroff;
1306 afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
1307 pcie->afi = devm_ioremap_resource(dev, afi);
1308 if (IS_ERR(pcie->afi)) {
1309 err = PTR_ERR(pcie->afi);
1310 goto poweroff;
1313 /* request configuration space, but remap later, on demand */
1314 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1315 if (!res) {
1316 err = -EADDRNOTAVAIL;
1317 goto poweroff;
1320 pcie->cs = devm_request_mem_region(dev, res->start,
1321 resource_size(res), res->name);
1322 if (!pcie->cs) {
1323 err = -EADDRNOTAVAIL;
1324 goto poweroff;
1327 /* request interrupt */
1328 err = platform_get_irq_byname(pdev, "intr");
1329 if (err < 0) {
1330 dev_err(dev, "failed to get IRQ: %d\n", err);
1331 goto poweroff;
1334 pcie->irq = err;
1336 err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1337 if (err) {
1338 dev_err(dev, "failed to register IRQ: %d\n", err);
1339 goto poweroff;
1342 return 0;
1344 poweroff:
1345 tegra_pcie_power_off(pcie);
1346 return err;
1349 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1351 struct device *dev = pcie->dev;
1352 int err;
1354 if (pcie->irq > 0)
1355 free_irq(pcie->irq, pcie);
1357 tegra_pcie_power_off(pcie);
1359 err = phy_exit(pcie->phy);
1360 if (err < 0)
1361 dev_err(dev, "failed to teardown PHY: %d\n", err);
1363 return 0;
1366 static int tegra_msi_alloc(struct tegra_msi *chip)
1368 int msi;
1370 mutex_lock(&chip->lock);
1372 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1373 if (msi < INT_PCI_MSI_NR)
1374 set_bit(msi, chip->used);
1375 else
1376 msi = -ENOSPC;
1378 mutex_unlock(&chip->lock);
1380 return msi;
1383 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1385 struct device *dev = chip->chip.dev;
1387 mutex_lock(&chip->lock);
1389 if (!test_bit(irq, chip->used))
1390 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1391 else
1392 clear_bit(irq, chip->used);
1394 mutex_unlock(&chip->lock);
1397 static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1399 struct tegra_pcie *pcie = data;
1400 struct device *dev = pcie->dev;
1401 struct tegra_msi *msi = &pcie->msi;
1402 unsigned int i, processed = 0;
1404 for (i = 0; i < 8; i++) {
1405 unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1407 while (reg) {
1408 unsigned int offset = find_first_bit(&reg, 32);
1409 unsigned int index = i * 32 + offset;
1410 unsigned int irq;
1412 /* clear the interrupt */
1413 afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1415 irq = irq_find_mapping(msi->domain, index);
1416 if (irq) {
1417 if (test_bit(index, msi->used))
1418 generic_handle_irq(irq);
1419 else
1420 dev_info(dev, "unhandled MSI\n");
1421 } else {
1423 * that's weird who triggered this?
1424 * just clear it
1426 dev_info(dev, "unexpected MSI\n");
1429 /* see if there's any more pending in this vector */
1430 reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1432 processed++;
1436 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1439 static int tegra_msi_setup_irq(struct msi_controller *chip,
1440 struct pci_dev *pdev, struct msi_desc *desc)
1442 struct tegra_msi *msi = to_tegra_msi(chip);
1443 struct msi_msg msg;
1444 unsigned int irq;
1445 int hwirq;
1447 hwirq = tegra_msi_alloc(msi);
1448 if (hwirq < 0)
1449 return hwirq;
1451 irq = irq_create_mapping(msi->domain, hwirq);
1452 if (!irq) {
1453 tegra_msi_free(msi, hwirq);
1454 return -EINVAL;
1457 irq_set_msi_desc(irq, desc);
1459 msg.address_lo = virt_to_phys((void *)msi->pages);
1460 /* 32 bit address only */
1461 msg.address_hi = 0;
1462 msg.data = hwirq;
1464 pci_write_msi_msg(irq, &msg);
1466 return 0;
1469 static void tegra_msi_teardown_irq(struct msi_controller *chip,
1470 unsigned int irq)
1472 struct tegra_msi *msi = to_tegra_msi(chip);
1473 struct irq_data *d = irq_get_irq_data(irq);
1474 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1476 irq_dispose_mapping(irq);
1477 tegra_msi_free(msi, hwirq);
1480 static struct irq_chip tegra_msi_irq_chip = {
1481 .name = "Tegra PCIe MSI",
1482 .irq_enable = pci_msi_unmask_irq,
1483 .irq_disable = pci_msi_mask_irq,
1484 .irq_mask = pci_msi_mask_irq,
1485 .irq_unmask = pci_msi_unmask_irq,
1488 static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1489 irq_hw_number_t hwirq)
1491 irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1492 irq_set_chip_data(irq, domain->host_data);
1494 tegra_cpuidle_pcie_irqs_in_use();
1496 return 0;
1499 static const struct irq_domain_ops msi_domain_ops = {
1500 .map = tegra_msi_map,
1503 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1505 struct device *dev = pcie->dev;
1506 struct platform_device *pdev = to_platform_device(dev);
1507 const struct tegra_pcie_soc *soc = pcie->soc;
1508 struct tegra_msi *msi = &pcie->msi;
1509 unsigned long base;
1510 int err;
1511 u32 reg;
1513 mutex_init(&msi->lock);
1515 msi->chip.dev = dev;
1516 msi->chip.setup_irq = tegra_msi_setup_irq;
1517 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1519 msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1520 &msi_domain_ops, &msi->chip);
1521 if (!msi->domain) {
1522 dev_err(dev, "failed to create IRQ domain\n");
1523 return -ENOMEM;
1526 err = platform_get_irq_byname(pdev, "msi");
1527 if (err < 0) {
1528 dev_err(dev, "failed to get IRQ: %d\n", err);
1529 goto err;
1532 msi->irq = err;
1534 err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1535 tegra_msi_irq_chip.name, pcie);
1536 if (err < 0) {
1537 dev_err(dev, "failed to request IRQ: %d\n", err);
1538 goto err;
1541 /* setup AFI/FPCI range */
1542 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1543 base = virt_to_phys((void *)msi->pages);
1545 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1546 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1547 /* this register is in 4K increments */
1548 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1550 /* enable all MSI vectors */
1551 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1552 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1553 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1554 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1555 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1556 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1557 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1558 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1560 /* and unmask the MSI interrupt */
1561 reg = afi_readl(pcie, AFI_INTR_MASK);
1562 reg |= AFI_INTR_MASK_MSI_MASK;
1563 afi_writel(pcie, reg, AFI_INTR_MASK);
1565 return 0;
1567 err:
1568 irq_domain_remove(msi->domain);
1569 return err;
1572 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1574 struct tegra_msi *msi = &pcie->msi;
1575 unsigned int i, irq;
1576 u32 value;
1578 /* mask the MSI interrupt */
1579 value = afi_readl(pcie, AFI_INTR_MASK);
1580 value &= ~AFI_INTR_MASK_MSI_MASK;
1581 afi_writel(pcie, value, AFI_INTR_MASK);
1583 /* disable all MSI vectors */
1584 afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1585 afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1586 afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1587 afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1588 afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1589 afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1590 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1591 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1593 free_pages(msi->pages, 0);
1595 if (msi->irq > 0)
1596 free_irq(msi->irq, pcie);
1598 for (i = 0; i < INT_PCI_MSI_NR; i++) {
1599 irq = irq_find_mapping(msi->domain, i);
1600 if (irq > 0)
1601 irq_dispose_mapping(irq);
1604 irq_domain_remove(msi->domain);
1606 return 0;
1609 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1610 u32 *xbar)
1612 struct device *dev = pcie->dev;
1613 struct device_node *np = dev->of_node;
1615 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1616 switch (lanes) {
1617 case 0x0000104:
1618 dev_info(dev, "4x1, 1x1 configuration\n");
1619 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1620 return 0;
1622 case 0x0000102:
1623 dev_info(dev, "2x1, 1x1 configuration\n");
1624 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1625 return 0;
1627 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1628 switch (lanes) {
1629 case 0x00000204:
1630 dev_info(dev, "4x1, 2x1 configuration\n");
1631 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1632 return 0;
1634 case 0x00020202:
1635 dev_info(dev, "2x3 configuration\n");
1636 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1637 return 0;
1639 case 0x00010104:
1640 dev_info(dev, "4x1, 1x2 configuration\n");
1641 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1642 return 0;
1644 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1645 switch (lanes) {
1646 case 0x00000004:
1647 dev_info(dev, "single-mode configuration\n");
1648 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1649 return 0;
1651 case 0x00000202:
1652 dev_info(dev, "dual-mode configuration\n");
1653 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1654 return 0;
1658 return -EINVAL;
1662 * Check whether a given set of supplies is available in a device tree node.
1663 * This is used to check whether the new or the legacy device tree bindings
1664 * should be used.
1666 static bool of_regulator_bulk_available(struct device_node *np,
1667 struct regulator_bulk_data *supplies,
1668 unsigned int num_supplies)
1670 char property[32];
1671 unsigned int i;
1673 for (i = 0; i < num_supplies; i++) {
1674 snprintf(property, 32, "%s-supply", supplies[i].supply);
1676 if (of_find_property(np, property, NULL) == NULL)
1677 return false;
1680 return true;
1684 * Old versions of the device tree binding for this device used a set of power
1685 * supplies that didn't match the hardware inputs. This happened to work for a
1686 * number of cases but is not future proof. However to preserve backwards-
1687 * compatibility with old device trees, this function will try to use the old
1688 * set of supplies.
1690 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1692 struct device *dev = pcie->dev;
1693 struct device_node *np = dev->of_node;
1695 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1696 pcie->num_supplies = 3;
1697 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1698 pcie->num_supplies = 2;
1700 if (pcie->num_supplies == 0) {
1701 dev_err(dev, "device %s not supported in legacy mode\n",
1702 np->full_name);
1703 return -ENODEV;
1706 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1707 sizeof(*pcie->supplies),
1708 GFP_KERNEL);
1709 if (!pcie->supplies)
1710 return -ENOMEM;
1712 pcie->supplies[0].supply = "pex-clk";
1713 pcie->supplies[1].supply = "vdd";
1715 if (pcie->num_supplies > 2)
1716 pcie->supplies[2].supply = "avdd";
1718 return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1722 * Obtains the list of regulators required for a particular generation of the
1723 * IP block.
1725 * This would've been nice to do simply by providing static tables for use
1726 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1727 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1728 * and either seems to be optional depending on which ports are being used.
1730 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1732 struct device *dev = pcie->dev;
1733 struct device_node *np = dev->of_node;
1734 unsigned int i = 0;
1736 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1737 pcie->num_supplies = 7;
1739 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1740 sizeof(*pcie->supplies),
1741 GFP_KERNEL);
1742 if (!pcie->supplies)
1743 return -ENOMEM;
1745 pcie->supplies[i++].supply = "avddio-pex";
1746 pcie->supplies[i++].supply = "dvddio-pex";
1747 pcie->supplies[i++].supply = "avdd-pex-pll";
1748 pcie->supplies[i++].supply = "hvdd-pex";
1749 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1750 pcie->supplies[i++].supply = "vddio-pex-ctl";
1751 pcie->supplies[i++].supply = "avdd-pll-erefe";
1752 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1753 bool need_pexa = false, need_pexb = false;
1755 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1756 if (lane_mask & 0x0f)
1757 need_pexa = true;
1759 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1760 if (lane_mask & 0x30)
1761 need_pexb = true;
1763 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1764 (need_pexb ? 2 : 0);
1766 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1767 sizeof(*pcie->supplies),
1768 GFP_KERNEL);
1769 if (!pcie->supplies)
1770 return -ENOMEM;
1772 pcie->supplies[i++].supply = "avdd-pex-pll";
1773 pcie->supplies[i++].supply = "hvdd-pex";
1774 pcie->supplies[i++].supply = "vddio-pex-ctl";
1775 pcie->supplies[i++].supply = "avdd-plle";
1777 if (need_pexa) {
1778 pcie->supplies[i++].supply = "avdd-pexa";
1779 pcie->supplies[i++].supply = "vdd-pexa";
1782 if (need_pexb) {
1783 pcie->supplies[i++].supply = "avdd-pexb";
1784 pcie->supplies[i++].supply = "vdd-pexb";
1786 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1787 pcie->num_supplies = 5;
1789 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1790 sizeof(*pcie->supplies),
1791 GFP_KERNEL);
1792 if (!pcie->supplies)
1793 return -ENOMEM;
1795 pcie->supplies[0].supply = "avdd-pex";
1796 pcie->supplies[1].supply = "vdd-pex";
1797 pcie->supplies[2].supply = "avdd-pex-pll";
1798 pcie->supplies[3].supply = "avdd-plle";
1799 pcie->supplies[4].supply = "vddio-pex-clk";
1802 if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
1803 pcie->num_supplies))
1804 return devm_regulator_bulk_get(dev, pcie->num_supplies,
1805 pcie->supplies);
1808 * If not all regulators are available for this new scheme, assume
1809 * that the device tree complies with an older version of the device
1810 * tree binding.
1812 dev_info(dev, "using legacy DT binding for power supplies\n");
1814 devm_kfree(dev, pcie->supplies);
1815 pcie->num_supplies = 0;
1817 return tegra_pcie_get_legacy_regulators(pcie);
1820 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1822 struct device *dev = pcie->dev;
1823 struct device_node *np = dev->of_node, *port;
1824 const struct tegra_pcie_soc *soc = pcie->soc;
1825 struct of_pci_range_parser parser;
1826 struct of_pci_range range;
1827 u32 lanes = 0, mask = 0;
1828 unsigned int lane = 0;
1829 struct resource res;
1830 int err;
1832 if (of_pci_range_parser_init(&parser, np)) {
1833 dev_err(dev, "missing \"ranges\" property\n");
1834 return -EINVAL;
1837 for_each_of_pci_range(&parser, &range) {
1838 err = of_pci_range_to_resource(&range, np, &res);
1839 if (err < 0)
1840 return err;
1842 switch (res.flags & IORESOURCE_TYPE_BITS) {
1843 case IORESOURCE_IO:
1844 /* Track the bus -> CPU I/O mapping offset. */
1845 pcie->offset.io = res.start - range.pci_addr;
1847 memcpy(&pcie->pio, &res, sizeof(res));
1848 pcie->pio.name = np->full_name;
1851 * The Tegra PCIe host bridge uses this to program the
1852 * mapping of the I/O space to the physical address,
1853 * so we override the .start and .end fields here that
1854 * of_pci_range_to_resource() converted to I/O space.
1855 * We also set the IORESOURCE_MEM type to clarify that
1856 * the resource is in the physical memory space.
1858 pcie->io.start = range.cpu_addr;
1859 pcie->io.end = range.cpu_addr + range.size - 1;
1860 pcie->io.flags = IORESOURCE_MEM;
1861 pcie->io.name = "I/O";
1863 memcpy(&res, &pcie->io, sizeof(res));
1864 break;
1866 case IORESOURCE_MEM:
1868 * Track the bus -> CPU memory mapping offset. This
1869 * assumes that the prefetchable and non-prefetchable
1870 * regions will be the last of type IORESOURCE_MEM in
1871 * the ranges property.
1872 * */
1873 pcie->offset.mem = res.start - range.pci_addr;
1875 if (res.flags & IORESOURCE_PREFETCH) {
1876 memcpy(&pcie->prefetch, &res, sizeof(res));
1877 pcie->prefetch.name = "prefetchable";
1878 } else {
1879 memcpy(&pcie->mem, &res, sizeof(res));
1880 pcie->mem.name = "non-prefetchable";
1882 break;
1886 err = of_pci_parse_bus_range(np, &pcie->busn);
1887 if (err < 0) {
1888 dev_err(dev, "failed to parse ranges property: %d\n", err);
1889 pcie->busn.name = np->name;
1890 pcie->busn.start = 0;
1891 pcie->busn.end = 0xff;
1892 pcie->busn.flags = IORESOURCE_BUS;
1895 /* parse root ports */
1896 for_each_child_of_node(np, port) {
1897 struct tegra_pcie_port *rp;
1898 unsigned int index;
1899 u32 value;
1901 err = of_pci_get_devfn(port);
1902 if (err < 0) {
1903 dev_err(dev, "failed to parse address: %d\n", err);
1904 goto err_node_put;
1907 index = PCI_SLOT(err);
1909 if (index < 1 || index > soc->num_ports) {
1910 dev_err(dev, "invalid port number: %d\n", index);
1911 err = -EINVAL;
1912 goto err_node_put;
1915 index--;
1917 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1918 if (err < 0) {
1919 dev_err(dev, "failed to parse # of lanes: %d\n",
1920 err);
1921 goto err_node_put;
1924 if (value > 16) {
1925 dev_err(dev, "invalid # of lanes: %u\n", value);
1926 err = -EINVAL;
1927 goto err_node_put;
1930 lanes |= value << (index << 3);
1932 if (!of_device_is_available(port)) {
1933 lane += value;
1934 continue;
1937 mask |= ((1 << value) - 1) << lane;
1938 lane += value;
1940 rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
1941 if (!rp) {
1942 err = -ENOMEM;
1943 goto err_node_put;
1946 err = of_address_to_resource(port, 0, &rp->regs);
1947 if (err < 0) {
1948 dev_err(dev, "failed to parse address: %d\n", err);
1949 goto err_node_put;
1952 INIT_LIST_HEAD(&rp->list);
1953 rp->index = index;
1954 rp->lanes = value;
1955 rp->pcie = pcie;
1956 rp->np = port;
1958 rp->base = devm_ioremap_resource(dev, &rp->regs);
1959 if (IS_ERR(rp->base))
1960 return PTR_ERR(rp->base);
1962 list_add_tail(&rp->list, &pcie->ports);
1965 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
1966 if (err < 0) {
1967 dev_err(dev, "invalid lane configuration\n");
1968 return err;
1971 err = tegra_pcie_get_regulators(pcie, mask);
1972 if (err < 0)
1973 return err;
1975 return 0;
1977 err_node_put:
1978 of_node_put(port);
1979 return err;
1983 * FIXME: If there are no PCIe cards attached, then calling this function
1984 * can result in the increase of the bootup time as there are big timeout
1985 * loops.
1987 #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
1988 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1990 struct device *dev = port->pcie->dev;
1991 unsigned int retries = 3;
1992 unsigned long value;
1994 /* override presence detection */
1995 value = readl(port->base + RP_PRIV_MISC);
1996 value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
1997 value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
1998 writel(value, port->base + RP_PRIV_MISC);
2000 do {
2001 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2003 do {
2004 value = readl(port->base + RP_VEND_XP);
2006 if (value & RP_VEND_XP_DL_UP)
2007 break;
2009 usleep_range(1000, 2000);
2010 } while (--timeout);
2012 if (!timeout) {
2013 dev_err(dev, "link %u down, retrying\n", port->index);
2014 goto retry;
2017 timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2019 do {
2020 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2022 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2023 return true;
2025 usleep_range(1000, 2000);
2026 } while (--timeout);
2028 retry:
2029 tegra_pcie_port_reset(port);
2030 } while (--retries);
2032 return false;
2035 static int tegra_pcie_enable(struct tegra_pcie *pcie)
2037 struct device *dev = pcie->dev;
2038 struct tegra_pcie_port *port, *tmp;
2039 struct hw_pci hw;
2041 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2042 dev_info(dev, "probing port %u, using %u lanes\n",
2043 port->index, port->lanes);
2045 tegra_pcie_port_enable(port);
2047 if (tegra_pcie_port_check_link(port))
2048 continue;
2050 dev_info(dev, "link %u down, ignoring\n", port->index);
2052 tegra_pcie_port_disable(port);
2053 tegra_pcie_port_free(port);
2056 memset(&hw, 0, sizeof(hw));
2058 #ifdef CONFIG_PCI_MSI
2059 hw.msi_ctrl = &pcie->msi.chip;
2060 #endif
2062 hw.nr_controllers = 1;
2063 hw.private_data = (void **)&pcie;
2064 hw.setup = tegra_pcie_setup;
2065 hw.map_irq = tegra_pcie_map_irq;
2066 hw.ops = &tegra_pcie_ops;
2068 pci_common_init_dev(dev, &hw);
2069 return 0;
2072 static const struct tegra_pcie_soc tegra20_pcie = {
2073 .num_ports = 2,
2074 .msi_base_shift = 0,
2075 .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2076 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2077 .pads_refclk_cfg0 = 0xfa5cfa5c,
2078 .has_pex_clkreq_en = false,
2079 .has_pex_bias_ctrl = false,
2080 .has_intr_prsnt_sense = false,
2081 .has_cml_clk = false,
2082 .has_gen2 = false,
2085 static const struct tegra_pcie_soc tegra30_pcie = {
2086 .num_ports = 3,
2087 .msi_base_shift = 8,
2088 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2089 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2090 .pads_refclk_cfg0 = 0xfa5cfa5c,
2091 .pads_refclk_cfg1 = 0xfa5cfa5c,
2092 .has_pex_clkreq_en = true,
2093 .has_pex_bias_ctrl = true,
2094 .has_intr_prsnt_sense = true,
2095 .has_cml_clk = true,
2096 .has_gen2 = false,
2099 static const struct tegra_pcie_soc tegra124_pcie = {
2100 .num_ports = 2,
2101 .msi_base_shift = 8,
2102 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2103 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2104 .pads_refclk_cfg0 = 0x44ac44ac,
2105 .has_pex_clkreq_en = true,
2106 .has_pex_bias_ctrl = true,
2107 .has_intr_prsnt_sense = true,
2108 .has_cml_clk = true,
2109 .has_gen2 = true,
2112 static const struct of_device_id tegra_pcie_of_match[] = {
2113 { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2114 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2115 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2116 { },
2119 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2121 struct tegra_pcie *pcie = s->private;
2123 if (list_empty(&pcie->ports))
2124 return NULL;
2126 seq_printf(s, "Index Status\n");
2128 return seq_list_start(&pcie->ports, *pos);
2131 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2133 struct tegra_pcie *pcie = s->private;
2135 return seq_list_next(v, &pcie->ports, pos);
2138 static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2142 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2144 bool up = false, active = false;
2145 struct tegra_pcie_port *port;
2146 unsigned int value;
2148 port = list_entry(v, struct tegra_pcie_port, list);
2150 value = readl(port->base + RP_VEND_XP);
2152 if (value & RP_VEND_XP_DL_UP)
2153 up = true;
2155 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2157 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2158 active = true;
2160 seq_printf(s, "%2u ", port->index);
2162 if (up)
2163 seq_printf(s, "up");
2165 if (active) {
2166 if (up)
2167 seq_printf(s, ", ");
2169 seq_printf(s, "active");
2172 seq_printf(s, "\n");
2173 return 0;
2176 static const struct seq_operations tegra_pcie_ports_seq_ops = {
2177 .start = tegra_pcie_ports_seq_start,
2178 .next = tegra_pcie_ports_seq_next,
2179 .stop = tegra_pcie_ports_seq_stop,
2180 .show = tegra_pcie_ports_seq_show,
2183 static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2185 struct tegra_pcie *pcie = inode->i_private;
2186 struct seq_file *s;
2187 int err;
2189 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2190 if (err)
2191 return err;
2193 s = file->private_data;
2194 s->private = pcie;
2196 return 0;
2199 static const struct file_operations tegra_pcie_ports_ops = {
2200 .owner = THIS_MODULE,
2201 .open = tegra_pcie_ports_open,
2202 .read = seq_read,
2203 .llseek = seq_lseek,
2204 .release = seq_release,
2207 static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2209 struct dentry *file;
2211 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2212 if (!pcie->debugfs)
2213 return -ENOMEM;
2215 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2216 pcie, &tegra_pcie_ports_ops);
2217 if (!file)
2218 goto remove;
2220 return 0;
2222 remove:
2223 debugfs_remove_recursive(pcie->debugfs);
2224 pcie->debugfs = NULL;
2225 return -ENOMEM;
2228 static int tegra_pcie_probe(struct platform_device *pdev)
2230 struct device *dev = &pdev->dev;
2231 struct tegra_pcie *pcie;
2232 int err;
2234 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
2235 if (!pcie)
2236 return -ENOMEM;
2238 pcie->soc = of_device_get_match_data(dev);
2239 INIT_LIST_HEAD(&pcie->buses);
2240 INIT_LIST_HEAD(&pcie->ports);
2241 pcie->dev = dev;
2243 err = tegra_pcie_parse_dt(pcie);
2244 if (err < 0)
2245 return err;
2247 err = tegra_pcie_get_resources(pcie);
2248 if (err < 0) {
2249 dev_err(dev, "failed to request resources: %d\n", err);
2250 return err;
2253 err = tegra_pcie_enable_controller(pcie);
2254 if (err)
2255 goto put_resources;
2257 /* setup the AFI address translations */
2258 tegra_pcie_setup_translations(pcie);
2260 if (IS_ENABLED(CONFIG_PCI_MSI)) {
2261 err = tegra_pcie_enable_msi(pcie);
2262 if (err < 0) {
2263 dev_err(dev, "failed to enable MSI support: %d\n", err);
2264 goto put_resources;
2268 err = tegra_pcie_enable(pcie);
2269 if (err < 0) {
2270 dev_err(dev, "failed to enable PCIe ports: %d\n", err);
2271 goto disable_msi;
2274 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2275 err = tegra_pcie_debugfs_init(pcie);
2276 if (err < 0)
2277 dev_err(dev, "failed to setup debugfs: %d\n", err);
2280 return 0;
2282 disable_msi:
2283 if (IS_ENABLED(CONFIG_PCI_MSI))
2284 tegra_pcie_disable_msi(pcie);
2285 put_resources:
2286 tegra_pcie_put_resources(pcie);
2287 return err;
2290 static struct platform_driver tegra_pcie_driver = {
2291 .driver = {
2292 .name = "tegra-pcie",
2293 .of_match_table = tegra_pcie_of_match,
2294 .suppress_bind_attrs = true,
2296 .probe = tegra_pcie_probe,
2298 builtin_platform_driver(tegra_pcie_driver);