mm/zsmalloc: allocate exactly size of struct zs_pool
[linux/fpc-iii.git] / drivers / pci / host / pci-tegra.c
blobfeccfa6b6c11d4cbe5772b05e28c8dfb64973424
1 /*
2 * PCIe host controller driver for Tegra SoCs
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
10 * Bits taken from arch/arm/mach-dove/pcie.c
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
27 #include <linux/clk.h>
28 #include <linux/debugfs.h>
29 #include <linux/delay.h>
30 #include <linux/export.h>
31 #include <linux/interrupt.h>
32 #include <linux/irq.h>
33 #include <linux/irqdomain.h>
34 #include <linux/kernel.h>
35 #include <linux/module.h>
36 #include <linux/msi.h>
37 #include <linux/of_address.h>
38 #include <linux/of_pci.h>
39 #include <linux/of_platform.h>
40 #include <linux/pci.h>
41 #include <linux/phy/phy.h>
42 #include <linux/platform_device.h>
43 #include <linux/reset.h>
44 #include <linux/sizes.h>
45 #include <linux/slab.h>
46 #include <linux/vmalloc.h>
47 #include <linux/regulator/consumer.h>
49 #include <soc/tegra/cpuidle.h>
50 #include <soc/tegra/pmc.h>
52 #include <asm/mach/irq.h>
53 #include <asm/mach/map.h>
54 #include <asm/mach/pci.h>
/* total number of MSI vectors: 8 enable/status registers x 32 bits each */
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

/* AFI BARs: AXI-side size (in 4 KiB units) and start, per BAR */
#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

/* FPCI-side base address each AXI BAR translates to */
#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

/* MSI target window (FPCI/AXI base and size) */
#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

/* per-vector MSI status registers, 32 vectors each */
#define AFI_MSI_VEC0		0x6c
#define AFI_MSI_VEC1		0x70
#define AFI_MSI_VEC2		0x74
#define AFI_MSI_VEC3		0x78
#define AFI_MSI_VEC4		0x7c
#define AFI_MSI_VEC5		0x80
#define AFI_MSI_VEC6		0x84
#define AFI_MSI_VEC7		0x88

/* per-vector MSI enable registers, 32 vectors each */
#define AFI_MSI_EN_VEC0		0x8c
#define AFI_MSI_EN_VEC1		0x90
#define AFI_MSI_EN_VEC2		0x94
#define AFI_MSI_EN_VEC3		0x98
#define AFI_MSI_EN_VEC4		0x9c
#define AFI_MSI_EN_VEC5		0xa0
#define AFI_MSI_EN_VEC6		0xa4
#define AFI_MSI_EN_VEC7		0xa8

#define AFI_CONFIGURATION		0xac
#define AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define AFI_INTR_MASK_INT_MASK	(1 << 0)
#define AFI_INTR_MASK_MSI_MASK	(1 << 8)

/* interrupt cause codes read from AFI_INTR_CODE (see tegra_pcie_isr()) */
#define AFI_INTR_CODE			0xb8
#define AFI_INTR_CODE_MASK		0xf
#define AFI_INTR_INI_SLAVE_ERROR	1
#define AFI_INTR_INI_DECODE_ERROR	2
#define AFI_INTR_TARGET_ABORT		3
#define AFI_INTR_MASTER_ABORT		4
#define AFI_INTR_INVALID_WRITE		5
#define AFI_INTR_LEGACY			6
#define AFI_INTR_FPCI_DECODE_ERROR	7
#define AFI_INTR_AXI_DECODE_ERROR	8
#define AFI_INTR_FPCI_TIMEOUT		9
#define AFI_INTR_PE_PRSNT_SENSE		10
#define AFI_INTR_PE_CLKREQ_SENSE	11
#define AFI_INTR_CLKCLAMP_SENSE		12
#define AFI_INTR_RDY4PD_SENSE		13
#define AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

/* port enable/disable and lane crossbar configuration */
#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

/* per-port control registers (reset, reference clock, CLKREQ) */
#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX2_CTRL			0x128
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

/* root port registers (accessed through each port's own aperture) */
#define RP_VEND_XP	0x00000F00
#define  RP_VEND_XP_DL_UP	(1 << 30)

#define RP_PRIV_MISC	0x00000FE0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

/* PADS (PHY pad control) registers */
#define PADS_CTL_SEL		0x0000009C

#define PADS_CTL		0x000000A0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

/* the PLL control register moved between Tegra20 and Tegra30 */
#define PADS_PLL_CTL_TEGRA20			0x000000B8
#define PADS_PLL_CTL_TEGRA30			0x000000B4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000C8
#define PADS_REFCLK_CFG1			0x000000CC
#define PADS_REFCLK_BIAS			0x000000D0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */

/* Default value provided by HW engineering is 0xfa5c */
#define PADS_REFCLK_CFG_VALUE \
	( \
		(0x17 << PADS_REFCLK_CFG_TERM_SHIFT)   | \
		(0    << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
		(0xa  << PADS_REFCLK_CFG_PREDI_SHIFT)  | \
		(0xf  << PADS_REFCLK_CFG_DRVI_SHIFT)   \
	)
/* MSI controller state: one shared parent IRQ fans out to INT_PCI_MSI_NR vectors */
struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* which MSI vectors are allocated */
	struct irq_domain *domain;
	unsigned long pages;	/* backing pages for the MSI target address — allocated elsewhere; TODO confirm */
	struct mutex lock;	/* protects the 'used' bitmap (see tegra_msi_alloc()) */
	int irq;		/* parent interrupt delivering all MSIs */
};
/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc_data {
	unsigned int num_ports;		/* number of root ports on this SoC */
	unsigned int msi_base_shift;
	u32 pads_pll_ctl;		/* offset of the PADS PLL control register (moved between generations) */
	u32 tx_ref_sel;			/* TXCLKREF divider selection for the pad PLL */
	bool has_pex_clkreq_en;		/* supports per-port CLKREQ enable */
	bool has_pex_bias_ctrl;		/* has the AFI_PEXBIAS_CTRL_0 register */
	bool has_intr_prsnt_sense;	/* supports slot-present-sense interrupt */
	bool has_cml_clk;		/* needs the separate CML clock */
	bool has_gen2;			/* supports PCIe Gen2 link speed */
};
/* Convert a generic msi_controller pointer back to its tegra_msi container. */
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
/* Per-controller driver state. */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* mapped PADS (PHY pad control) registers */
	void __iomem *afi;	/* mapped AFI (AXI-to-FPCI bridge) registers */
	int irq;		/* shared controller interrupt (see tegra_pcie_isr()) */

	struct list_head buses;	/* lazily created per-bus config-space mappings */
	struct resource *cs;	/* configuration space aperture, remapped on demand */

	/* address ranges handed to the PCI core in tegra_pcie_setup() */
	struct resource all;
	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource prefetch;
	struct resource busn;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;	/* only present when soc_data->has_cml_clk */

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	struct phy *phy;	/* optional external PHY; NULL means use the internal pads */

	struct tegra_msi msi;

	struct list_head ports;	/* list of struct tegra_pcie_port */
	unsigned int num_ports;
	u32 xbar_config;	/* lane crossbar setting programmed into AFI_PCIE_CONFIG */

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc_data *soc_data;
	struct dentry *debugfs;
};
/* State for a single PCIe root port. */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* parent controller */
	struct list_head list;		/* entry in tegra_pcie.ports */
	struct resource regs;		/* root port register aperture */
	void __iomem *base;		/* mapped root port registers */
	unsigned int index;		/* hardware port number (selects AFI_PEXn_CTRL) */
	unsigned int lanes;		/* number of lanes assigned to this port */
};
/* A 1 MiB virtual-address mapping of one bus's configuration space. */
struct tegra_pcie_bus {
	struct vm_struct *area;	/* virtual area built by tegra_pcie_bus_alloc() */
	struct list_head list;	/* entry in tegra_pcie.buses */
	unsigned int nr;	/* bus number this mapping covers */
};
/* Retrieve the driver state stashed in the ARM pci_sys_data. */
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
/* MMIO accessors for the AFI (AXI-to-FPCI bridge) register window. */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

/* MMIO accessors for the PADS (PHY pad control) register window. */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 * To work around this, a 1 MiB of virtual addresses are allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * the bus number bits are hidden so that the extended register number bits
 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 *
 *    [19:16] extended register number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 * address space via the MMU.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long offset;

	offset = (where & 0xf00) << 8;		/* extended register bits -> [19:16] */
	offset |= PCI_SLOT(devfn) << 11;	/* device number -> [15:11] */
	offset |= PCI_FUNC(devfn) << 8;		/* function number -> [10:8] */
	offset |= where & 0xfc;			/* dword-aligned register -> [7:0] */

	return offset;
}
/*
 * Create the 1 MiB virtual mapping for @busnr's configuration space by
 * stitching 16 chunks of 64 KiB of physical config space together (see the
 * mapping description above tegra_pcie_conf_offset()).
 *
 * Returns the new tegra_pcie_bus on success or an ERR_PTR() on failure.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	/* strongly-ordered device mapping, non-executable */
	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		/*
		 * chunk i holds extended-register block i for this bus:
		 * the physical blocks are 16 MiB apart, buses 64 KiB apart
		 */
		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
				err);
			goto unmap;
		}
	}

	return bus;

unmap:
	/* tears down any chunks already mapped and frees the vm area */
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
430 * Look up a virtual address mapping for the specified bus number. If no such
431 * mapping exists, try to create one.
433 static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
434 unsigned int busnr)
436 struct tegra_pcie_bus *bus;
438 list_for_each_entry(bus, &pcie->buses, list)
439 if (bus->nr == busnr)
440 return (void __iomem *)bus->area->addr;
442 bus = tegra_pcie_bus_alloc(pcie, busnr);
443 if (IS_ERR(bus))
444 return NULL;
446 list_add_tail(&bus->list, &pcie->buses);
448 return (void __iomem *)bus->area->addr;
/*
 * Compute the virtual address at which the config register @where of
 * @devfn on @bus can be accessed.
 *
 * Bus 0 is the root bus: slot N+1 corresponds to root port N and is
 * accessed through that port's own register aperture.  All downstream
 * buses go through the on-demand config-space mapping.  Returns NULL when
 * no port matches or the mapping cannot be created.
 */
static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
					     unsigned int devfn,
					     int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		/* root port N answers as device N+1 on the root bus */
		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		addr = tegra_pcie_bus_map(pcie, bus->number);
		if (!addr) {
			dev_err(pcie->dev,
				"failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}
483 static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
484 int where, int size, u32 *value)
486 void __iomem *addr;
488 addr = tegra_pcie_conf_address(bus, devfn, where);
489 if (!addr) {
490 *value = 0xffffffff;
491 return PCIBIOS_DEVICE_NOT_FOUND;
494 *value = readl(addr);
496 if (size == 1)
497 *value = (*value >> (8 * (where & 3))) & 0xff;
498 else if (size == 2)
499 *value = (*value >> (8 * (where & 3))) & 0xffff;
501 return PCIBIOS_SUCCESSFUL;
504 static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
505 int where, int size, u32 value)
507 void __iomem *addr;
508 u32 mask, tmp;
510 addr = tegra_pcie_conf_address(bus, devfn, where);
511 if (!addr)
512 return PCIBIOS_DEVICE_NOT_FOUND;
514 if (size == 4) {
515 writel(value, addr);
516 return PCIBIOS_SUCCESSFUL;
519 if (size == 2)
520 mask = ~(0xffff << ((where & 0x3) * 8));
521 else if (size == 1)
522 mask = ~(0xff << ((where & 0x3) * 8));
523 else
524 return PCIBIOS_BAD_REGISTER_NUMBER;
526 tmp = readl(addr) & mask;
527 tmp |= value << ((where & 0x3) * 8);
528 writel(tmp, addr);
530 return PCIBIOS_SUCCESSFUL;
/* Config-space accessors handed to the PCI core. */
static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};
538 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
540 unsigned long ret = 0;
542 switch (port->index) {
543 case 0:
544 ret = AFI_PEX0_CTRL;
545 break;
547 case 1:
548 ret = AFI_PEX1_CTRL;
549 break;
551 case 2:
552 ret = AFI_PEX2_CTRL;
553 break;
556 return ret;
/* Pulse the port's (active-low) PEX reset line via its AFI control register. */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* hold reset asserted for at least 1 ms */
	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
/*
 * Enable a root port: turn on its reference clock (and CLKREQ where the
 * SoC supports it), then pulse the port reset.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}
/* Disable a root port: hold it in reset and gate its reference clock. */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
/* Release a port's mapping and memory region, and unlink it from the list. */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}
/* Enable I/O, memory, bus mastering and SERR reporting on every bridge. */
static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
	u16 reg;

	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
		pci_read_config_word(dev, PCI_COMMAND, &reg);
		reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
		pci_write_config_word(dev, PCI_COMMAND, reg);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
/* device IDs of the Tegra root ports whose class needs correcting */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
658 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
660 struct tegra_pcie *pcie = sys_to_pcie(sys);
661 int err;
663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
664 if (err < 0)
665 return err;
667 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
668 if (err)
669 return err;
671 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
672 pci_add_resource_offset(&sys->resources, &pcie->prefetch,
673 sys->mem_offset);
674 pci_add_resource(&sys->resources, &pcie->busn);
676 pci_ioremap_io(pcie->pio.start, pcie->io.start);
678 return 1;
/*
 * Resolve the legacy INTx interrupt for @pdev via the device tree; fall
 * back to the shared controller interrupt when no mapping exists.
 */
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
	int irq;

	/* tell the cpuidle driver that PCIe legacy interrupts are in use */
	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}
/* Create the root bus and enumerate everything below it. */
static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	struct pci_bus *bus;

	bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
				  &sys->resources);
	if (!bus)
		return NULL;

	pci_scan_child_bus(bus);

	return bus;
}
710 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
712 const char *err_msg[] = {
713 "Unknown",
714 "AXI slave error",
715 "AXI decode error",
716 "Target abort",
717 "Master abort",
718 "Invalid write",
719 "Legacy interrupt",
720 "Response decoding error",
721 "AXI response decoding error",
722 "Transaction timeout",
723 "Slot present pin change",
724 "Slot clock request change",
725 "TMS clock ramp change",
726 "TMS ready for power down",
727 "Peer2Peer error",
729 struct tegra_pcie *pcie = arg;
730 u32 code, signature;
732 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
733 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
734 afi_writel(pcie, 0, AFI_INTR_CODE);
736 if (code == AFI_INTR_LEGACY)
737 return IRQ_NONE;
739 if (code >= ARRAY_SIZE(err_msg))
740 code = 0;
743 * do not pollute kernel log with master abort reports since they
744 * happen a lot during enumeration
746 if (code == AFI_INTR_MASTER_ABORT)
747 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
748 signature);
749 else
750 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
751 signature);
753 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
754 code == AFI_INTR_FPCI_DECODE_ERROR) {
755 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
756 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
758 if (code == AFI_INTR_MASTER_ABORT)
759 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
760 else
761 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
764 return IRQ_HANDLED;
/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ); /* size in 4 KiB units */
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
/*
 * Busy-poll the PADS PLL lock-detect bit for up to @timeout milliseconds.
 * Returns 0 once the PLL locks, -ETIMEDOUT otherwise.
 */
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	u32 value;

	/* convert the millisecond timeout into an absolute jiffies deadline */
	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}
/*
 * Bring up the built-in pad PHY (used when no external PHY is provided):
 * configure the pad PLL, reset it, program the reference clock drivers and
 * wait for lock, then enable the TX/RX data paths.  The step ordering
 * follows the hardware bring-up sequence and must not be changed.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	/* CFG1 holds the third port's 16-bit entry */
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}
/*
 * One-time controller bring-up: program the lane crossbar, enable only the
 * ports in use, power on the PHY, release the PCIe reset and enable the
 * FPCI interface and error interrupts.  Returns 0 on success.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* then clear the disable bit for each port that is actually used */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* enable or fuse off Gen2 support depending on the SoC */
	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* use the internal pad PHY unless an external PHY was provided */
	if (!pcie->phy)
		err = tegra_pcie_phy_enable(pcie);
	else
		err = phy_power_on(pcie->phy);

	if (err < 0) {
		dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
		return err;
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
/*
 * Power the controller down: PHY off, all resets asserted, power gate
 * closed and regulators disabled.  Failures are only warned about since
 * this also runs on error paths.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	int err;

	/* TODO: disable and unprepare clocks? */

	err = phy_power_off(pcie->phy);
	if (err < 0)
		dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
}
/*
 * Power the controller up: assert all resets, enable regulators, run the
 * powergate sequence (which also enables the PEX clock and releases its
 * reset), then enable the remaining clocks.
 *
 * NOTE(review): a regulator_bulk_enable() failure is only logged, not
 * propagated, and later failure paths do not unwind earlier steps — this
 * appears intentional (best effort) but should be confirmed.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(pcie->dev, "failed to enable regulators: %d\n", err);

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(pcie->dev, "failed to enable CML clock: %d\n",
				err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
1057 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1059 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1061 pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
1062 if (IS_ERR(pcie->pex_clk))
1063 return PTR_ERR(pcie->pex_clk);
1065 pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
1066 if (IS_ERR(pcie->afi_clk))
1067 return PTR_ERR(pcie->afi_clk);
1069 pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
1070 if (IS_ERR(pcie->pll_e))
1071 return PTR_ERR(pcie->pll_e);
1073 if (soc->has_cml_clk) {
1074 pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
1075 if (IS_ERR(pcie->cml_clk))
1076 return PTR_ERR(pcie->cml_clk);
1079 return 0;
1082 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1084 pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1085 if (IS_ERR(pcie->pex_rst))
1086 return PTR_ERR(pcie->pex_rst);
1088 pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1089 if (IS_ERR(pcie->afi_rst))
1090 return PTR_ERR(pcie->afi_rst);
1092 pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1093 if (IS_ERR(pcie->pcie_xrst))
1094 return PTR_ERR(pcie->pcie_xrst);
1096 return 0;
/*
 * Acquire everything the driver needs: clocks, resets, PHY, power, the
 * "pads"/"afi"/"cs" register apertures and the controller interrupt.
 * Anything acquired after the power-up step is unwound via the poweroff
 * label on failure; devm handles the rest.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get resets: %d\n", err);
		return err;
	}

	/* optional: a missing PHY yields NULL, not an error */
	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to power up: %d\n", err);
		return err;
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
/*
 * tegra_pcie_put_resources() - release the IRQ, power down and tear down
 * the PHY.  Counterpart to tegra_pcie_get_resources(); devm-managed
 * resources (clocks, resets, mappings) are released automatically.
 * Always returns 0 — a phy_exit() failure is only logged.
 */
1186 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1188 int err;
1190 if (pcie->irq > 0)
1191 free_irq(pcie->irq, pcie);
1193 tegra_pcie_power_off(pcie);
1195 err = phy_exit(pcie->phy);
1196 if (err < 0)
1197 dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
1199 return 0;
1202 static int tegra_msi_alloc(struct tegra_msi *chip)
1204 int msi;
1206 mutex_lock(&chip->lock);
1208 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1209 if (msi < INT_PCI_MSI_NR)
1210 set_bit(msi, chip->used);
1211 else
1212 msi = -ENOSPC;
1214 mutex_unlock(&chip->lock);
1216 return msi;
1219 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1221 struct device *dev = chip->chip.dev;
1223 mutex_lock(&chip->lock);
1225 if (!test_bit(irq, chip->used))
1226 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1227 else
1228 clear_bit(irq, chip->used);
1230 mutex_unlock(&chip->lock);
/*
 * tegra_pcie_msi_irq() - top-level MSI interrupt handler.
 *
 * The AFI exposes 8 vector registers (AFI_MSI_VEC0..7) of 32 pending bits
 * each, covering all INT_PCI_MSI_NR vectors.  For every pending bit: ack
 * it by writing the bit back to the vector register, then demultiplex to
 * the virtual IRQ mapped in the MSI irq_domain.  Returns IRQ_HANDLED if
 * at least one vector was serviced, IRQ_NONE otherwise (shared-IRQ safe).
 */
1233 static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1235 struct tegra_pcie *pcie = data;
1236 struct tegra_msi *msi = &pcie->msi;
1237 unsigned int i, processed = 0;
1239 for (i = 0; i < 8; i++) {
1240 unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1242 while (reg) {
1243 unsigned int offset = find_first_bit(&reg, 32);
1244 unsigned int index = i * 32 + offset;
1245 unsigned int irq;
1247 /* clear the interrupt */
1248 afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1250 irq = irq_find_mapping(msi->domain, index);
1251 if (irq) {
1252 if (test_bit(index, msi->used))
1253 generic_handle_irq(irq);
1254 else
1255 dev_info(pcie->dev, "unhandled MSI\n");
1256 } else {
1258 * that's weird who triggered this?
1259 * just clear it
1261 dev_info(pcie->dev, "unexpected MSI\n");
1264 /* see if there's any more pending in this vector */
1265 reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1267 processed++;
1271 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
/*
 * tegra_msi_setup_irq() - msi_controller ->setup_irq callback.
 *
 * Allocates a hardware vector, maps it to a virtual IRQ in the MSI
 * domain, attaches the msi_desc and programs the endpoint's MSI message:
 * the address is the physical address of msi->pages (the page the AFI
 * has been told to treat as the MSI target) and the data is the vector
 * number.  Returns 0 on success or a negative errno.
 */
1274 static int tegra_msi_setup_irq(struct msi_controller *chip,
1275 struct pci_dev *pdev, struct msi_desc *desc)
1277 struct tegra_msi *msi = to_tegra_msi(chip);
1278 struct msi_msg msg;
1279 unsigned int irq;
1280 int hwirq;
1282 hwirq = tegra_msi_alloc(msi);
1283 if (hwirq < 0)
1284 return hwirq;
1286 irq = irq_create_mapping(msi->domain, hwirq);
1287 if (!irq) {
1288 tegra_msi_free(msi, hwirq);
1289 return -EINVAL;
1292 irq_set_msi_desc(irq, desc);
1294 msg.address_lo = virt_to_phys((void *)msi->pages);
1295 /* 32 bit address only */
1296 msg.address_hi = 0;
1297 msg.data = hwirq;
1299 pci_write_msi_msg(irq, &msg);
1301 return 0;
/*
 * tegra_msi_teardown_irq() - msi_controller ->teardown_irq callback.
 * Undoes tegra_msi_setup_irq(): disposes the domain mapping and returns
 * the hardware vector to the bitmap.
 */
1304 static void tegra_msi_teardown_irq(struct msi_controller *chip,
1305 unsigned int irq)
1307 struct tegra_msi *msi = to_tegra_msi(chip);
1308 struct irq_data *d = irq_get_irq_data(irq);
1309 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1311 irq_dispose_mapping(irq);
1312 tegra_msi_free(msi, hwirq);
/* irq_chip for the per-vector MSI IRQs; all operations delegate to the
 * generic PCI MSI mask/unmask helpers. */
1315 static struct irq_chip tegra_msi_irq_chip = {
1316 .name = "Tegra PCIe MSI",
1317 .irq_enable = pci_msi_unmask_irq,
1318 .irq_disable = pci_msi_mask_irq,
1319 .irq_mask = pci_msi_mask_irq,
1320 .irq_unmask = pci_msi_unmask_irq,
/*
 * tegra_msi_map() - irq_domain ->map callback for MSI vectors.
 * Installs the MSI irq_chip with the simple-IRQ flow handler and notifies
 * cpuidle that PCIe interrupts are in use (LP2 constraint on Tegra).
 */
1323 static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1324 irq_hw_number_t hwirq)
1326 irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1327 irq_set_chip_data(irq, domain->host_data);
1328 set_irq_flags(irq, IRQF_VALID);
1330 tegra_cpuidle_pcie_irqs_in_use();
1332 return 0;
/* irq_domain operations for the linear MSI domain; only ->map is needed. */
1335 static const struct irq_domain_ops msi_domain_ops = {
1336 .map = tegra_msi_map,
1339 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1341 struct platform_device *pdev = to_platform_device(pcie->dev);
1342 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1343 struct tegra_msi *msi = &pcie->msi;
1344 unsigned long base;
1345 int err;
1346 u32 reg;
1348 mutex_init(&msi->lock);
1350 msi->chip.dev = pcie->dev;
1351 msi->chip.setup_irq = tegra_msi_setup_irq;
1352 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1354 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1355 &msi_domain_ops, &msi->chip);
1356 if (!msi->domain) {
1357 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1358 return -ENOMEM;
1361 err = platform_get_irq_byname(pdev, "msi");
1362 if (err < 0) {
1363 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1364 goto err;
1367 msi->irq = err;
1369 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1370 tegra_msi_irq_chip.name, pcie);
1371 if (err < 0) {
1372 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1373 goto err;
1376 /* setup AFI/FPCI range */
1377 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1378 base = virt_to_phys((void *)msi->pages);
1380 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1381 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1382 /* this register is in 4K increments */
1383 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1385 /* enable all MSI vectors */
1386 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1387 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1388 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1389 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1390 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1391 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1392 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1393 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1395 /* and unmask the MSI interrupt */
1396 reg = afi_readl(pcie, AFI_INTR_MASK);
1397 reg |= AFI_INTR_MASK_MSI_MASK;
1398 afi_writel(pcie, reg, AFI_INTR_MASK);
1400 return 0;
1402 err:
1403 irq_domain_remove(msi->domain);
1404 return err;
/*
 * tegra_pcie_disable_msi() - tear down the AFI MSI controller.
 *
 * Masks the summary MSI interrupt, disables all vector groups, releases
 * the MSI target page and the "msi" IRQ, then disposes every mapping
 * before removing the domain.  Always returns 0.
 *
 * NOTE(review): msi->pages is freed before free_irq(); if the handler
 * could still fire at this point that ordering looks inverted — confirm
 * against the enable path before relying on it.
 */
1407 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1409 struct tegra_msi *msi = &pcie->msi;
1410 unsigned int i, irq;
1411 u32 value;
1413 /* mask the MSI interrupt */
1414 value = afi_readl(pcie, AFI_INTR_MASK);
1415 value &= ~AFI_INTR_MASK_MSI_MASK;
1416 afi_writel(pcie, value, AFI_INTR_MASK);
1418 /* disable all MSI vectors */
1419 afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1420 afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1421 afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1422 afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1423 afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1424 afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1425 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1426 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1428 free_pages(msi->pages, 0);
1430 if (msi->irq > 0)
1431 free_irq(msi->irq, pcie);
1433 for (i = 0; i < INT_PCI_MSI_NR; i++) {
1434 irq = irq_find_mapping(msi->domain, i);
1435 if (irq > 0)
1436 irq_dispose_mapping(irq);
1439 irq_domain_remove(msi->domain);
1441 return 0;
/*
 * tegra_pcie_get_xbar_config() - translate a per-port lane layout into
 * the XBAR configuration value for AFI_PCIE_CONFIG.
 *
 * @lanes encodes one byte per root port (port N's lane count in bits
 * N*8..N*8+7), as assembled by tegra_pcie_parse_dt().  The accepted
 * combinations differ per SoC generation, selected by compatible string.
 * Returns 0 and stores the value in *xbar, or -EINVAL for an unsupported
 * combination.
 */
1444 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1445 u32 *xbar)
1447 struct device_node *np = pcie->dev->of_node;
1449 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1450 switch (lanes) {
1451 case 0x0000104:
1452 dev_info(pcie->dev, "4x1, 1x1 configuration\n");
1453 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1454 return 0;
1456 case 0x0000102:
1457 dev_info(pcie->dev, "2x1, 1x1 configuration\n");
1458 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1459 return 0;
1461 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1462 switch (lanes) {
1463 case 0x00000204:
1464 dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1465 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1466 return 0;
1468 case 0x00020202:
1469 dev_info(pcie->dev, "2x3 configuration\n");
1470 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1471 return 0;
1473 case 0x00010104:
1474 dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1475 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1476 return 0;
1478 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1479 switch (lanes) {
1480 case 0x00000004:
1481 dev_info(pcie->dev, "single-mode configuration\n");
1482 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1483 return 0;
1485 case 0x00000202:
1486 dev_info(pcie->dev, "dual-mode configuration\n");
1487 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1488 return 0;
/* no recognized compatible or lane combination */
1492 return -EINVAL;
1496 * Check whether a given set of supplies is available in a device tree node.
1497 * This is used to check whether the new or the legacy device tree bindings
1498 * should be used.
1500 static bool of_regulator_bulk_available(struct device_node *np,
1501 struct regulator_bulk_data *supplies,
1502 unsigned int num_supplies)
1504 char property[32];
1505 unsigned int i;
1507 for (i = 0; i < num_supplies; i++) {
1508 snprintf(property, 32, "%s-supply", supplies[i].supply);
1510 if (of_find_property(np, property, NULL) == NULL)
1511 return false;
1514 return true;
1518 * Old versions of the device tree binding for this device used a set of power
1519 * supplies that didn't match the hardware inputs. This happened to work for a
1520 * number of cases but is not future proof. However to preserve backwards-
1521 * compatibility with old device trees, this function will try to use the old
1522 * set of supplies.
/* Returns 0 on success, -ENODEV for unrecognized compatibles, -ENOMEM on
 * allocation failure, or the devm_regulator_bulk_get() error. */
1524 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1526 struct device_node *np = pcie->dev->of_node;
/* Tegra30 legacy binding has pex-clk/vdd/avdd; Tegra20 only pex-clk/vdd. */
1528 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1529 pcie->num_supplies = 3;
1530 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1531 pcie->num_supplies = 2;
1533 if (pcie->num_supplies == 0) {
1534 dev_err(pcie->dev, "device %s not supported in legacy mode\n",
1535 np->full_name);
1536 return -ENODEV;
1539 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1540 sizeof(*pcie->supplies),
1541 GFP_KERNEL);
1542 if (!pcie->supplies)
1543 return -ENOMEM;
1545 pcie->supplies[0].supply = "pex-clk";
1546 pcie->supplies[1].supply = "vdd";
1548 if (pcie->num_supplies > 2)
1549 pcie->supplies[2].supply = "avdd";
1551 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1552 pcie->supplies);
1556 * Obtains the list of regulators required for a particular generation of the
1557 * IP block.
1559 * This would've been nice to do simply by providing static tables for use
1560 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1561 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1562 * and either seems to be optional depending on which ports are being used.
/* @lane_mask: bitmask of lanes in use, assembled by tegra_pcie_parse_dt();
 * on Tegra30 it selects whether the PEXA (lanes 0-3) and/or PEXB (lanes
 * 4-5) supply pairs are required.  Falls back to the legacy binding when
 * not all new-style supplies are present in the device tree. */
1564 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1566 struct device_node *np = pcie->dev->of_node;
1567 unsigned int i = 0;
1569 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1570 pcie->num_supplies = 7;
1572 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1573 sizeof(*pcie->supplies),
1574 GFP_KERNEL);
1575 if (!pcie->supplies)
1576 return -ENOMEM;
1578 pcie->supplies[i++].supply = "avddio-pex";
1579 pcie->supplies[i++].supply = "dvddio-pex";
1580 pcie->supplies[i++].supply = "avdd-pex-pll";
1581 pcie->supplies[i++].supply = "hvdd-pex";
1582 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1583 pcie->supplies[i++].supply = "vddio-pex-ctl";
1584 pcie->supplies[i++].supply = "avdd-pll-erefe";
1585 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1586 bool need_pexa = false, need_pexb = false;
1588 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1589 if (lane_mask & 0x0f)
1590 need_pexa = true;
1592 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1593 if (lane_mask & 0x30)
1594 need_pexb = true;
1596 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1597 (need_pexb ? 2 : 0);
1599 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1600 sizeof(*pcie->supplies),
1601 GFP_KERNEL);
1602 if (!pcie->supplies)
1603 return -ENOMEM;
1605 pcie->supplies[i++].supply = "avdd-pex-pll";
1606 pcie->supplies[i++].supply = "hvdd-pex";
1607 pcie->supplies[i++].supply = "vddio-pex-ctl";
1608 pcie->supplies[i++].supply = "avdd-plle";
1610 if (need_pexa) {
1611 pcie->supplies[i++].supply = "avdd-pexa";
1612 pcie->supplies[i++].supply = "vdd-pexa";
1615 if (need_pexb) {
1616 pcie->supplies[i++].supply = "avdd-pexb";
1617 pcie->supplies[i++].supply = "vdd-pexb";
1619 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1620 pcie->num_supplies = 5;
1622 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1623 sizeof(*pcie->supplies),
1624 GFP_KERNEL);
1625 if (!pcie->supplies)
1626 return -ENOMEM;
1628 pcie->supplies[0].supply = "avdd-pex";
1629 pcie->supplies[1].supply = "vdd-pex";
1630 pcie->supplies[2].supply = "avdd-pex-pll";
1631 pcie->supplies[3].supply = "avdd-plle";
1632 pcie->supplies[4].supply = "vddio-pex-clk";
/* Use the new binding only if all of its supplies are described. */
1635 if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
1636 pcie->num_supplies))
1637 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1638 pcie->supplies);
1641 * If not all regulators are available for this new scheme, assume
1642 * that the device tree complies with an older version of the device
1643 * tree binding.
1645 dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
1647 devm_kfree(pcie->dev, pcie->supplies);
1648 pcie->num_supplies = 0;
1650 return tegra_pcie_get_legacy_regulators(pcie);
/*
 * tegra_pcie_parse_dt() - parse the controller's device tree node.
 *
 * Walks the "ranges" property to populate the I/O, prefetchable and
 * non-prefetchable apertures (and an enclosing "all" resource), parses
 * the bus range, then iterates over the root-port child nodes building
 * the per-port lane layout (for the XBAR config), the lane mask (for
 * regulator selection) and a tegra_pcie_port for each enabled port.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the early returns inside for_each_child_of_node() appear
 * to leave the child node's refcount held (no of_node_put(port)) —
 * confirm against the OF API before relying on these error paths.
 */
1653 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1655 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1656 struct device_node *np = pcie->dev->of_node, *port;
1657 struct of_pci_range_parser parser;
1658 struct of_pci_range range;
1659 u32 lanes = 0, mask = 0;
1660 unsigned int lane = 0;
1661 struct resource res;
1662 int err;
/* Start with an inverted (empty) range; grown below to cover all
 * apertures seen in "ranges". */
1664 memset(&pcie->all, 0, sizeof(pcie->all));
1665 pcie->all.flags = IORESOURCE_MEM;
1666 pcie->all.name = np->full_name;
1667 pcie->all.start = ~0;
1668 pcie->all.end = 0;
1670 if (of_pci_range_parser_init(&parser, np)) {
1671 dev_err(pcie->dev, "missing \"ranges\" property\n");
1672 return -EINVAL;
1675 for_each_of_pci_range(&parser, &range) {
1676 err = of_pci_range_to_resource(&range, np, &res);
1677 if (err < 0)
1678 return err;
1680 switch (res.flags & IORESOURCE_TYPE_BITS) {
1681 case IORESOURCE_IO:
1682 memcpy(&pcie->pio, &res, sizeof(res));
1683 pcie->pio.name = np->full_name;
1686 * The Tegra PCIe host bridge uses this to program the
1687 * mapping of the I/O space to the physical address,
1688 * so we override the .start and .end fields here that
1689 * of_pci_range_to_resource() converted to I/O space.
1690 * We also set the IORESOURCE_MEM type to clarify that
1691 * the resource is in the physical memory space.
1693 pcie->io.start = range.cpu_addr;
1694 pcie->io.end = range.cpu_addr + range.size - 1;
1695 pcie->io.flags = IORESOURCE_MEM;
1696 pcie->io.name = "I/O";
1698 memcpy(&res, &pcie->io, sizeof(res));
1699 break;
1701 case IORESOURCE_MEM:
1702 if (res.flags & IORESOURCE_PREFETCH) {
1703 memcpy(&pcie->prefetch, &res, sizeof(res));
1704 pcie->prefetch.name = "prefetchable";
1705 } else {
1706 memcpy(&pcie->mem, &res, sizeof(res));
1707 pcie->mem.name = "non-prefetchable";
1709 break;
/* grow the enclosing "all" resource to cover this range */
1712 if (res.start <= pcie->all.start)
1713 pcie->all.start = res.start;
1715 if (res.end >= pcie->all.end)
1716 pcie->all.end = res.end;
1719 err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
1720 if (err < 0)
1721 return err;
/* a missing bus-range property is not fatal: default to 0x00-0xff */
1723 err = of_pci_parse_bus_range(np, &pcie->busn);
1724 if (err < 0) {
1725 dev_err(pcie->dev, "failed to parse ranges property: %d\n",
1726 err);
1727 pcie->busn.name = np->name;
1728 pcie->busn.start = 0;
1729 pcie->busn.end = 0xff;
1730 pcie->busn.flags = IORESOURCE_BUS;
1733 /* parse root ports */
1734 for_each_child_of_node(np, port) {
1735 struct tegra_pcie_port *rp;
1736 unsigned int index;
1737 u32 value;
1739 err = of_pci_get_devfn(port);
1740 if (err < 0) {
1741 dev_err(pcie->dev, "failed to parse address: %d\n",
1742 err);
1743 return err;
/* root ports are numbered from 1 in DT; index is 0-based below */
1746 index = PCI_SLOT(err);
1748 if (index < 1 || index > soc->num_ports) {
1749 dev_err(pcie->dev, "invalid port number: %d\n", index);
1750 return -EINVAL;
1753 index--;
1755 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1756 if (err < 0) {
1757 dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
1758 err);
1759 return err;
1762 if (value > 16) {
1763 dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
1764 return -EINVAL;
/* one byte of lane count per port, consumed by get_xbar_config() */
1767 lanes |= value << (index << 3);
/* disabled ports still consume their lanes in the layout */
1769 if (!of_device_is_available(port)) {
1770 lane += value;
1771 continue;
1774 mask |= ((1 << value) - 1) << lane;
1775 lane += value;
1777 rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
1778 if (!rp)
1779 return -ENOMEM;
1781 err = of_address_to_resource(port, 0, &rp->regs);
1782 if (err < 0) {
1783 dev_err(pcie->dev, "failed to parse address: %d\n",
1784 err);
1785 return err;
1788 INIT_LIST_HEAD(&rp->list);
1789 rp->index = index;
1790 rp->lanes = value;
1791 rp->pcie = pcie;
1793 rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
1794 if (IS_ERR(rp->base))
1795 return PTR_ERR(rp->base);
1797 list_add_tail(&rp->list, &pcie->ports);
1800 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
1801 if (err < 0) {
1802 dev_err(pcie->dev, "invalid lane configuration\n");
1803 return err;
1806 err = tegra_pcie_get_regulators(pcie, mask);
1807 if (err < 0)
1808 return err;
1810 return 0;
1814 * FIXME: If there are no PCIe cards attached, then calling this function
1815 * can result in the increase of the bootup time as there are big timeout
1816 * loops.
1818 #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
/*
 * tegra_pcie_port_check_link() - wait for a root port's link to come up.
 *
 * Overrides presence detection, then polls RP_VEND_XP for data-link-up
 * followed by RP_LINK_CONTROL_STATUS for DL-link-active, each with a
 * TEGRA_PCIE_LINKUP_TIMEOUT poll budget.  On timeout the port is reset
 * and the whole sequence retried up to 3 times.  Returns true once the
 * link is active, false if all retries are exhausted.
 */
1819 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1821 unsigned int retries = 3;
1822 unsigned long value;
1824 /* override presence detection */
1825 value = readl(port->base + RP_PRIV_MISC);
1826 value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
1827 value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
1828 writel(value, port->base + RP_PRIV_MISC);
1830 do {
1831 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1833 do {
1834 value = readl(port->base + RP_VEND_XP);
1836 if (value & RP_VEND_XP_DL_UP)
1837 break;
1839 usleep_range(1000, 2000);
1840 } while (--timeout);
1842 if (!timeout) {
1843 dev_err(port->pcie->dev, "link %u down, retrying\n",
1844 port->index);
1845 goto retry;
1848 timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1850 do {
1851 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1853 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1854 return true;
1856 usleep_range(1000, 2000);
1857 } while (--timeout);
1859 retry:
1860 tegra_pcie_port_reset(port);
1861 } while (--retries);
1863 return false;
/*
 * tegra_pcie_enable() - bring up all root ports and register the bridge.
 *
 * Enables each parsed port and checks its link; ports whose link stays
 * down are disabled and removed from the list (hence the _safe list
 * walk).  Then fills in a hw_pci descriptor and hands control to the ARM
 * PCI core via pci_common_init_dev().  Always returns 0.
 */
1866 static int tegra_pcie_enable(struct tegra_pcie *pcie)
1868 struct tegra_pcie_port *port, *tmp;
1869 struct hw_pci hw;
1871 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1872 dev_info(pcie->dev, "probing port %u, using %u lanes\n",
1873 port->index, port->lanes);
1875 tegra_pcie_port_enable(port);
1877 if (tegra_pcie_port_check_link(port))
1878 continue;
1880 dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
1882 tegra_pcie_port_disable(port);
1883 tegra_pcie_port_free(port);
1886 memset(&hw, 0, sizeof(hw));
1888 #ifdef CONFIG_PCI_MSI
1889 hw.msi_ctrl = &pcie->msi.chip;
1890 #endif
1892 hw.nr_controllers = 1;
1893 hw.private_data = (void **)&pcie;
1894 hw.setup = tegra_pcie_setup;
1895 hw.map_irq = tegra_pcie_map_irq;
1896 hw.scan = tegra_pcie_scan_bus;
1897 hw.ops = &tegra_pcie_ops;
1899 pci_common_init_dev(pcie->dev, &hw);
1901 return 0;
/* Per-SoC capability tables: port count, MSI base shift, PLL control
 * values and feature flags for each supported Tegra generation. */
1904 static const struct tegra_pcie_soc_data tegra20_pcie_data = {
1905 .num_ports = 2,
1906 .msi_base_shift = 0,
1907 .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
1908 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
1909 .has_pex_clkreq_en = false,
1910 .has_pex_bias_ctrl = false,
1911 .has_intr_prsnt_sense = false,
1912 .has_cml_clk = false,
1913 .has_gen2 = false,
1916 static const struct tegra_pcie_soc_data tegra30_pcie_data = {
1917 .num_ports = 3,
1918 .msi_base_shift = 8,
1919 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1920 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1921 .has_pex_clkreq_en = true,
1922 .has_pex_bias_ctrl = true,
1923 .has_intr_prsnt_sense = true,
1924 .has_cml_clk = true,
1925 .has_gen2 = false,
1928 static const struct tegra_pcie_soc_data tegra124_pcie_data = {
1929 .num_ports = 2,
1930 .msi_base_shift = 8,
1931 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1932 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1933 .has_pex_clkreq_en = true,
1934 .has_pex_bias_ctrl = true,
1935 .has_intr_prsnt_sense = true,
1936 .has_cml_clk = true,
1937 .has_gen2 = true,
/* Match table: most specific (newest) compatible first. */
1940 static const struct of_device_id tegra_pcie_of_match[] = {
1941 { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
1942 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
1943 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
1944 { },
1946 MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
/* seq_file ->start: print the header row and begin iterating the port
 * list; returns NULL (empty file) when no ports are present. */
1948 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
1950 struct tegra_pcie *pcie = s->private;
1952 if (list_empty(&pcie->ports))
1953 return NULL;
1955 seq_printf(s, "Index Status\n");
1957 return seq_list_start(&pcie->ports, *pos);
/* seq_file ->next: advance to the next port in the list. */
1960 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
1962 struct tegra_pcie *pcie = s->private;
1964 return seq_list_next(v, &pcie->ports, pos);
/* seq_file ->stop: nothing to release. */
1967 static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
/* seq_file ->show: print one port's index and link state ("up" and/or
 * "active"), read live from RP_VEND_XP and RP_LINK_CONTROL_STATUS. */
1971 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
1973 bool up = false, active = false;
1974 struct tegra_pcie_port *port;
1975 unsigned int value;
1977 port = list_entry(v, struct tegra_pcie_port, list);
1979 value = readl(port->base + RP_VEND_XP);
1981 if (value & RP_VEND_XP_DL_UP)
1982 up = true;
1984 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1986 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1987 active = true;
1989 seq_printf(s, "%2u ", port->index);
1991 if (up)
1992 seq_printf(s, "up");
1994 if (active) {
1995 if (up)
1996 seq_printf(s, ", ");
1998 seq_printf(s, "active");
2001 seq_printf(s, "\n");
2002 return 0;
/* seq_file iterator for the debugfs "ports" file. */
2005 static const struct seq_operations tegra_pcie_ports_seq_ops = {
2006 .start = tegra_pcie_ports_seq_start,
2007 .next = tegra_pcie_ports_seq_next,
2008 .stop = tegra_pcie_ports_seq_stop,
2009 .show = tegra_pcie_ports_seq_show,
/* ->open for the debugfs "ports" file: start the seq_file iterator and
 * stash the tegra_pcie pointer (from inode->i_private) as its private
 * data. */
2012 static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2014 struct tegra_pcie *pcie = inode->i_private;
2015 struct seq_file *s;
2016 int err;
2018 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2019 if (err)
2020 return err;
2022 s = file->private_data;
2023 s->private = pcie;
2025 return 0;
/* file_operations for the debugfs "ports" file (standard seq_file glue). */
2028 static const struct file_operations tegra_pcie_ports_ops = {
2029 .owner = THIS_MODULE,
2030 .open = tegra_pcie_ports_open,
2031 .read = seq_read,
2032 .llseek = seq_lseek,
2033 .release = seq_release,
/*
 * tegra_pcie_debugfs_init() - create debugfs directory "pcie" with a
 * read-only "ports" file showing per-port link status.  On partial
 * failure the whole directory is removed.  Returns 0 or -ENOMEM.
 */
2036 static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2038 struct dentry *file;
2040 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2041 if (!pcie->debugfs)
2042 return -ENOMEM;
2044 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2045 pcie, &tegra_pcie_ports_ops);
2046 if (!file)
2047 goto remove;
2049 return 0;
2051 remove:
2052 debugfs_remove_recursive(pcie->debugfs);
2053 pcie->debugfs = NULL;
2054 return -ENOMEM;
/*
 * tegra_pcie_probe() - driver probe.
 *
 * Sequence: match SoC data, allocate state, parse device tree, acquire
 * resources (clocks/resets/PHY/power/MMIO/IRQ), init the controller,
 * program the AFI address translations, optionally enable MSI, enable
 * the root ports and register with the PCI core, optionally set up
 * debugfs (non-fatal if it fails).  Errors unwind through the labels at
 * the bottom.  Returns 0 on success or a negative errno.
 */
2057 static int tegra_pcie_probe(struct platform_device *pdev)
2059 const struct of_device_id *match;
2060 struct tegra_pcie *pcie;
2061 int err;
2063 match = of_match_device(tegra_pcie_of_match, &pdev->dev);
2064 if (!match)
2065 return -ENODEV;
2067 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
2068 if (!pcie)
2069 return -ENOMEM;
2071 INIT_LIST_HEAD(&pcie->buses);
2072 INIT_LIST_HEAD(&pcie->ports);
2073 pcie->soc_data = match->data;
2074 pcie->dev = &pdev->dev;
2076 err = tegra_pcie_parse_dt(pcie);
2077 if (err < 0)
2078 return err;
2080 pcibios_min_mem = 0;
2082 err = tegra_pcie_get_resources(pcie);
2083 if (err < 0) {
2084 dev_err(&pdev->dev, "failed to request resources: %d\n", err);
2085 return err;
2088 err = tegra_pcie_enable_controller(pcie);
2089 if (err)
2090 goto put_resources;
2092 /* setup the AFI address translations */
2093 tegra_pcie_setup_translations(pcie);
2095 if (IS_ENABLED(CONFIG_PCI_MSI)) {
2096 err = tegra_pcie_enable_msi(pcie);
2097 if (err < 0) {
2098 dev_err(&pdev->dev,
2099 "failed to enable MSI support: %d\n",
2100 err);
2101 goto put_resources;
2105 err = tegra_pcie_enable(pcie);
2106 if (err < 0) {
2107 dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
2108 goto disable_msi;
/* debugfs failure is deliberately non-fatal: only log it */
2111 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2112 err = tegra_pcie_debugfs_init(pcie);
2113 if (err < 0)
2114 dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
2115 err);
2118 platform_set_drvdata(pdev, pcie);
2119 return 0;
2121 disable_msi:
2122 if (IS_ENABLED(CONFIG_PCI_MSI))
2123 tegra_pcie_disable_msi(pcie);
2124 put_resources:
2125 tegra_pcie_put_resources(pcie);
2126 return err;
/* Platform driver registration; bind/unbind via sysfs is suppressed
 * because the driver has no remove callback. */
2129 static struct platform_driver tegra_pcie_driver = {
2130 .driver = {
2131 .name = "tegra-pcie",
2132 .owner = THIS_MODULE,
2133 .of_match_table = tegra_pcie_of_match,
2134 .suppress_bind_attrs = true,
2136 .probe = tegra_pcie_probe,
2138 module_platform_driver(tegra_pcie_driver);
2140 MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
2141 MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
2142 MODULE_LICENSE("GPL v2");