jme: Do not enable NIC WoL functions on S0
[linux/fpc-iii.git] / drivers / pci / host / pci-tegra.c
blob0407b1d6bab8e608c573880edc11695ddf355d04
1 /*
2 * PCIe host controller driver for Tegra SoCs
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
10 * Bits taken from arch/arm/mach-dove/pcie.c
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
27 #include <linux/clk.h>
28 #include <linux/delay.h>
29 #include <linux/export.h>
30 #include <linux/interrupt.h>
31 #include <linux/irq.h>
32 #include <linux/irqdomain.h>
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/msi.h>
36 #include <linux/of_address.h>
37 #include <linux/of_pci.h>
38 #include <linux/of_platform.h>
39 #include <linux/pci.h>
40 #include <linux/platform_device.h>
41 #include <linux/reset.h>
42 #include <linux/sizes.h>
43 #include <linux/slab.h>
44 #include <linux/tegra-cpuidle.h>
45 #include <linux/tegra-powergate.h>
46 #include <linux/vmalloc.h>
47 #include <linux/regulator/consumer.h>
49 #include <asm/mach/irq.h>
50 #include <asm/mach/map.h>
51 #include <asm/mach/pci.h>
/* 8 AFI_MSI_VEC* registers x 32 bits each */
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

/* AXI BAR sizes, programmed in 4 KiB units (see tegra_pcie_setup_translations()) */
#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

/* AXI-side BAR start addresses */
#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

/* FPCI-side translation targets for the corresponding AXI BARs */
#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

/* upstream (device-to-memory) cacheability windows */
#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

/* MSI target window */
#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

/* MSI pending-vector registers, 32 vectors per register */
#define AFI_MSI_VEC0	0x6c
#define AFI_MSI_VEC1	0x70
#define AFI_MSI_VEC2	0x74
#define AFI_MSI_VEC3	0x78
#define AFI_MSI_VEC4	0x7c
#define AFI_MSI_VEC5	0x80
#define AFI_MSI_VEC6	0x84
#define AFI_MSI_VEC7	0x88

/* per-vector MSI enable registers, matching AFI_MSI_VEC* */
#define AFI_MSI_EN_VEC0	0x8c
#define AFI_MSI_EN_VEC1	0x90
#define AFI_MSI_EN_VEC2	0x94
#define AFI_MSI_EN_VEC3	0x98
#define AFI_MSI_EN_VEC4	0x9c
#define AFI_MSI_EN_VEC5	0xa0
#define AFI_MSI_EN_VEC6	0xa4
#define AFI_MSI_EN_VEC7	0xa8

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

/* error/interrupt cause codes reported in AFI_INTR_CODE (see tegra_pcie_isr()) */
#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_AXI_SLAVE_ERROR	1
#define  AFI_INTR_AXI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

/* root-port enable and lane crossbar (XBAR) configuration */
#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

/* per-port control registers (see tegra_pcie_port_get_pex_ctrl()) */
#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX2_CTRL			0x128
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)

#define AFI_PEXBIAS_CTRL_0		0x168

/* root-port (RP) configuration-space vendor registers */
#define RP_VEND_XP	0x00000F00
#define  RP_VEND_XP_DL_UP	(1 << 30)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

/* PHY pad control registers */
#define PADS_CTL_SEL		0x0000009C

#define PADS_CTL		0x000000A0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

/* PLL control register offset differs per SoC generation (soc_data->pads_pll_ctl) */
#define PADS_PLL_CTL_TEGRA20			0x000000B8
#define PADS_PLL_CTL_TEGRA30			0x000000B4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000C8
#define PADS_REFCLK_CFG1			0x000000CC

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */

/* Default value provided by HW engineering is 0xfa5c */
#define PADS_REFCLK_CFG_VALUE \
	( \
		(0x17 << PADS_REFCLK_CFG_TERM_SHIFT)   | \
		(0    << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
		(0xa  << PADS_REFCLK_CFG_PREDI_SHIFT)  | \
		(0xf  << PADS_REFCLK_CFG_DRVI_SHIFT)   \
	)
/*
 * Per-controller MSI state. One bit in @used per hardware MSI vector
 * (INT_PCI_MSI_NR total); allocation/free is serialized by @lock.
 */
struct tegra_msi {
	struct msi_chip chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated vectors, protected by @lock */
	struct irq_domain *domain;		/* maps hardware vector -> Linux IRQ */
	unsigned long pages;			/* page(s) whose physical address serves as the MSI target */
	struct mutex lock;			/* protects @used */
	int irq;				/* parent interrupt delivering all MSI vectors */
};
/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc_data {
	unsigned int num_ports;		/* number of root ports this SoC provides */
	unsigned int msi_base_shift;	/* NOTE(review): not used in this chunk; semantics per SoC MSI layout */
	u32 pads_pll_ctl;		/* PADS_PLL_CTL_TEGRA20 or PADS_PLL_CTL_TEGRA30 */
	u32 tx_ref_sel;			/* TX clock reference divider, ORed into the PLL control register */
	bool has_pex_clkreq_en;		/* port supports AFI_PEX_CTRL_CLKREQ_EN */
	bool has_pex_bias_ctrl;		/* AFI_PEXBIAS_CTRL_0 register present */
	bool has_intr_prsnt_sense;	/* supports AFI_INTR_EN_PRSNT_SENSE */
	bool has_avdd_supply;		/* separate AVDD regulator required */
	bool has_cml_clk;		/* separate CML clock required */
};
/* recover the containing tegra_msi from an embedded msi_chip pointer */
static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
/* driver-private state for one Tegra PCIe host controller instance */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;		/* PHY pad register window */
	void __iomem *afi;		/* AFI (AXI-to-FPCI bridge) register window */
	int irq;			/* controller error/legacy interrupt */

	struct list_head buses;		/* on-demand config-space mappings (tegra_pcie_bus) */
	struct resource *cs;		/* physical configuration-space aperture */

	struct resource io;		/* downstream I/O window */
	struct resource mem;		/* non-prefetchable memory window */
	struct resource prefetch;	/* prefetchable memory window */
	struct resource busn;		/* bus number range */

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;		/* only on SoCs with soc_data->has_cml_clk */

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	struct tegra_msi msi;

	struct list_head ports;		/* enabled root ports (tegra_pcie_port) */
	unsigned int num_ports;
	u32 xbar_config;		/* lane crossbar setting for AFI_PCIE_CONFIG */

	struct regulator *pex_clk_supply;
	struct regulator *vdd_supply;
	struct regulator *avdd_supply;	/* only on SoCs with soc_data->has_avdd_supply */

	const struct tegra_pcie_soc_data *soc_data;
};
/* one root port of the controller */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct list_head list;		/* entry in tegra_pcie.ports */
	struct resource regs;		/* root-port register aperture */
	void __iomem *base;		/* mapped root-port registers */
	unsigned int index;		/* port number, selects AFI_PEX*_CTRL */
	unsigned int lanes;		/* number of lanes assigned to this port */
};
/* lazily-created virtual mapping of one bus's configuration space */
struct tegra_pcie_bus {
	struct vm_struct *area;		/* 1 MiB virtual area (see tegra_pcie_bus_alloc()) */
	struct list_head list;		/* entry in tegra_pcie.buses */
	unsigned int nr;		/* bus number this mapping covers */
};
/* retrieve the driver state stashed in the per-bus sysdata */
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
/* register accessors for the AFI and PADS windows */

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 * To work around this, a 1 MiB of virtual addresses are allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * the bus number bits are hidden so that the extended register number bits
 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 *
 *    [19:16] extended register number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 * address space via the MMU.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long ext = (where & 0xf00) << 8;	/* extended register -> [19:16] */
	unsigned long dev = PCI_SLOT(devfn) << 11;	/* device number    -> [15:11] */
	unsigned long fn = PCI_FUNC(devfn) << 8;	/* function number  -> [10:8]  */

	return ext | dev | fn | (where & 0xfc);		/* dword-aligned register */
}
/*
 * Create the 1 MiB virtual configuration-space mapping for bus @busnr:
 * reserve a VM area, then stitch in 16 chunks of 64 KiB of the physical
 * aperture so the extended register bits land at [19:16] (see the mapping
 * comment above tegra_pcie_conf_offset()).
 *
 * Returns the new mapping or an ERR_PTR() on failure.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	/* device-type, non-executable, shared mapping attributes (ARM) */
	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		/* chunk i carries extended-register nibble i for this bus */
		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
				err);
			goto unmap;
		}
	}

	return bus;

unmap:
	/* vunmap() also tears down any partially-established page mappings */
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
/*
 * Look up a virtual address mapping for the specified bus number. If no such
 * mapping exists, try to create one. Returns NULL if the mapping could not
 * be created.
 */
static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
					unsigned int busnr)
{
	struct tegra_pcie_bus *bus;

	/* fast path: reuse an existing mapping */
	list_for_each_entry(bus, &pcie->buses, list)
		if (bus->nr == busnr)
			return (void __iomem *)bus->area->addr;

	bus = tegra_pcie_bus_alloc(pcie, busnr);
	if (IS_ERR(bus))
		return NULL;

	/* cache the mapping for subsequent accesses */
	list_add_tail(&bus->list, &pcie->buses);

	return (void __iomem *)bus->area->addr;
}
/*
 * Translate a (bus, devfn, where) triple into a virtual address.
 *
 * Accesses on the root bus (0) address the root ports themselves: slot N
 * maps to the port with index N - 1. Accesses on downstream buses go
 * through the on-demand configuration-space mapping. Returns NULL when no
 * matching port exists or the mapping could not be created.
 */
static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
					     unsigned int devfn,
					     int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		addr = tegra_pcie_bus_map(pcie, bus->number);
		if (!addr) {
			dev_err(pcie->dev,
				"failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}
457 static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
458 int where, int size, u32 *value)
460 void __iomem *addr;
462 addr = tegra_pcie_conf_address(bus, devfn, where);
463 if (!addr) {
464 *value = 0xffffffff;
465 return PCIBIOS_DEVICE_NOT_FOUND;
468 *value = readl(addr);
470 if (size == 1)
471 *value = (*value >> (8 * (where & 3))) & 0xff;
472 else if (size == 2)
473 *value = (*value >> (8 * (where & 3))) & 0xffff;
475 return PCIBIOS_SUCCESSFUL;
478 static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
479 int where, int size, u32 value)
481 void __iomem *addr;
482 u32 mask, tmp;
484 addr = tegra_pcie_conf_address(bus, devfn, where);
485 if (!addr)
486 return PCIBIOS_DEVICE_NOT_FOUND;
488 if (size == 4) {
489 writel(value, addr);
490 return PCIBIOS_SUCCESSFUL;
493 if (size == 2)
494 mask = ~(0xffff << ((where & 0x3) * 8));
495 else if (size == 1)
496 mask = ~(0xff << ((where & 0x3) * 8));
497 else
498 return PCIBIOS_BAD_REGISTER_NUMBER;
500 tmp = readl(addr) & mask;
501 tmp |= value << ((where & 0x3) * 8);
502 writel(tmp, addr);
504 return PCIBIOS_SUCCESSFUL;
/* configuration-space accessors handed to the PCI core */
static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};
512 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
514 unsigned long ret = 0;
516 switch (port->index) {
517 case 0:
518 ret = AFI_PEX0_CTRL;
519 break;
521 case 1:
522 ret = AFI_PEX1_CTRL;
523 break;
525 case 2:
526 ret = AFI_PEX2_CTRL;
527 break;
530 return ret;
/*
 * Pulse the port's reset line: assert (bit cleared, active-low), hold for
 * 1-2 ms, then deassert.
 */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
/* enable the port's reference clock (and CLKREQ where supported), then reset it */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}
/* hold the port in reset and gate its reference clock */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
/* release a port's mapping and memory and unlink it from the port list */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}
/* enable I/O, memory, bus mastering and SERR reporting on every bridge */
static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
	u16 reg;

	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
		pci_read_config_word(dev, PCI_COMMAND, &reg);
		reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
		pci_write_config_word(dev, PCI_COMMAND, reg);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
/* applies to the Tegra20 (0x0bf0/0x0bf1) and Tegra30 (0x0e1c/0x0e1d) root ports */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
/*
 * hw_pci .setup callback: hand this controller's memory, prefetchable and
 * bus-number resources to the PCI core and map the I/O window. Returns 1
 * to indicate the controller is present.
 */
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	/* each controller gets its own 64 KiB slice of the I/O space */
	pci_ioremap_io(nr * SZ_64K, pcie->io.start);

	return 1;
}
/*
 * hw_pci .map_irq callback: resolve a device's legacy interrupt from the
 * device tree, falling back to the controller interrupt when no mapping
 * exists.
 */
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
	int irq;

	/* tell cpuidle that PCIe legacy interrupts are now in use */
	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}
/* attach this controller's MSI chip to every newly added bus (MSI builds only) */
static void tegra_pcie_add_bus(struct pci_bus *bus)
{
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);

		bus->msi = &pcie->msi.chip;
	}
}
/* hw_pci .scan callback: create the root bus and enumerate everything below it */
static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	struct pci_bus *bus;

	bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
				  &sys->resources);
	if (!bus)
		return NULL;

	pci_scan_child_bus(bus);

	return bus;
}
677 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
679 const char *err_msg[] = {
680 "Unknown",
681 "AXI slave error",
682 "AXI decode error",
683 "Target abort",
684 "Master abort",
685 "Invalid write",
686 "Response decoding error",
687 "AXI response decoding error",
688 "Transaction timeout",
690 struct tegra_pcie *pcie = arg;
691 u32 code, signature;
693 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
694 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
695 afi_writel(pcie, 0, AFI_INTR_CODE);
697 if (code == AFI_INTR_LEGACY)
698 return IRQ_NONE;
700 if (code >= ARRAY_SIZE(err_msg))
701 code = 0;
704 * do not pollute kernel log with master abort reports since they
705 * happen a lot during enumeration
707 if (code == AFI_INTR_MASTER_ABORT)
708 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
709 signature);
710 else
711 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
712 signature);
714 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
715 code == AFI_INTR_FPCI_DECODE_ERROR) {
716 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
717 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
719 if (code == AFI_INTR_MASTER_ABORT)
720 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
721 else
722 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
725 return IRQ_HANDLED;
/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);	/* sizes are in 4 KiB units */
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/* NOTE(review): AFI_MSI_BAR_SZ is written twice; the second write
	 * looks redundant — confirm against the TRM / later upstream code. */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
/*
 * Bring up the controller: program the lane crossbar and per-port enables,
 * initialize the PHY pads and wait for the PLL to lock, release the PCIe
 * reset and unmask the error interrupts. The statement order follows the
 * hardware bring-up sequence and must not be rearranged.
 *
 * Returns 0 on success or -EBUSY if the PLL fails to lock.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned int timeout;
	unsigned long value;

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* ...then re-enable only the ports found in the device tree */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	value = afi_readl(pcie, AFI_FUSE);
	value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(pcie, value, AFI_FUSE);

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock (up to ~300-600 ms) */
	timeout = 300;
	do {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		usleep_range(1000, 2000);
		if (--timeout == 0) {
			pr_err("Tegra PCIe error: timeout waiting for PLL\n");
			return -EBUSY;
		}
	} while (!(value & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
/*
 * Power down the controller: assert all resets, power-gate the PCIe
 * partition and disable the regulators (reverse of tegra_pcie_power_on()).
 * Regulator failures are only warned about since there is no recovery path.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	/* TODO: disable and unprepare clocks? */

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	if (soc->has_avdd_supply) {
		err = regulator_disable(pcie->avdd_supply);
		if (err < 0)
			dev_warn(pcie->dev,
				 "failed to disable AVDD regulator: %d\n",
				 err);
	}

	err = regulator_disable(pcie->pex_clk_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
			 err);

	err = regulator_disable(pcie->vdd_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
			 err);
}
/*
 * Power up the controller: put everything in reset and power-gate the
 * partition to reach a known state, enable the regulators, run the
 * powergate's power-up sequence, then ungate the resets and clocks.
 *
 * NOTE(review): error paths return without rolling back regulators/clocks
 * that were already enabled; callers appear to rely on a subsequent
 * tegra_pcie_power_off() — confirm before changing.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	/* start from a known state: everything in reset, partition gated */
	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_enable(pcie->vdd_supply);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
		return err;
	}

	err = regulator_enable(pcie->pex_clk_supply);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
			err);
		return err;
	}

	if (soc->has_avdd_supply) {
		err = regulator_enable(pcie->avdd_supply);
		if (err < 0) {
			dev_err(pcie->dev,
				"failed to enable AVDD regulator: %d\n",
				err);
			return err;
		}
	}

	/* ungate the partition with the PEX clock running and reset released */
	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(pcie->dev, "failed to enable CML clock: %d\n",
				err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
/*
 * Acquire all clocks this SoC generation needs (managed; released
 * automatically on driver detach). Returns 0 or the first PTR_ERR().
 */
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;

	pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}
/*
 * Acquire the three reset controls used by the driver (managed). Returns
 * 0 or the first PTR_ERR().
 */
static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}
/*
 * Acquire every resource the controller needs: clocks, resets, power,
 * the "pads"/"afi" register windows, the configuration-space aperture and
 * the error interrupt. On any failure after power-up, powers the
 * controller back off before returning the error.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get resets: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to power up: %d\n", err);
		return err;
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
/*
 * Release the resources acquired by tegra_pcie_get_resources(): free the
 * interrupt (if one was obtained) and power the controller down. The
 * devm-managed resources are released by the driver core. Always returns 0.
 */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);
	return 0;
}
1121 static int tegra_msi_alloc(struct tegra_msi *chip)
1123 int msi;
1125 mutex_lock(&chip->lock);
1127 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1128 if (msi < INT_PCI_MSI_NR)
1129 set_bit(msi, chip->used);
1130 else
1131 msi = -ENOSPC;
1133 mutex_unlock(&chip->lock);
1135 return msi;
1138 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1140 struct device *dev = chip->chip.dev;
1142 mutex_lock(&chip->lock);
1144 if (!test_bit(irq, chip->used))
1145 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1146 else
1147 clear_bit(irq, chip->used);
1149 mutex_unlock(&chip->lock);
/*
 * Chained MSI interrupt handler: scan the 8 pending-vector registers,
 * acknowledge each pending bit and dispatch the corresponding Linux IRQ.
 * The register is re-read after each dispatch to pick up vectors that
 * became pending in the meantime.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1193 static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1194 struct msi_desc *desc)
1196 struct tegra_msi *msi = to_tegra_msi(chip);
1197 struct msi_msg msg;
1198 unsigned int irq;
1199 int hwirq;
1201 hwirq = tegra_msi_alloc(msi);
1202 if (hwirq < 0)
1203 return hwirq;
1205 irq = irq_create_mapping(msi->domain, hwirq);
1206 if (!irq)
1207 return -EINVAL;
1209 irq_set_msi_desc(irq, desc);
1211 msg.address_lo = virt_to_phys((void *)msi->pages);
1212 /* 32 bit address only */
1213 msg.address_hi = 0;
1214 msg.data = hwirq;
1216 write_msi_msg(irq, &msg);
1218 return 0;
/*
 * msi_chip .teardown_irq callback: return the hardware vector backing
 * @irq to the allocation bitmap. d->hwirq is the index handed out by
 * tegra_msi_alloc() in tegra_msi_setup_irq().
 */
static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	tegra_msi_free(msi, d->hwirq);
}
/*
 * Per-vector IRQ chip for the MSI domain; masking/unmasking is done
 * through the generic MSI helpers (which write the PCI MSI mask bits).
 */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
/*
 * irq_domain .map callback: attach the MSI irq_chip and the simple-IRQ
 * flow handler to a newly created mapping and mark it valid.
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	/* host_data is the struct tegra_msi passed at domain creation */
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	/* tell the cpuidle code PCIe interrupts are live (LP2 constraint) */
	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
/* Linear MSI IRQ domain: only .map is needed, unmapping uses defaults. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1253 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1255 struct platform_device *pdev = to_platform_device(pcie->dev);
1256 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1257 struct tegra_msi *msi = &pcie->msi;
1258 unsigned long base;
1259 int err;
1260 u32 reg;
1262 mutex_init(&msi->lock);
1264 msi->chip.dev = pcie->dev;
1265 msi->chip.setup_irq = tegra_msi_setup_irq;
1266 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1268 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1269 &msi_domain_ops, &msi->chip);
1270 if (!msi->domain) {
1271 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1272 return -ENOMEM;
1275 err = platform_get_irq_byname(pdev, "msi");
1276 if (err < 0) {
1277 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1278 goto err;
1281 msi->irq = err;
1283 err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1284 tegra_msi_irq_chip.name, pcie);
1285 if (err < 0) {
1286 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1287 goto err;
1290 /* setup AFI/FPCI range */
1291 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1292 base = virt_to_phys((void *)msi->pages);
1294 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1295 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1296 /* this register is in 4K increments */
1297 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1299 /* enable all MSI vectors */
1300 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1301 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1302 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1303 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1304 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1305 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1306 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1307 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1309 /* and unmask the MSI interrupt */
1310 reg = afi_readl(pcie, AFI_INTR_MASK);
1311 reg |= AFI_INTR_MASK_MSI_MASK;
1312 afi_writel(pcie, reg, AFI_INTR_MASK);
1314 return 0;
1316 err:
1317 irq_domain_remove(msi->domain);
1318 return err;
1321 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1323 struct tegra_msi *msi = &pcie->msi;
1324 unsigned int i, irq;
1325 u32 value;
1327 /* mask the MSI interrupt */
1328 value = afi_readl(pcie, AFI_INTR_MASK);
1329 value &= ~AFI_INTR_MASK_MSI_MASK;
1330 afi_writel(pcie, value, AFI_INTR_MASK);
1332 /* disable all MSI vectors */
1333 afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1334 afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1335 afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1336 afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1337 afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1338 afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1339 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1340 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1342 free_pages(msi->pages, 0);
1344 if (msi->irq > 0)
1345 free_irq(msi->irq, pcie);
1347 for (i = 0; i < INT_PCI_MSI_NR; i++) {
1348 irq = irq_find_mapping(msi->domain, i);
1349 if (irq > 0)
1350 irq_dispose_mapping(irq);
1353 irq_domain_remove(msi->domain);
1355 return 0;
/*
 * Translate the packed lane configuration into the SoC-specific XBAR
 * setting. @lanes holds one lane count per root port, one byte per
 * port (packed by tegra_pcie_parse_dt() as value << (index * 8)).
 *
 * Returns 0 and stores the setting in *xbar, or -EINVAL for a
 * combination the hardware does not support.
 */
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device_node *np = pcie->dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			/* port 0: 4 lanes, port 1: 2 lanes */
			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			/* 2 lanes on each of the three ports */
			dev_info(pcie->dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			/* port 0: 4 lanes, ports 1 and 2: 1 lane each */
			dev_info(pcie->dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			/* one port with 4 lanes */
			dev_info(pcie->dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			/* two ports with 2 lanes each */
			dev_info(pcie->dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
1397 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1399 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1400 struct device_node *np = pcie->dev->of_node, *port;
1401 struct of_pci_range_parser parser;
1402 struct of_pci_range range;
1403 struct resource res;
1404 u32 lanes = 0;
1405 int err;
1407 if (of_pci_range_parser_init(&parser, np)) {
1408 dev_err(pcie->dev, "missing \"ranges\" property\n");
1409 return -EINVAL;
1412 pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
1413 if (IS_ERR(pcie->vdd_supply))
1414 return PTR_ERR(pcie->vdd_supply);
1416 pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
1417 if (IS_ERR(pcie->pex_clk_supply))
1418 return PTR_ERR(pcie->pex_clk_supply);
1420 if (soc->has_avdd_supply) {
1421 pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
1422 if (IS_ERR(pcie->avdd_supply))
1423 return PTR_ERR(pcie->avdd_supply);
1426 for_each_of_pci_range(&parser, &range) {
1427 of_pci_range_to_resource(&range, np, &res);
1429 switch (res.flags & IORESOURCE_TYPE_BITS) {
1430 case IORESOURCE_IO:
1431 memcpy(&pcie->io, &res, sizeof(res));
1432 pcie->io.name = "I/O";
1433 break;
1435 case IORESOURCE_MEM:
1436 if (res.flags & IORESOURCE_PREFETCH) {
1437 memcpy(&pcie->prefetch, &res, sizeof(res));
1438 pcie->prefetch.name = "PREFETCH";
1439 } else {
1440 memcpy(&pcie->mem, &res, sizeof(res));
1441 pcie->mem.name = "MEM";
1443 break;
1447 err = of_pci_parse_bus_range(np, &pcie->busn);
1448 if (err < 0) {
1449 dev_err(pcie->dev, "failed to parse ranges property: %d\n",
1450 err);
1451 pcie->busn.name = np->name;
1452 pcie->busn.start = 0;
1453 pcie->busn.end = 0xff;
1454 pcie->busn.flags = IORESOURCE_BUS;
1457 /* parse root ports */
1458 for_each_child_of_node(np, port) {
1459 struct tegra_pcie_port *rp;
1460 unsigned int index;
1461 u32 value;
1463 err = of_pci_get_devfn(port);
1464 if (err < 0) {
1465 dev_err(pcie->dev, "failed to parse address: %d\n",
1466 err);
1467 return err;
1470 index = PCI_SLOT(err);
1472 if (index < 1 || index > soc->num_ports) {
1473 dev_err(pcie->dev, "invalid port number: %d\n", index);
1474 return -EINVAL;
1477 index--;
1479 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1480 if (err < 0) {
1481 dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
1482 err);
1483 return err;
1486 if (value > 16) {
1487 dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
1488 return -EINVAL;
1491 lanes |= value << (index << 3);
1493 if (!of_device_is_available(port))
1494 continue;
1496 rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
1497 if (!rp)
1498 return -ENOMEM;
1500 err = of_address_to_resource(port, 0, &rp->regs);
1501 if (err < 0) {
1502 dev_err(pcie->dev, "failed to parse address: %d\n",
1503 err);
1504 return err;
1507 INIT_LIST_HEAD(&rp->list);
1508 rp->index = index;
1509 rp->lanes = value;
1510 rp->pcie = pcie;
1512 rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
1513 if (IS_ERR(rp->base))
1514 return PTR_ERR(rp->base);
1516 list_add_tail(&rp->list, &pcie->ports);
1519 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
1520 if (err < 0) {
1521 dev_err(pcie->dev, "invalid lane configuration\n");
1522 return err;
1525 return 0;
/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */

/*
 * Wait for a root port's link to come up: first poll RP_VEND_XP for the
 * data-link-up bit, then RP_LINK_CONTROL_STATUS for DL_LINK_ACTIVE.
 * Each poll loop runs up to TEGRA_PCIE_LINKUP_TIMEOUT iterations with a
 * 1-2 ms sleep between reads; on timeout the port is reset and the
 * whole sequence retried, up to three times in total.
 *
 * Returns true once the link is active, false if it never came up.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	unsigned int retries = 3;
	unsigned long value;

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(port->pcie->dev, "link %u down, retrying\n",
				port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		/* reset the port and start over */
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
1575 static int tegra_pcie_enable(struct tegra_pcie *pcie)
1577 struct tegra_pcie_port *port, *tmp;
1578 struct hw_pci hw;
1580 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1581 dev_info(pcie->dev, "probing port %u, using %u lanes\n",
1582 port->index, port->lanes);
1584 tegra_pcie_port_enable(port);
1586 if (tegra_pcie_port_check_link(port))
1587 continue;
1589 dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
1591 tegra_pcie_port_disable(port);
1592 tegra_pcie_port_free(port);
1595 memset(&hw, 0, sizeof(hw));
1597 hw.nr_controllers = 1;
1598 hw.private_data = (void **)&pcie;
1599 hw.setup = tegra_pcie_setup;
1600 hw.map_irq = tegra_pcie_map_irq;
1601 hw.add_bus = tegra_pcie_add_bus;
1602 hw.scan = tegra_pcie_scan_bus;
1603 hw.ops = &tegra_pcie_ops;
1605 pci_common_init_dev(pcie->dev, &hw);
1607 return 0;
/* Tegra20: two root ports, none of the later-generation features. */
static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_avdd_supply = false,
	.has_cml_clk = false,
};
/*
 * Tegra30: three root ports; additionally needs the AVDD supply and
 * CML clock, and supports CLKREQ, bias control and presence sense.
 */
static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_avdd_supply = true,
	.has_cml_clk = true,
};
/* Match table; the more capable Tegra30 entry is listed first. */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1641 static int tegra_pcie_probe(struct platform_device *pdev)
1643 const struct of_device_id *match;
1644 struct tegra_pcie *pcie;
1645 int err;
1647 match = of_match_device(tegra_pcie_of_match, &pdev->dev);
1648 if (!match)
1649 return -ENODEV;
1651 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1652 if (!pcie)
1653 return -ENOMEM;
1655 INIT_LIST_HEAD(&pcie->buses);
1656 INIT_LIST_HEAD(&pcie->ports);
1657 pcie->soc_data = match->data;
1658 pcie->dev = &pdev->dev;
1660 err = tegra_pcie_parse_dt(pcie);
1661 if (err < 0)
1662 return err;
1664 pcibios_min_mem = 0;
1666 err = tegra_pcie_get_resources(pcie);
1667 if (err < 0) {
1668 dev_err(&pdev->dev, "failed to request resources: %d\n", err);
1669 return err;
1672 err = tegra_pcie_enable_controller(pcie);
1673 if (err)
1674 goto put_resources;
1676 /* setup the AFI address translations */
1677 tegra_pcie_setup_translations(pcie);
1679 if (IS_ENABLED(CONFIG_PCI_MSI)) {
1680 err = tegra_pcie_enable_msi(pcie);
1681 if (err < 0) {
1682 dev_err(&pdev->dev,
1683 "failed to enable MSI support: %d\n",
1684 err);
1685 goto put_resources;
1689 err = tegra_pcie_enable(pcie);
1690 if (err < 0) {
1691 dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
1692 goto disable_msi;
1695 platform_set_drvdata(pdev, pcie);
1696 return 0;
1698 disable_msi:
1699 if (IS_ENABLED(CONFIG_PCI_MSI))
1700 tegra_pcie_disable_msi(pcie);
1701 put_resources:
1702 tegra_pcie_put_resources(pcie);
1703 return err;
/* No .remove: unbinding is suppressed via suppress_bind_attrs. */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.owner = THIS_MODULE,
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);
1717 MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
1718 MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
1719 MODULE_LICENSE("GPLv2");