/*
 * Captured from linux/fpc-iii.git: drivers/pci/host/pci-tegra.c
 * (blob 330f7e3a32dd9f3694a09844106b551751235296; the web view was taken
 * from a tree whose HEAD commit was "mm/compaction: break out of loop on
 * !PageBuddy in isolate_freepages_block").
 */
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/tegra-cpuidle.h>
#include <linux/tegra-powergate.h>
#include <linux/vmalloc.h>

#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
/* total number of MSI vectors: 8 AFI_MSI_VEC registers of 32 bits each */
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0		0x6c
#define AFI_MSI_VEC1		0x70
#define AFI_MSI_VEC2		0x74
#define AFI_MSI_VEC3		0x78
#define AFI_MSI_VEC4		0x7c
#define AFI_MSI_VEC5		0x80
#define AFI_MSI_VEC6		0x84
#define AFI_MSI_VEC7		0x88

#define AFI_MSI_EN_VEC0		0x8c
#define AFI_MSI_EN_VEC1		0x90
#define AFI_MSI_EN_VEC2		0x94
#define AFI_MSI_EN_VEC3		0x98
#define AFI_MSI_EN_VEC4		0x9c
#define AFI_MSI_EN_VEC5		0xa0
#define AFI_MSI_EN_VEC6		0xa4
#define AFI_MSI_EN_VEC7		0xa8

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_AXI_SLAVE_ERROR	1
#define  AFI_INTR_AXI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX2_CTRL			0x128
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_VEND_XP	0x00000F00
#define  RP_VEND_XP_DL_UP	(1 << 30)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define PADS_CTL_SEL		0x0000009C

#define PADS_CTL		0x000000A0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000B8
#define PADS_PLL_CTL_TEGRA30			0x000000B4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000C8
#define PADS_REFCLK_CFG1			0x000000CC

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */

/*
 * Default value provided by HW engineering is 0xfa5c.
 * The whole expansion is parenthesized so that uses such as
 * "PADS_REFCLK_CFG_VALUE << 16" shift the complete value rather than
 * only the last OR term (operator precedence: << binds tighter than |).
 */
#define PADS_REFCLK_CFG_VALUE \
	( \
		(0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
		(0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
		(0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
		(0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
	)
218 struct tegra_msi {
219 struct msi_chip chip;
220 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
221 struct irq_domain *domain;
222 unsigned long pages;
223 struct mutex lock;
224 int irq;
227 /* used to differentiate between Tegra SoC generations */
228 struct tegra_pcie_soc_data {
229 unsigned int num_ports;
230 unsigned int msi_base_shift;
231 u32 pads_pll_ctl;
232 u32 tx_ref_sel;
233 bool has_pex_clkreq_en;
234 bool has_pex_bias_ctrl;
235 bool has_intr_prsnt_sense;
236 bool has_avdd_supply;
237 bool has_cml_clk;
240 static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
242 return container_of(chip, struct tegra_msi, chip);
245 struct tegra_pcie {
246 struct device *dev;
248 void __iomem *pads;
249 void __iomem *afi;
250 int irq;
252 struct list_head buses;
253 struct resource *cs;
255 struct resource io;
256 struct resource mem;
257 struct resource prefetch;
258 struct resource busn;
260 struct clk *pex_clk;
261 struct clk *afi_clk;
262 struct clk *pll_e;
263 struct clk *cml_clk;
265 struct reset_control *pex_rst;
266 struct reset_control *afi_rst;
267 struct reset_control *pcie_xrst;
269 struct tegra_msi msi;
271 struct list_head ports;
272 unsigned int num_ports;
273 u32 xbar_config;
275 struct regulator *pex_clk_supply;
276 struct regulator *vdd_supply;
277 struct regulator *avdd_supply;
279 const struct tegra_pcie_soc_data *soc_data;
282 struct tegra_pcie_port {
283 struct tegra_pcie *pcie;
284 struct list_head list;
285 struct resource regs;
286 void __iomem *base;
287 unsigned int index;
288 unsigned int lanes;
291 struct tegra_pcie_bus {
292 struct vm_struct *area;
293 struct list_head list;
294 unsigned int nr;
297 static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
299 return sys->private_data;
302 static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
303 unsigned long offset)
305 writel(value, pcie->afi + offset);
308 static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
310 return readl(pcie->afi + offset);
313 static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
314 unsigned long offset)
316 writel(value, pcie->pads + offset);
319 static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
321 return readl(pcie->pads + offset);
/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 * To work around this, a 1 MiB of virtual addresses are allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * bus number bits are hidden so that the extended register number bits
 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 *
 *    [19:16] extended register number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 * address space via the MMU.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	/* extended register bits land at [19:16]; low two bits dropped */
	return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xfc);
}
356 static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
357 unsigned int busnr)
359 pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
360 L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
361 phys_addr_t cs = pcie->cs->start;
362 struct tegra_pcie_bus *bus;
363 unsigned int i;
364 int err;
366 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
367 if (!bus)
368 return ERR_PTR(-ENOMEM);
370 INIT_LIST_HEAD(&bus->list);
371 bus->nr = busnr;
373 /* allocate 1 MiB of virtual addresses */
374 bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
375 if (!bus->area) {
376 err = -ENOMEM;
377 goto free;
380 /* map each of the 16 chunks of 64 KiB each */
381 for (i = 0; i < 16; i++) {
382 unsigned long virt = (unsigned long)bus->area->addr +
383 i * SZ_64K;
384 phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;
386 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
387 if (err < 0) {
388 dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
389 err);
390 goto unmap;
394 return bus;
396 unmap:
397 vunmap(bus->area->addr);
398 free:
399 kfree(bus);
400 return ERR_PTR(err);
404 * Look up a virtual address mapping for the specified bus number. If no such
405 * mapping exists, try to create one.
407 static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
408 unsigned int busnr)
410 struct tegra_pcie_bus *bus;
412 list_for_each_entry(bus, &pcie->buses, list)
413 if (bus->nr == busnr)
414 return (void __iomem *)bus->area->addr;
416 bus = tegra_pcie_bus_alloc(pcie, busnr);
417 if (IS_ERR(bus))
418 return NULL;
420 list_add_tail(&bus->list, &pcie->buses);
422 return (void __iomem *)bus->area->addr;
425 static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
426 unsigned int devfn,
427 int where)
429 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
430 void __iomem *addr = NULL;
432 if (bus->number == 0) {
433 unsigned int slot = PCI_SLOT(devfn);
434 struct tegra_pcie_port *port;
436 list_for_each_entry(port, &pcie->ports, list) {
437 if (port->index + 1 == slot) {
438 addr = port->base + (where & ~3);
439 break;
442 } else {
443 addr = tegra_pcie_bus_map(pcie, bus->number);
444 if (!addr) {
445 dev_err(pcie->dev,
446 "failed to map cfg. space for bus %u\n",
447 bus->number);
448 return NULL;
451 addr += tegra_pcie_conf_offset(devfn, where);
454 return addr;
457 static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
458 int where, int size, u32 *value)
460 void __iomem *addr;
462 addr = tegra_pcie_conf_address(bus, devfn, where);
463 if (!addr) {
464 *value = 0xffffffff;
465 return PCIBIOS_DEVICE_NOT_FOUND;
468 *value = readl(addr);
470 if (size == 1)
471 *value = (*value >> (8 * (where & 3))) & 0xff;
472 else if (size == 2)
473 *value = (*value >> (8 * (where & 3))) & 0xffff;
475 return PCIBIOS_SUCCESSFUL;
478 static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
479 int where, int size, u32 value)
481 void __iomem *addr;
482 u32 mask, tmp;
484 addr = tegra_pcie_conf_address(bus, devfn, where);
485 if (!addr)
486 return PCIBIOS_DEVICE_NOT_FOUND;
488 if (size == 4) {
489 writel(value, addr);
490 return PCIBIOS_SUCCESSFUL;
493 if (size == 2)
494 mask = ~(0xffff << ((where & 0x3) * 8));
495 else if (size == 1)
496 mask = ~(0xff << ((where & 0x3) * 8));
497 else
498 return PCIBIOS_BAD_REGISTER_NUMBER;
500 tmp = readl(addr) & mask;
501 tmp |= value << ((where & 0x3) * 8);
502 writel(tmp, addr);
504 return PCIBIOS_SUCCESSFUL;
507 static struct pci_ops tegra_pcie_ops = {
508 .read = tegra_pcie_read_conf,
509 .write = tegra_pcie_write_conf,
512 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
514 unsigned long ret = 0;
516 switch (port->index) {
517 case 0:
518 ret = AFI_PEX0_CTRL;
519 break;
521 case 1:
522 ret = AFI_PEX1_CTRL;
523 break;
525 case 2:
526 ret = AFI_PEX2_CTRL;
527 break;
530 return ret;
533 static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
535 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
536 unsigned long value;
538 /* pulse reset signal */
539 value = afi_readl(port->pcie, ctrl);
540 value &= ~AFI_PEX_CTRL_RST;
541 afi_writel(port->pcie, value, ctrl);
543 usleep_range(1000, 2000);
545 value = afi_readl(port->pcie, ctrl);
546 value |= AFI_PEX_CTRL_RST;
547 afi_writel(port->pcie, value, ctrl);
550 static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
552 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
553 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
554 unsigned long value;
556 /* enable reference clock */
557 value = afi_readl(port->pcie, ctrl);
558 value |= AFI_PEX_CTRL_REFCLK_EN;
560 if (soc->has_pex_clkreq_en)
561 value |= AFI_PEX_CTRL_CLKREQ_EN;
563 afi_writel(port->pcie, value, ctrl);
565 tegra_pcie_port_reset(port);
568 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
570 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
571 unsigned long value;
573 /* assert port reset */
574 value = afi_readl(port->pcie, ctrl);
575 value &= ~AFI_PEX_CTRL_RST;
576 afi_writel(port->pcie, value, ctrl);
578 /* disable reference clock */
579 value = afi_readl(port->pcie, ctrl);
580 value &= ~AFI_PEX_CTRL_REFCLK_EN;
581 afi_writel(port->pcie, value, ctrl);
584 static void tegra_pcie_port_free(struct tegra_pcie_port *port)
586 struct tegra_pcie *pcie = port->pcie;
588 devm_iounmap(pcie->dev, port->base);
589 devm_release_mem_region(pcie->dev, port->regs.start,
590 resource_size(&port->regs));
591 list_del(&port->list);
592 devm_kfree(pcie->dev, port);
595 static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
597 u16 reg;
599 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
600 pci_read_config_word(dev, PCI_COMMAND, &reg);
601 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
602 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
603 pci_write_config_word(dev, PCI_COMMAND, reg);
606 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
608 /* Tegra PCIE root complex wrongly reports device class */
609 static void tegra_pcie_fixup_class(struct pci_dev *dev)
611 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
613 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
614 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
615 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
616 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
618 /* Tegra PCIE requires relaxed ordering */
619 static void tegra_pcie_relax_enable(struct pci_dev *dev)
621 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
623 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
625 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
627 struct tegra_pcie *pcie = sys_to_pcie(sys);
629 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
630 pci_add_resource_offset(&sys->resources, &pcie->prefetch,
631 sys->mem_offset);
632 pci_add_resource(&sys->resources, &pcie->busn);
634 pci_ioremap_io(nr * SZ_64K, pcie->io.start);
636 return 1;
639 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
641 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
643 tegra_cpuidle_pcie_irqs_in_use();
645 return pcie->irq;
648 static void tegra_pcie_add_bus(struct pci_bus *bus)
650 if (IS_ENABLED(CONFIG_PCI_MSI)) {
651 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
653 bus->msi = &pcie->msi.chip;
657 static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
659 struct tegra_pcie *pcie = sys_to_pcie(sys);
660 struct pci_bus *bus;
662 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
663 &sys->resources);
664 if (!bus)
665 return NULL;
667 pci_scan_child_bus(bus);
669 return bus;
672 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
674 const char *err_msg[] = {
675 "Unknown",
676 "AXI slave error",
677 "AXI decode error",
678 "Target abort",
679 "Master abort",
680 "Invalid write",
681 "Response decoding error",
682 "AXI response decoding error",
683 "Transaction timeout",
685 struct tegra_pcie *pcie = arg;
686 u32 code, signature;
688 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
689 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
690 afi_writel(pcie, 0, AFI_INTR_CODE);
692 if (code == AFI_INTR_LEGACY)
693 return IRQ_NONE;
695 if (code >= ARRAY_SIZE(err_msg))
696 code = 0;
699 * do not pollute kernel log with master abort reports since they
700 * happen a lot during enumeration
702 if (code == AFI_INTR_MASTER_ABORT)
703 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
704 signature);
705 else
706 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
707 signature);
709 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
710 code == AFI_INTR_FPCI_DECODE_ERROR) {
711 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
712 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
714 if (code == AFI_INTR_MASTER_ABORT)
715 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
716 else
717 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
720 return IRQ_HANDLED;
724 * FPCI map is as follows:
725 * - 0xfdfc000000: I/O space
726 * - 0xfdfe000000: type 0 configuration space
727 * - 0xfdff000000: type 1 configuration space
728 * - 0xfe00000000: type 0 extended configuration space
729 * - 0xfe10000000: type 1 extended configuration space
731 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
733 u32 fpci_bar, size, axi_address;
735 /* Bar 0: type 1 extended configuration space */
736 fpci_bar = 0xfe100000;
737 size = resource_size(pcie->cs);
738 axi_address = pcie->cs->start;
739 afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
740 afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
741 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);
743 /* Bar 1: downstream IO bar */
744 fpci_bar = 0xfdfc0000;
745 size = resource_size(&pcie->io);
746 axi_address = pcie->io.start;
747 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
748 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
749 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
751 /* Bar 2: prefetchable memory BAR */
752 fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
753 size = resource_size(&pcie->prefetch);
754 axi_address = pcie->prefetch.start;
755 afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
756 afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
757 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
759 /* Bar 3: non prefetchable memory BAR */
760 fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
761 size = resource_size(&pcie->mem);
762 axi_address = pcie->mem.start;
763 afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
764 afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
765 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
767 /* NULL out the remaining BARs as they are not used */
768 afi_writel(pcie, 0, AFI_AXI_BAR4_START);
769 afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
770 afi_writel(pcie, 0, AFI_FPCI_BAR4);
772 afi_writel(pcie, 0, AFI_AXI_BAR5_START);
773 afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
774 afi_writel(pcie, 0, AFI_FPCI_BAR5);
776 /* map all upstream transactions as uncached */
777 afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
778 afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
779 afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
780 afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
782 /* MSI translations are setup only when needed */
783 afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
784 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
785 afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
786 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
789 static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
791 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
792 struct tegra_pcie_port *port;
793 unsigned int timeout;
794 unsigned long value;
796 /* power down PCIe slot clock bias pad */
797 if (soc->has_pex_bias_ctrl)
798 afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
800 /* configure mode and disable all ports */
801 value = afi_readl(pcie, AFI_PCIE_CONFIG);
802 value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
803 value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
805 list_for_each_entry(port, &pcie->ports, list)
806 value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
808 afi_writel(pcie, value, AFI_PCIE_CONFIG);
810 value = afi_readl(pcie, AFI_FUSE);
811 value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
812 afi_writel(pcie, value, AFI_FUSE);
814 /* initialize internal PHY, enable up to 16 PCIE lanes */
815 pads_writel(pcie, 0x0, PADS_CTL_SEL);
817 /* override IDDQ to 1 on all 4 lanes */
818 value = pads_readl(pcie, PADS_CTL);
819 value |= PADS_CTL_IDDQ_1L;
820 pads_writel(pcie, value, PADS_CTL);
823 * Set up PHY PLL inputs select PLLE output as refclock,
824 * set TX ref sel to div10 (not div5).
826 value = pads_readl(pcie, soc->pads_pll_ctl);
827 value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
828 value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
829 pads_writel(pcie, value, soc->pads_pll_ctl);
831 /* take PLL out of reset */
832 value = pads_readl(pcie, soc->pads_pll_ctl);
833 value |= PADS_PLL_CTL_RST_B4SM;
834 pads_writel(pcie, value, soc->pads_pll_ctl);
836 /* Configure the reference clock driver */
837 value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
838 pads_writel(pcie, value, PADS_REFCLK_CFG0);
839 if (soc->num_ports > 2)
840 pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
842 /* wait for the PLL to lock */
843 timeout = 300;
844 do {
845 value = pads_readl(pcie, soc->pads_pll_ctl);
846 usleep_range(1000, 2000);
847 if (--timeout == 0) {
848 pr_err("Tegra PCIe error: timeout waiting for PLL\n");
849 return -EBUSY;
851 } while (!(value & PADS_PLL_CTL_LOCKDET));
853 /* turn off IDDQ override */
854 value = pads_readl(pcie, PADS_CTL);
855 value &= ~PADS_CTL_IDDQ_1L;
856 pads_writel(pcie, value, PADS_CTL);
858 /* enable TX/RX data */
859 value = pads_readl(pcie, PADS_CTL);
860 value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
861 pads_writel(pcie, value, PADS_CTL);
863 /* take the PCIe interface module out of reset */
864 reset_control_deassert(pcie->pcie_xrst);
866 /* finally enable PCIe */
867 value = afi_readl(pcie, AFI_CONFIGURATION);
868 value |= AFI_CONFIGURATION_EN_FPCI;
869 afi_writel(pcie, value, AFI_CONFIGURATION);
871 value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
872 AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
873 AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
875 if (soc->has_intr_prsnt_sense)
876 value |= AFI_INTR_EN_PRSNT_SENSE;
878 afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
879 afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
881 /* don't enable MSI for now, only when needed */
882 afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
884 /* disable all exceptions */
885 afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
887 return 0;
890 static void tegra_pcie_power_off(struct tegra_pcie *pcie)
892 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
893 int err;
895 /* TODO: disable and unprepare clocks? */
897 reset_control_assert(pcie->pcie_xrst);
898 reset_control_assert(pcie->afi_rst);
899 reset_control_assert(pcie->pex_rst);
901 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
903 if (soc->has_avdd_supply) {
904 err = regulator_disable(pcie->avdd_supply);
905 if (err < 0)
906 dev_warn(pcie->dev,
907 "failed to disable AVDD regulator: %d\n",
908 err);
911 err = regulator_disable(pcie->pex_clk_supply);
912 if (err < 0)
913 dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
914 err);
916 err = regulator_disable(pcie->vdd_supply);
917 if (err < 0)
918 dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
919 err);
922 static int tegra_pcie_power_on(struct tegra_pcie *pcie)
924 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
925 int err;
927 reset_control_assert(pcie->pcie_xrst);
928 reset_control_assert(pcie->afi_rst);
929 reset_control_assert(pcie->pex_rst);
931 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
933 /* enable regulators */
934 err = regulator_enable(pcie->vdd_supply);
935 if (err < 0) {
936 dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
937 return err;
940 err = regulator_enable(pcie->pex_clk_supply);
941 if (err < 0) {
942 dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
943 err);
944 return err;
947 if (soc->has_avdd_supply) {
948 err = regulator_enable(pcie->avdd_supply);
949 if (err < 0) {
950 dev_err(pcie->dev,
951 "failed to enable AVDD regulator: %d\n",
952 err);
953 return err;
957 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
958 pcie->pex_clk,
959 pcie->pex_rst);
960 if (err) {
961 dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
962 return err;
965 reset_control_deassert(pcie->afi_rst);
967 err = clk_prepare_enable(pcie->afi_clk);
968 if (err < 0) {
969 dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
970 return err;
973 if (soc->has_cml_clk) {
974 err = clk_prepare_enable(pcie->cml_clk);
975 if (err < 0) {
976 dev_err(pcie->dev, "failed to enable CML clock: %d\n",
977 err);
978 return err;
982 err = clk_prepare_enable(pcie->pll_e);
983 if (err < 0) {
984 dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
985 return err;
988 return 0;
991 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
993 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
995 pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
996 if (IS_ERR(pcie->pex_clk))
997 return PTR_ERR(pcie->pex_clk);
999 pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
1000 if (IS_ERR(pcie->afi_clk))
1001 return PTR_ERR(pcie->afi_clk);
1003 pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
1004 if (IS_ERR(pcie->pll_e))
1005 return PTR_ERR(pcie->pll_e);
1007 if (soc->has_cml_clk) {
1008 pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
1009 if (IS_ERR(pcie->cml_clk))
1010 return PTR_ERR(pcie->cml_clk);
1013 return 0;
1016 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1018 pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1019 if (IS_ERR(pcie->pex_rst))
1020 return PTR_ERR(pcie->pex_rst);
1022 pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1023 if (IS_ERR(pcie->afi_rst))
1024 return PTR_ERR(pcie->afi_rst);
1026 pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1027 if (IS_ERR(pcie->pcie_xrst))
1028 return PTR_ERR(pcie->pcie_xrst);
1030 return 0;
1033 static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1035 struct platform_device *pdev = to_platform_device(pcie->dev);
1036 struct resource *pads, *afi, *res;
1037 int err;
1039 err = tegra_pcie_clocks_get(pcie);
1040 if (err) {
1041 dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
1042 return err;
1045 err = tegra_pcie_resets_get(pcie);
1046 if (err) {
1047 dev_err(&pdev->dev, "failed to get resets: %d\n", err);
1048 return err;
1051 err = tegra_pcie_power_on(pcie);
1052 if (err) {
1053 dev_err(&pdev->dev, "failed to power up: %d\n", err);
1054 return err;
1057 pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
1058 pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
1059 if (IS_ERR(pcie->pads)) {
1060 err = PTR_ERR(pcie->pads);
1061 goto poweroff;
1064 afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
1065 pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
1066 if (IS_ERR(pcie->afi)) {
1067 err = PTR_ERR(pcie->afi);
1068 goto poweroff;
1071 /* request configuration space, but remap later, on demand */
1072 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1073 if (!res) {
1074 err = -EADDRNOTAVAIL;
1075 goto poweroff;
1078 pcie->cs = devm_request_mem_region(pcie->dev, res->start,
1079 resource_size(res), res->name);
1080 if (!pcie->cs) {
1081 err = -EADDRNOTAVAIL;
1082 goto poweroff;
1085 /* request interrupt */
1086 err = platform_get_irq_byname(pdev, "intr");
1087 if (err < 0) {
1088 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1089 goto poweroff;
1092 pcie->irq = err;
1094 err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1095 if (err) {
1096 dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
1097 goto poweroff;
1100 return 0;
1102 poweroff:
1103 tegra_pcie_power_off(pcie);
1104 return err;
1107 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1109 if (pcie->irq > 0)
1110 free_irq(pcie->irq, pcie);
1112 tegra_pcie_power_off(pcie);
1113 return 0;
1116 static int tegra_msi_alloc(struct tegra_msi *chip)
1118 int msi;
1120 mutex_lock(&chip->lock);
1122 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1123 if (msi < INT_PCI_MSI_NR)
1124 set_bit(msi, chip->used);
1125 else
1126 msi = -ENOSPC;
1128 mutex_unlock(&chip->lock);
1130 return msi;
1133 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1135 struct device *dev = chip->chip.dev;
1137 mutex_lock(&chip->lock);
1139 if (!test_bit(irq, chip->used))
1140 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1141 else
1142 clear_bit(irq, chip->used);
1144 mutex_unlock(&chip->lock);
/*
 * Top-level MSI interrupt handler.  The AFI latches pending MSIs in eight
 * 32-bit vector registers (AFI_MSI_VEC0..7, 8 * 32 = INT_PCI_MSI_NR
 * vectors total); scan them, acknowledge each pending bit and dispatch
 * the corresponding Linux IRQ.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq; /* NB: shadows the function parameter */

			/* clear the interrupt (write-1-to-clear) */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird - who triggered this?
				 * just clear it
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1188 static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1189 struct msi_desc *desc)
1191 struct tegra_msi *msi = to_tegra_msi(chip);
1192 struct msi_msg msg;
1193 unsigned int irq;
1194 int hwirq;
1196 hwirq = tegra_msi_alloc(msi);
1197 if (hwirq < 0)
1198 return hwirq;
1200 irq = irq_create_mapping(msi->domain, hwirq);
1201 if (!irq)
1202 return -EINVAL;
1204 irq_set_msi_desc(irq, desc);
1206 msg.address_lo = virt_to_phys((void *)msi->pages);
1207 /* 32 bit address only */
1208 msg.address_hi = 0;
1209 msg.data = hwirq;
1211 write_msi_msg(irq, &msg);
1213 return 0;
/*
 * msi_chip .teardown_irq callback: release the hardware MSI vector that
 * backs the given Linux IRQ (the hardware number is recorded in the IRQ's
 * irq_data by the domain mapping).
 */
static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	tegra_msi_free(msi, d->hwirq);
}
/*
 * irq_chip for the per-vector MSI interrupts.  Masking is done at the
 * PCI device (via the generic mask/unmask_msi_irq helpers) rather than
 * in the AFI, so enable/disable alias to unmask/mask.
 */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
/*
 * irq_domain .map callback: wire a freshly created Linux IRQ to the MSI
 * irq_chip and mark it valid.  Also notifies the Tegra cpuidle driver
 * that PCIe interrupts are now in use (affects LP2 idle state entry).
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	/* host_data is the struct tegra_msi passed to irq_domain_add_linear() */
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
/* Linear IRQ domain ops for the INT_PCI_MSI_NR MSI vectors. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1248 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1250 struct platform_device *pdev = to_platform_device(pcie->dev);
1251 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1252 struct tegra_msi *msi = &pcie->msi;
1253 unsigned long base;
1254 int err;
1255 u32 reg;
1257 mutex_init(&msi->lock);
1259 msi->chip.dev = pcie->dev;
1260 msi->chip.setup_irq = tegra_msi_setup_irq;
1261 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1263 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1264 &msi_domain_ops, &msi->chip);
1265 if (!msi->domain) {
1266 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1267 return -ENOMEM;
1270 err = platform_get_irq_byname(pdev, "msi");
1271 if (err < 0) {
1272 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1273 goto err;
1276 msi->irq = err;
1278 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1279 tegra_msi_irq_chip.name, pcie);
1280 if (err < 0) {
1281 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1282 goto err;
1285 /* setup AFI/FPCI range */
1286 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1287 base = virt_to_phys((void *)msi->pages);
1289 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1290 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1291 /* this register is in 4K increments */
1292 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1294 /* enable all MSI vectors */
1295 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1296 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1297 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1298 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1299 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1300 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1301 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1302 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1304 /* and unmask the MSI interrupt */
1305 reg = afi_readl(pcie, AFI_INTR_MASK);
1306 reg |= AFI_INTR_MASK_MSI_MASK;
1307 afi_writel(pcie, reg, AFI_INTR_MASK);
1309 return 0;
1311 err:
1312 irq_domain_remove(msi->domain);
1313 return err;
/*
 * Tear down MSI support: mask and disable all vectors in the AFI, free
 * the MSI target page and the "msi" interrupt, dispose of every Linux
 * IRQ mapping and finally remove the IRQ domain.  Mirrors
 * tegra_pcie_enable_msi() in reverse order.  Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	free_pages(msi->pages, 0);

	/* irq is only > 0 once request_irq() succeeded in enable_msi */
	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	/* dispose of the per-vector mappings before removing the domain */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
1353 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1354 u32 *xbar)
1356 struct device_node *np = pcie->dev->of_node;
1358 if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1359 switch (lanes) {
1360 case 0x00000204:
1361 dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1362 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1363 return 0;
1365 case 0x00020202:
1366 dev_info(pcie->dev, "2x3 configuration\n");
1367 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1368 return 0;
1370 case 0x00010104:
1371 dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1372 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1373 return 0;
1375 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1376 switch (lanes) {
1377 case 0x00000004:
1378 dev_info(pcie->dev, "single-mode configuration\n");
1379 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1380 return 0;
1382 case 0x00000202:
1383 dev_info(pcie->dev, "dual-mode configuration\n");
1384 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1385 return 0;
1389 return -EINVAL;
/*
 * Parse the controller's device tree node: regulators, the PCI "ranges"
 * windows (I/O, prefetchable and non-prefetchable memory), the bus-number
 * range, and one child node per root port.  Fills in the corresponding
 * fields of @pcie and computes the lane crossbar configuration.
 *
 * Returns 0 on success or a negative error code.  Per-port allocations
 * use devm_*, so no explicit cleanup is needed on failure.
 */
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct device_node *np = pcie->dev->of_node, *port;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	u32 lanes = 0;
	int err;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pcie->dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
	if (IS_ERR(pcie->vdd_supply))
		return PTR_ERR(pcie->vdd_supply);

	pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
	if (IS_ERR(pcie->pex_clk_supply))
		return PTR_ERR(pcie->pex_clk_supply);

	/* the AVDD supply only exists on some SoC generations */
	if (soc->has_avdd_supply) {
		pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
		if (IS_ERR(pcie->avdd_supply))
			return PTR_ERR(pcie->avdd_supply);
	}

	/* sort the "ranges" entries into the I/O, prefetch and mem windows */
	for_each_of_pci_range(&parser, &range) {
		of_pci_range_to_resource(&range, np, &res);

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			memcpy(&pcie->io, &res, sizeof(res));
			pcie->io.name = "I/O";
			break;

		case IORESOURCE_MEM:
			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "PREFETCH";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "MEM";
			}
			break;
		}
	}

	/* a missing/bad bus-range property falls back to the full 0-0xff */
	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
			err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		/* devfn encodes the port number in the slot bits */
		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		index = PCI_SLOT(err);

		/* ports are numbered 1..num_ports in DT, 0-based internally */
		if (index < 1 || index > soc->num_ports) {
			dev_err(pcie->dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		/* pack one byte of lane count per port for the xbar lookup */
		lanes |= value << (index << 3);

		/* disabled ports still contribute lanes, but get no state */
		if (!of_device_is_available(port))
			continue;

		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;

		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(pcie->dev, "invalid lane configuration\n");
		return err;
	}

	return 0;
}
/*
 * FIXME: If no PCIe cards are attached, calling this function can
 * noticeably increase boot time because of its long timeout loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
/*
 * Poll for link-up on a root port.  First waits for the data link layer
 * to come up (RP_VEND_XP_DL_UP), then for the link to become active
 * (RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE).  Each phase polls up to
 * TEGRA_PCIE_LINKUP_TIMEOUT times with a 1-2 ms sleep; on timeout the
 * port is reset and the whole sequence retried up to 3 times.
 *
 * Returns true if the link came up, false otherwise.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	unsigned int retries = 3;
	unsigned long value;

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(port->pcie->dev, "link %u down, retrying\n",
				port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
/*
 * Bring up all root ports, pruning those whose link never comes up, then
 * register the controller with the ARM PCI core via pci_common_init_dev().
 * Always returns 0.
 */
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port, *tmp;
	struct hw_pci hw;

	/* _safe iteration: ports with a dead link are freed from the list */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}

	memset(&hw, 0, sizeof(hw));

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = tegra_pcie_setup;
	hw.map_irq = tegra_pcie_map_irq;
	hw.add_bus = tegra_pcie_add_bus;
	hw.scan = tegra_pcie_scan_bus;
	hw.ops = &tegra_pcie_ops;

	pci_common_init_dev(pcie->dev, &hw);

	return 0;
}
/* SoC parameters for Tegra20: 2 root ports, no extra clocks/supplies. */
static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_avdd_supply = false,
	.has_cml_clk = false,
};
/*
 * SoC parameters for Tegra30: 3 root ports, MSI FPCI base shifted by 8,
 * plus the AVDD supply, CML clock and extra pad controls Tegra20 lacks.
 */
static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_avdd_supply = true,
	.has_cml_clk = true,
};
/* Match table; more specific (tegra30) entry deliberately listed first. */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
/*
 * Platform probe: allocate and initialize the driver state, parse the
 * device tree, acquire clocks/regulators/regions, bring up the
 * controller, optionally enable MSI, and finally enable the root ports
 * and register with the PCI core.  Later steps are unwound in reverse
 * order on failure.
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct tegra_pcie *pcie;
	int err;

	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->soc_data = match->data;
	pcie->dev = &pdev->dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	/* PCI memory space may be assigned from address 0 */
	pcibios_min_mem = 0;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	platform_set_drvdata(pdev, pcie);
	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
/*
 * Platform driver registration.  suppress_bind_attrs prevents manual
 * unbind via sysfs — there is no .remove callback, so unbinding would
 * leave the hardware enabled with dangling state.
 */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.owner = THIS_MODULE,
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);
1712 MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
1713 MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
1714 MODULE_LICENSE("GPLv2");