/*
 * PCIe driver for Renesas R-Car SoCs
 *  Copyright (C) 2014 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 *  Copyright (C) 2009 - 2011 Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#define PCIECAR			0x000010
#define PCIECCTLR		0x000018
#define  CONFIG_SEND_ENABLE	(1 << 31)
#define  TYPE0			(0 << 8)
#define  TYPE1			(1 << 8)
#define PCIECDR			0x000020
#define PCIEMSR			0x000028
#define PCIEINTXR		0x000400
#define PCIEMSITXR		0x000840
/* Transfer control */
#define PCIETCTLR		0x02000
#define PCIETSTR		0x02004
#define  DATA_LINK_ACTIVE	1
#define PCIEERRFR		0x02020
#define  UNSUPPORTED_REQUEST	(1 << 4)
#define PCIEMSIFR		0x02044
#define PCIEMSIALR		0x02048
#define PCIEMSIAUR		0x0204c
#define PCIEMSIIER		0x02050
/* root port address */
#define PCIEPRAR(x)		(0x02080 + ((x) * 0x4))
/* local address reg & mask */
#define PCIELAR(x)		(0x02200 + ((x) * 0x20))
#define PCIELAMR(x)		(0x02208 + ((x) * 0x20))
#define  LAM_PREFETCH		(1 << 3)
#define  LAM_64BIT		(1 << 2)
#define  LAR_ENABLE		(1 << 1)
/* PCIe address reg & mask */
#define PCIEPALR(x)		(0x03400 + ((x) * 0x20))
#define PCIEPAUR(x)		(0x03404 + ((x) * 0x20))
#define PCIEPAMR(x)		(0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x)		(0x0340c + ((x) * 0x20))
#define  PAR_ENABLE		(1 << 31)
#define  IO_SPACE		(1 << 8)
#define PCICONF(x)		(0x010000 + ((x) * 0x4))
#define PMCAP(x)		(0x010040 + ((x) * 0x4))
#define EXPCAP(x)		(0x010070 + ((x) * 0x4))
#define VCCAP(x)		(0x010100 + ((x) * 0x4))
#define IDSETR1			0x011004
#define TLCTLR			0x011048
#define MACSR			0x011054
#define  SPCHGFIN		(1 << 4)
#define  SPCHGFAIL		(1 << 6)
#define  SPCHGSUC		(1 << 7)
#define  LINK_SPEED		(0xf << 16)
#define  LINK_SPEED_2_5GTS	(1 << 16)
#define  LINK_SPEED_5_0GTS	(2 << 16)
#define MACCTLR			0x011058
#define  SPEED_CHANGE		(1 << 24)
#define  SCRAMBLE_DISABLE	(1 << 27)
#define MACS2R			0x011078
#define MACCGSPSETR		0x011084
#define  SPCNGRSN		(1 << 31)
#define H1_PCIEPHYADRR		0x04000c
#define  WRITE_CMD		(1 << 16)
#define  PHY_ACK		(1 << 24)
#define H1_PCIEPHYDOUTR		0x040014
#define H1_PCIEPHYSR		0x040018
#define GEN2_PCIEPHYADDR	0x780
#define GEN2_PCIEPHYDATA	0x784
#define GEN2_PCIEPHYCTRL	0x78c
#define INT_PCI_MSI_NR		32
#define RCONF(x)		(PCICONF(0) + (x))
#define RPMCAP(x)		(PMCAP(0) + (x))
#define REXPCAP(x)		(EXPCAP(0) + (x))
#define RVCCAP(x)		(VCCAP(0) + (x))
#define PCIE_CONF_BUS(b)	(((b) & 0xff) << 24)
#define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 19)
#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 16)
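/*
 * Illustrative example (not part of the original source): a configuration
 * access to bus 1, device 0, function 0, register 0x10 is encoded into
 * PCIECAR as PCIE_CONF_BUS(1) | PCIE_CONF_DEV(0) | PCIE_CONF_FUNC(0) | 0x10
 * = 0x01000010, i.e. bus number in bits [31:24], device in [23:19],
 * function in [18:16] and the dword-aligned register offset in the low bits.
 */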
#define RCAR_PCI_MAX_RESOURCES	4
#define MAX_NR_INBOUND_MAPS	6
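/*
 * Explanatory note (not in the original source): MAX_NR_INBOUND_MAPS bounds
 * the index of the inbound mapping registers (PCIEPRAR/PCIELAR/PCIELAMR)
 * that rcar_pcie_inbound_ranges() below may program; a dma-range that would
 * need more entries is rejected with an error.
 */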
struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct msi_controller chip;
	unsigned long pages;
	struct mutex lock;
	int irq1;
	int irq2;
};
static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
{
	return container_of(chip, struct rcar_msi, chip);
}
/* Structure representing the PCIe interface */
struct rcar_pcie {
	struct device		*dev;
	void __iomem		*base;
	struct list_head	resources;
	int			root_bus_nr;
	struct clk		*clk;
	struct clk		*bus_clk;
	struct rcar_msi		msi;
};
static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
			       unsigned long reg)
{
	writel(val, pcie->base + reg);
}
static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
					unsigned long reg)
{
	return readl(pcie->base + reg);
}
enum {
	RCAR_PCI_ACCESS_READ,
	RCAR_PCI_ACCESS_WRITE,
};
static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
	int shift = 8 * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	val &= ~(mask << shift);
	val |= data << shift;
	rcar_pci_write_reg(pcie, val, where & ~3);
}
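/*
 * Worked example (illustrative, derived from the helper above): the call
 * rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1) targets byte offset
 * 0x19 of the root port's config space, so shift = 8 * (0x19 & 3) = 8; the
 * dword at offset 0x18 is read, bits [15:8] are cleared and set to 0x01,
 * and the dword is written back.
 */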
static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
	int shift = 8 * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	return val >> shift;
}
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie *pcie,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;
	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller-initiated target transfer to its own config space
	 * results in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;
		if (access_type == RCAR_PCI_ACCESS_READ) {
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		} else {
			/* Keep an eye out for changes to the root bus number */
			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
				pcie->root_bus_nr = *data & 0xff;

			rcar_pci_write_reg(pcie, *data, PCICONF(index));
		}

		return PCIBIOS_SUCCESSFUL;
	}
	if (pcie->root_bus_nr < 0)
		return PCIBIOS_DEVICE_NOT_FOUND;
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
	/* Set the PIO address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
	/* Enable the configuration access */
	if (bus->parent->number == pcie->root_bus_nr)
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
	/* Check for errors */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}
static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	int ret;
	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		return ret;
	}

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 2))) & 0xffff;
	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)*val);

	return ret;
}
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	int shift, ret;
	u32 data;
	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;
	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)val);
	if (size == 1) {
		shift = 8 * (where & 3);
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = 8 * (where & 2);
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else
		data = val;
	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
				      bus, devfn, where, &data);

	return ret;
}
static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};
static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
				   struct resource *res)
{
	/* Setup PCIe address space mappings for each resource */
	resource_size_t size;
	resource_size_t res_start;
	u32 mask;
	rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
	/*
	 * The PAMR mask is calculated in units of 128Bytes, which
	 * keeps things pretty simple.
	 */
	size = resource_size(res);
	mask = (roundup_pow_of_two(size) / SZ_128) - 1;
	rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
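	/*
	 * Worked example (illustrative): for a 1 MiB window,
	 * roundup_pow_of_two(0x100000) / SZ_128 - 1 = 0x1fff, so the value
	 * written to PCIEPAMR above is 0x1fff << 7 = 0xfff80, i.e. address
	 * bits [19:7] are masked for this outbound window.
	 */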
	if (res->flags & IORESOURCE_IO)
		res_start = pci_pio_to_address(res->start);
	else
		res_start = res->start;
	rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
	rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
			   PCIEPALR(win));

	/* First resource is for IO */
	mask = PAR_ENABLE;
	if (res->flags & IORESOURCE_IO)
		mask |= IO_SPACE;

	rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}
static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
{
	struct resource_entry *win;
	int i = 0;

	/* Setup PCI resources */
	resource_list_for_each_entry(win, &pci->resources) {
		struct resource *res = win->res;

		if (!res->flags)
			continue;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
		case IORESOURCE_MEM:
			rcar_pcie_setup_window(i, pci, res);
			i++;
			break;
		case IORESOURCE_BUS:
			pci->root_bus_nr = res->start;
			break;
		default:
			continue;
		}

		pci_add_resource(resource, res);
	}

	return 1;
}
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 1000;
	u32 macsr;
	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
		return;

	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
		dev_err(dev, "Speed change already in progress\n");
		return;
	}

	macsr = rcar_pci_read_reg(pcie, MACSR);
	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
		goto done;
	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set speed change reason as intentional factor */
	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);

	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
		rcar_pci_write_reg(pcie, macsr, MACSR);

	/* Start link speed change */
	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
	while (timeout--) {
		macsr = rcar_pci_read_reg(pcie, MACSR);
		if (macsr & SPCHGFIN) {
			/* Clear the interrupt bits */
			rcar_pci_write_reg(pcie, macsr, MACSR);

			if (macsr & SPCHGFAIL)
				dev_err(dev, "Speed change failed\n");

			goto done;
		}

		msleep(1);
	}

	dev_err(dev, "Speed change timed out\n");

done:
	dev_info(dev, "Current link speed is %s GT/s\n",
		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}
static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct pci_bus *bus, *child;
	LIST_HEAD(res);

	/* Try setting 5 GT/s link speed */
	rcar_pcie_force_speedup(pcie);
	rcar_pcie_setup(&res, pcie);

	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
				&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
	else
		bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
				&rcar_pcie_ops, pcie, &res);

	if (!bus) {
		dev_err(dev, "Scanning rootbus failed\n");
		return -ENODEV;
	}
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);

	return 0;
}
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 100;

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
			return 0;

		udelay(100);
	}

	dev_err(dev, "Access to PCIe phy timed out\n");

	return -ETIMEDOUT;
}
static void phy_write_reg(struct rcar_pcie *pcie,
			  unsigned int rate, unsigned int addr,
			  unsigned int lane, unsigned int data)
{
	unsigned long phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}
static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10;

	while (timeout--) {
		if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
			return 0;

		msleep(5);
	}

	return -ETIMEDOUT;
}
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	rcar_pci_write_reg(pcie, 1, PCIEMSR);
	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
	 * they aren't used, to avoid bridge being detected as broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		   PCI_HEADER_TYPE_BRIDGE);
	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		   PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will timeout if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	return 0;
}
static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10;

	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);

	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
			return rcar_pcie_hw_init(pcie);

		msleep(5);
	}

	return -ETIMEDOUT;
}
static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
{
	/*
	 * These settings come from the R-Car Series, 2nd Generation User's
	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
	 */
	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
	/* The following value is for DC connection, no termination resistor */
	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	return rcar_pcie_hw_init(pcie);
}
static int rcar_msi_alloc(struct rcar_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}
static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
{
	int msi;

	mutex_lock(&chip->lock);
	msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
				      order_base_2(no_irqs));
	mutex_unlock(&chip->lock);

	return msi;
}
static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
	mutex_lock(&chip->lock);
	clear_bit(irq, chip->used);
	mutex_unlock(&chip->lock);
}
static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
	struct rcar_pcie *pcie = data;
	struct rcar_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	unsigned long reg;

	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);

	/* MSI & INTx share an interrupt - we only handle MSI here */
	if (!reg)
		return IRQ_NONE;

	while (reg) {
		unsigned int index = find_first_bit(&reg, 32);
		unsigned int irq;

		/* clear the interrupt */
		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);

		irq = irq_find_mapping(msi->domain, index);
		if (irq) {
			if (test_bit(index, msi->used))
				generic_handle_irq(irq);
			else
				dev_info(dev, "unhandled MSI\n");
		} else {
			/* Unknown MSI, just clear it */
			dev_dbg(dev, "unexpected MSI\n");
		}

		/* see if there's any more pending in this vector */
		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
	}

	return IRQ_HANDLED;
}
static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			      struct msi_desc *desc)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = rcar_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq) {
		rcar_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}
static int rcar_msi_setup_irqs(struct msi_controller *chip,
			       struct pci_dev *pdev, int nvec, int type)
{
	struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct msi_desc *desc;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;
	int i;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	hwirq = rcar_msi_alloc_region(msi, nvec);
	if (hwirq < 0)
		return -ENOSPC;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq)
		return -ENOSPC;
	for (i = 0; i < nvec; i++) {
		/*
		 * irq_create_mapping() called from rcar_pcie_probe() pre-
		 * allocates descs, so there is no need to allocate descs here.
		 * We can therefore assume that if irq_find_mapping() above
		 * returns non-zero, then the descs are also successfully
		 * allocated.
		 */
		if (irq_set_msi_desc_off(irq, i, desc)) {
			return -EINVAL;
		}
	}

	desc->nvec_used = nvec;
	desc->msi_attrib.multiple = order_base_2(nvec);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}
static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	rcar_msi_free(msi, d->hwirq);
}
static struct irq_chip rcar_msi_irq_chip = {
	.name = "R-Car PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
static const struct irq_domain_ops msi_domain_ops = {
	.map = rcar_msi_map,
};
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct rcar_msi *msi = &pcie->msi;
	unsigned long base;
	int err, i;

	mutex_init(&msi->lock);
	msi->chip.setup_irq = rcar_msi_setup_irq;
	msi->chip.setup_irqs = rcar_msi_setup_irqs;
	msi->chip.teardown_irq = rcar_msi_teardown_irq;
	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	for (i = 0; i < INT_PCI_MSI_NR; i++)
		irq_create_mapping(msi->domain, i);
	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}
	/* setup MSI data target */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	base = virt_to_phys((void *)msi->pages);

	rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
	/* enable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}
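/*
 * Explanatory note (not in the original source): the page allocated in
 * rcar_pcie_enable_msi() above is never touched by the CPU; its physical
 * address is only programmed into PCIEMSIALR/PCIEMSIAUR so that the
 * controller has a valid inbound address to match MSI memory writes
 * against, and the MSIFE flag enables that match. Pending vectors then
 * show up as bits in PCIEMSIFR and are handled by rcar_pcie_msi_irq().
 */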
static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct resource res;
	int err, i;

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);
	pcie->clk = devm_clk_get(dev, "pcie");
	if (IS_ERR(pcie->clk)) {
		dev_err(dev, "cannot get platform clock\n");
		return PTR_ERR(pcie->clk);
	}
	err = clk_prepare_enable(pcie->clk);
	if (err)
		return err;

	pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(pcie->bus_clk)) {
		dev_err(dev, "cannot get pcie bus clock\n");
		err = PTR_ERR(pcie->bus_clk);
		goto fail_clk;
	}
	err = clk_prepare_enable(pcie->bus_clk);
	if (err)
		goto fail_clk;

	i = irq_of_parse_and_map(dev->of_node, 0);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_map_reg;
	}
	pcie->msi.irq1 = i;

	i = irq_of_parse_and_map(dev->of_node, 1);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_map_reg;
	}
	pcie->msi.irq2 = i;

	return 0;

err_map_reg:
	clk_disable_unprepare(pcie->bus_clk);
fail_clk:
	clk_disable_unprepare(pcie->clk);

	return err;
}
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct of_pci_range *range,
				    int *index)
{
	u64 restype = range->flags;
	u64 cpu_addr = range->cpu_addr;
	u64 cpu_end = range->cpu_addr + range->size;
	u64 pci_addr = range->pci_addr;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size;
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;
	/*
	 * If the size of the range is larger than the alignment of the start
	 * address, we have to use multiple entries to perform the mapping.
	 */
	if (cpu_addr > 0) {
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;

		size = min(range->size, alignment);
	} else {
		size = range->size;
	}
	/* Hardware supports max 4GiB inbound region */
	size = min(size, 1ULL << 32);

	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;
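	/*
	 * Worked example (illustrative): a dma-range starting at CPU address
	 * 0x40000000 (1 GiB aligned) with a 3 GiB size yields
	 * size = min(3 GiB, 1 GiB) = 1 GiB and mask = 0x3fffffff, so the
	 * loop below programs three 1 GiB inbound mappings, each using a
	 * PCIEPRAR/PCIELAR/PCIELAMR register pair.
	 */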
	while (cpu_addr < cpu_end) {
		/*
		 * Set up 64-bit inbound regions as the range parser doesn't
		 * distinguish between 32 and 64-bit types.
		 */
		rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
				   PCIEPRAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
				   PCIELAMR(idx));

		rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
				   PCIEPRAR(idx + 1));
		rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
				   PCIELAR(idx + 1));
		rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));

		pci_addr += size;
		cpu_addr += size;
		idx += 2;

		if (idx > MAX_NR_INBOUND_MAPS) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
	}
	*index = idx;

	return 0;
}
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				     struct device_node *node)
{
	const int na = 3, ns = 2;
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->np = parser->pna + na + ns;

	parser->range = of_get_property(node, "dma-ranges", &rlen);
	if (!parser->range)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);
	return 0;
}
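/*
 * Illustrative device tree fragment (an assumption, not taken from this
 * file): the parser above walks "dma-ranges" entries of pna + 3 + 2 cells,
 * e.g. with #address-cells = <2> in the parent node:
 *
 *	dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
 *
 * which describes a prefetchable 2 GiB inbound window at PCI address
 * 0x40000000 mapped to CPU address 0x40000000.
 */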
static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
					  struct device_node *np)
{
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	int index = 0;
	int err = 0;

	if (pci_dma_range_parser_init(&parser, np))
		return -EINVAL;
	/* Get the dma-ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		u64 end = range.cpu_addr + range.size - 1;

		dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
			range.flags, range.cpu_addr, end, range.pci_addr);

		err = rcar_pcie_inbound_ranges(pcie, &range, &index);
		if (err)
			break;
	}

	return err;
}
static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
	{ .compatible = "renesas,pcie-r8a7790",
	  .data = rcar_pcie_hw_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791",
	  .data = rcar_pcie_hw_init_gen2 },
	{ .compatible = "renesas,pcie-rcar-gen2",
	  .data = rcar_pcie_hw_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
	{ .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
	{},
};
static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
{
	int err;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
					       &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, &pci->resources);
	if (err)
		goto out_release_res;
	resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
		struct resource *res = win->res;

		if (resource_type(res) == IORESOURCE_IO) {
			err = pci_remap_iospace(res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);

				resource_list_destroy_entry(win);
			}
		}
	}

	return 0;

out_release_res:
	pci_free_resource_list(&pci->resources);
	return err;
}
static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie *pcie;
	unsigned int data;
	const struct of_device_id *of_id;
	int err;
	int (*hw_init_fn)(struct rcar_pcie *);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;

	INIT_LIST_HEAD(&pcie->resources);

	rcar_pcie_parse_request_of_pci_ranges(pcie);

	err = rcar_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
	if (err)
		return err;
	of_id = of_match_device(rcar_pcie_of_match, dev);
	if (!of_id || !of_id->data)
		return -EINVAL;
	hw_init_fn = of_id->data;
	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_pm_disable;
	}
	/* Failure to get a link might just be that no cards are inserted */
	err = hw_init_fn(pcie);
	if (err) {
		dev_info(dev, "PCIe link down\n");
		err = 0;
		goto err_pm_put;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev,
				"failed to enable MSI support: %d\n",
				err);
			goto err_pm_put;
		}
	}

	err = rcar_pcie_enable(pcie);
	if (err)
		goto err_pm_put;

	return 0;

err_pm_put:
	pm_runtime_put(dev);

err_pm_disable:
	pm_runtime_disable(dev);
	return err;
}
static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = "rcar-pcie",
		.of_match_table = rcar_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
builtin_platform_driver(rcar_pcie_driver);