// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Mobiveil PCIe Host controller
 *
 * Copyright (c) 2018 Mobiveil Inc.
 * Copyright 2019-2020 NXP
 *
 * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
 *	   Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "pcie-mobiveil.h"
static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
	/* Only one device down on each root port */
	if (pci_is_root_bus(bus) && (devfn > 0))
		return false;

	/*
	 * Do not read more than one device on the bus directly
	 * attached to RC
	 */
	if ((bus->primary == to_pci_host_bridge(bus->bridge)->busnr) &&
	    (PCI_SLOT(devfn) > 0))
		return false;

	return true;
}
/*
 * mobiveil_pcie_map_bus - routine to get the configuration base of either
 * root port or endpoint
 */
static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct mobiveil_pcie *pcie = bus->sysdata;
	struct mobiveil_root_port *rp = &pcie->rp;
	u32 value;

	if (!mobiveil_pcie_valid_device(bus, devfn))
		return NULL;

	/* RC config access */
	if (pci_is_root_bus(bus))
		return pcie->csr_axi_slave_base + where;

	/*
	 * EP config access (in Config/APIO space)
	 * Program PEX Address base (31..16 bits) with appropriate value
	 * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
	 * Relies on pci_lock serialization
	 */
	value = bus->number << PAB_BUS_SHIFT |
		PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
		PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;

	mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));

	return rp->config_axi_slave_base + where;
}
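/*
 * Example of the flow above: a config access to an endpoint at 01:00.0
 * first latches bus 1 / device 0 / function 0 into outbound window 0 via
 * PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0), and only then dereferences an offset
 * inside config_axi_slave_base, so the resulting cycle is routed to that
 * BDF. The shift macros come from pcie-mobiveil.h.
 */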
static struct pci_ops mobiveil_pcie_ops = {
	.map_bus = mobiveil_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
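/*
 * pci_generic_config_read()/write() call .map_bus first and then perform a
 * plain MMIO access on the pointer it returns; a NULL return (from the
 * valid-device check above) makes the read complete with all-ones data and
 * PCIBIOS_DEVICE_NOT_FOUND.
 */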
static void mobiveil_pcie_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
	struct device *dev = &pcie->pdev->dev;
	struct mobiveil_root_port *rp = &pcie->rp;
	struct mobiveil_msi *msi = &rp->msi;
	u32 msi_data, msi_addr_lo, msi_addr_hi;
	u32 intr_status, msi_status;
	unsigned long shifted_status;
	u32 bit, virq, val, mask;

	/*
	 * The core provides a single interrupt for both INTx/MSI messages.
	 * So we'll read both INTx and MSI status
	 */

	chained_irq_enter(chip, desc);

	/* read INTx status */
	val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
	mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	intr_status = val & mask;

	/* Handle INTx */
	if (intr_status & PAB_INTP_INTX_MASK) {
		shifted_status = mobiveil_csr_readl(pcie,
						    PAB_INTP_AMBA_MISC_STAT);
		shifted_status &= PAB_INTP_INTX_MASK;
		shifted_status >>= PAB_INTX_START;
		do {
			for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
				virq = irq_find_mapping(rp->intx_domain,
							bit + 1);
				if (virq)
					generic_handle_irq(virq);
				else
					dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
							    bit);

				/* clear interrupt handled */
				mobiveil_csr_writel(pcie,
						    1 << (PAB_INTX_START + bit),
						    PAB_INTP_AMBA_MISC_STAT);
			}

			shifted_status = mobiveil_csr_readl(pcie,
							    PAB_INTP_AMBA_MISC_STAT);
			shifted_status &= PAB_INTP_INTX_MASK;
			shifted_status >>= PAB_INTX_START;
		} while (shifted_status != 0);
	}

	/* read extra MSI status register */
	msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);

	/* handle MSI interrupts */
	while (msi_status & 1) {
		msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);

		/*
		 * MSI_STATUS_OFFSET register gets updated to zero
		 * once we pop not only the MSI data but also address
		 * from MSI hardware FIFO. So keeping these following
		 * two reads.
		 */
		msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
					    MSI_ADDR_L_OFFSET);
		msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
					    MSI_ADDR_H_OFFSET);
		dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
			msi_data, msi_addr_hi, msi_addr_lo);

		virq = irq_find_mapping(msi->dev_domain, msi_data);
		if (virq)
			generic_handle_irq(virq);

		msi_status = readl_relaxed(pcie->apb_csr_base +
					   MSI_STATUS_OFFSET);
	}

	/* Clear the interrupt status */
	mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
	chained_irq_exit(chip, desc);
}
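/*
 * The handler above runs as a chained handler on the single SoC-level
 * interrupt line: it demultiplexes the PAB status into per-INTx virqs
 * (via rp->intx_domain) and per-vector MSI virqs (via msi->dev_domain,
 * keyed by the MSI data word popped from the hardware FIFO), then clears
 * the summary status before chained_irq_exit().
 */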
static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct platform_device *pdev = pcie->pdev;
	struct device_node *node = dev->of_node;
	struct mobiveil_root_port *rp = &pcie->rp;
	struct resource *res;

	/* map config resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "config_axi_slave");
	rp->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rp->config_axi_slave_base))
		return PTR_ERR(rp->config_axi_slave_base);
	rp->ob_io_res = res;

	/* map csr resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "csr_axi_slave");
	pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->csr_axi_slave_base))
		return PTR_ERR(pcie->csr_axi_slave_base);
	pcie->pcie_reg_base = res->start;

	/* read the number of windows requested */
	if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
		pcie->apio_wins = MAX_PIO_WINDOWS;

	if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
		pcie->ppio_wins = MAX_PIO_WINDOWS;

	return 0;
}
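/*
 * The reg-names this routine looks up ("config_axi_slave", "csr_axi_slave",
 * plus "apb_csr" for the integrated MSI block below) follow the Mobiveil RC
 * device-tree binding. A minimal, illustrative node might look like:
 *
 *	pcie@... {
 *		compatible = "mbvl,gpex40-pcie";
 *		reg-names = "config_axi_slave", "csr_axi_slave", "apb_csr";
 *		apio-wins = <2>;
 *		ppio-wins = <2>;
 *	};
 *
 * This is a sketch only; the authoritative property list lives in the
 * dt-bindings document for this controller.
 */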
static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
{
	phys_addr_t msg_addr = pcie->pcie_reg_base;
	struct mobiveil_msi *msi = &pcie->rp.msi;

	msi->num_of_vectors = PCI_NUM_MSI;
	msi->msi_pages_phys = (phys_addr_t)msg_addr;

	writel_relaxed(lower_32_bits(msg_addr),
		       pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
	writel_relaxed(upper_32_bits(msg_addr),
		       pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
	writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
	writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}
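/*
 * The MSI target programmed here is a 4096-byte region starting at
 * pcie_reg_base. Inbound memory writes from endpoints that hit this window
 * are captured by the MSI block and queued (data plus address) in the
 * hardware FIFO that mobiveil_pcie_isr() drains through MSI_DATA and
 * MSI_ADDR_L/MSI_ADDR_H.
 */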
int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
{
	struct mobiveil_root_port *rp = &pcie->rp;
	struct pci_host_bridge *bridge = rp->bridge;
	u32 value, pab_ctrl, type;
	struct resource_entry *win;

	pcie->ib_wins_configured = 0;
	pcie->ob_wins_configured = 0;

	if (!reinit) {
		/* setup bus numbers */
		value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
		value &= 0xff000000;
		value |= 0x00ff0100;
		mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
	}

	/*
	 * program Bus Master Enable Bit in Command Register in PAB Config
	 * Space
	 */
	value = mobiveil_csr_readl(pcie, PCI_COMMAND);
	value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	mobiveil_csr_writel(pcie, value, PCI_COMMAND);

	/*
	 * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
	 * register
	 */
	pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
	pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
	mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);

	/*
	 * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
	 * PAB_AXI_PIO_CTRL Register
	 */
	value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
	value |= APIO_EN_MASK;
	mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);

	/* Enable PCIe PIO master */
	value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
	value |= 1 << PIO_ENABLE_SHIFT;
	mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);

	/*
	 * we'll program one outbound window for config reads and
	 * another default inbound window for all the upstream traffic
	 * rest of the outbound windows will be configured according to
	 * the "ranges" field defined in device tree
	 */

	/* config outbound translation window */
	program_ob_windows(pcie, WIN_NUM_0, rp->ob_io_res->start, 0,
			   CFG_WINDOW_TYPE, resource_size(rp->ob_io_res));

	/* memory inbound translation window */
	program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		if (resource_type(win->res) == IORESOURCE_MEM)
			type = MEM_WINDOW_TYPE;
		else if (resource_type(win->res) == IORESOURCE_IO)
			type = IO_WINDOW_TYPE;
		else
			continue;

		/* configure outbound translation window */
		program_ob_windows(pcie, pcie->ob_wins_configured,
				   win->res->start,
				   win->res->start - win->offset,
				   type, resource_size(win->res));
	}

	/* fixup for PCIe class register */
	value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
	value &= 0xff;
	value |= (PCI_CLASS_BRIDGE_PCI << 16);
	mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);

	return 0;
}
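/*
 * Window summary for the init above: outbound window 0 is reserved for
 * configuration cycles (it targets ob_io_res and is re-pointed per BDF in
 * mobiveil_pcie_map_bus()), inbound window 0 accepts IB_WIN_SIZE of
 * upstream traffic, and each DT "ranges" entry gets its own outbound
 * window translating the CPU address win->res->start to the PCI bus
 * address win->res->start - win->offset.
 */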
static void mobiveil_mask_intx_irq(struct irq_data *data)
{
	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct mobiveil_root_port *rp;
	unsigned long flags;
	u32 mask, shifted_val;

	rp = &pcie->rp;
	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
	raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
	shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	shifted_val &= ~mask;
	mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
	raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}
static void mobiveil_unmask_intx_irq(struct irq_data *data)
{
	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct mobiveil_root_port *rp;
	unsigned long flags;
	u32 shifted_val, mask;

	rp = &pcie->rp;
	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
	raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
	shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	shifted_val |= mask;
	mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
	raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}
static struct irq_chip intx_irq_chip = {
	.name = "mobiveil_pcie:intx",
	.irq_enable = mobiveil_unmask_intx_irq,
	.irq_disable = mobiveil_mask_intx_irq,
	.irq_mask = mobiveil_mask_intx_irq,
	.irq_unmask = mobiveil_unmask_intx_irq,
};
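/*
 * INTx hwirqs are 1-based (INTA..INTD map to hwirq 1..4), so the enable bit
 * toggled by the mask/unmask helpers is PAB_INTX_START + hwirq - 1 in
 * PAB_INTP_AMBA_MISC_ENB; the ISR's irq_find_mapping(..., bit + 1) lookup
 * uses the same convention.
 */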
/* routine to setup the INTx related data */
static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
/* INTx domain operations structure */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mobiveil_pcie_intx_map,
};
static struct irq_chip mobiveil_msi_irq_chip = {
	.name = "Mobiveil PCIe MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
static struct msi_domain_info mobiveil_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI),
	.chip = &mobiveil_msi_irq_chip,
};
static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}
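/*
 * Each MSI vector is given a distinct doorbell address inside the 4 KiB
 * window set up by mobiveil_pcie_enable_msi() (pcie_reg_base + 4 * hwirq)
 * and uses the hwirq number itself as the message data, which is exactly
 * the value mobiveil_pcie_isr() feeds back into irq_find_mapping() on
 * msi->dev_domain.
 */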
static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
				     const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
	.name = "Mobiveil MSI",
	.irq_compose_msi_msg = mobiveil_compose_msi_msg,
	.irq_set_affinity = mobiveil_msi_set_affinity,
};
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
					 unsigned int virq,
					 unsigned int nr_irqs, void *args)
{
	struct mobiveil_pcie *pcie = domain->host_data;
	struct mobiveil_msi *msi = &pcie->rp.msi;
	unsigned long bit;

	WARN_ON(nr_irqs != 1);
	mutex_lock(&msi->lock);

	bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
	if (bit >= msi->num_of_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->msi_irq_in_use);

	mutex_unlock(&msi->lock);

	irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
			    domain->host_data, handle_level_irq, NULL, NULL);
	return 0;
}
static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
					 unsigned int virq,
					 unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
	struct mobiveil_msi *msi = &pcie->rp.msi;

	mutex_lock(&msi->lock);

	if (!test_bit(d->hwirq, msi->msi_irq_in_use))
		dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
			d->hwirq);
	else
		__clear_bit(d->hwirq, msi->msi_irq_in_use);

	mutex_unlock(&msi->lock);
}
static const struct irq_domain_ops msi_domain_ops = {
	.alloc = mobiveil_irq_msi_domain_alloc,
	.free = mobiveil_irq_msi_domain_free,
};
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct mobiveil_msi *msi = &pcie->rp.msi;

	mutex_init(&msi->lock);
	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
						&msi_domain_ops, pcie);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &mobiveil_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}
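/*
 * Two stacked domains are created here: the inner dev_domain hands out
 * hwirqs from the msi_irq_in_use bitmap and composes the messages, while
 * the pci_msi_create_irq_domain() layer on top is what the PCI core uses
 * when an endpoint driver asks for MSI vectors (e.g. via
 * pci_alloc_irq_vectors()).
 */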
static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct mobiveil_root_port *rp = &pcie->rp;

	/* setup INTx */
	rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
						&intx_domain_ops, pcie);

	if (!rp->intx_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		return -ENOMEM;
	}

	raw_spin_lock_init(&rp->intx_mask_lock);

	/* setup MSI */
	return mobiveil_allocate_msi_domains(pcie);
}
static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie)
{
	struct platform_device *pdev = pcie->pdev;
	struct device *dev = &pdev->dev;
	struct mobiveil_root_port *rp = &pcie->rp;
	struct resource *res;
	int ret;

	/* map MSI config resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
	pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->apb_csr_base))
		return PTR_ERR(pcie->apb_csr_base);

	/* setup MSI hardware registers */
	mobiveil_pcie_enable_msi(pcie);

	rp->irq = platform_get_irq(pdev, 0);
	if (rp->irq < 0)
		return rp->irq;

	/* initialize the IRQ domains */
	ret = mobiveil_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed creating IRQ Domain\n");
		return ret;
	}

	irq_set_chained_handler_and_data(rp->irq, mobiveil_pcie_isr, pcie);

	/* Enable interrupts */
	mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
			    PAB_INTP_AMBA_MISC_ENB);

	return 0;
}
static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
{
	struct mobiveil_root_port *rp = &pcie->rp;

	/* Let SoC glue override interrupt handling if it provides a hook */
	if (rp->ops->interrupt_init)
		return rp->ops->interrupt_init(pcie);

	return mobiveil_pcie_integrated_interrupt_init(pcie);
}
static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie)
{
	u32 header_type;

	header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE);
	header_type &= 0x7f;

	return header_type == PCI_HEADER_TYPE_BRIDGE;
}
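/*
 * Bit 7 of the Header Type register is the multi-function flag, so it is
 * masked off before comparing against PCI_HEADER_TYPE_BRIDGE (type 1).
 */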
int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
{
	struct mobiveil_root_port *rp = &pcie->rp;
	struct pci_host_bridge *bridge = rp->bridge;
	struct device *dev = &pcie->pdev->dev;
	int ret;

	ret = mobiveil_pcie_parse_dt(pcie);
	if (ret) {
		dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
		return ret;
	}

	if (!mobiveil_pcie_is_bridge(pcie))
		return -ENODEV;

	/*
	 * configure all inbound and outbound windows and prepare the RC for
	 * config access
	 */
	ret = mobiveil_host_init(pcie, false);
	if (ret) {
		dev_err(dev, "Failed to initialize host\n");
		return ret;
	}

	ret = mobiveil_pcie_interrupt_init(pcie);
	if (ret) {
		dev_err(dev, "Interrupt init failed\n");
		return ret;
	}

	/* Initialize bridge */
	bridge->sysdata = pcie;
	bridge->ops = &mobiveil_pcie_ops;

	ret = mobiveil_bringup_link(pcie);
	if (ret) {
		dev_info(dev, "link bring-up failed\n");
		return ret;
	}

	return pci_host_probe(bridge);
}