// SPDX-License-Identifier: GPL-2.0-only
/*
 * PCIe host controller driver for Xilinx XDMA PCIe Bridge
 *
 * Copyright (C) 2023 Xilinx, Inc. All rights reserved.
 */
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "pcie-xilinx-common.h"

/* Register definitions */
#define XILINX_PCIE_DMA_REG_IDR			0x00000138
#define XILINX_PCIE_DMA_REG_IMR			0x0000013c
#define XILINX_PCIE_DMA_REG_PSCR		0x00000144
#define XILINX_PCIE_DMA_REG_RPSC		0x00000148
#define XILINX_PCIE_DMA_REG_MSIBASE1		0x0000014c
#define XILINX_PCIE_DMA_REG_MSIBASE2		0x00000150
#define XILINX_PCIE_DMA_REG_RPEFR		0x00000154
#define XILINX_PCIE_DMA_REG_IDRN		0x00000160
#define XILINX_PCIE_DMA_REG_IDRN_MASK		0x00000164
#define XILINX_PCIE_DMA_REG_MSI_LOW		0x00000170
#define XILINX_PCIE_DMA_REG_MSI_HI		0x00000174
#define XILINX_PCIE_DMA_REG_MSI_LOW_MASK	0x00000178
#define XILINX_PCIE_DMA_REG_MSI_HI_MASK		0x0000017c

#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)

#define XILINX_PCIE_INTR_IMR_ALL_MASK	\
	(				\
		IMR(LINK_DOWN)	 |	\
		IMR(HOT_RESET)	 |	\
		IMR(CFG_TIMEOUT) |	\
		IMR(CORRECTABLE) |	\
		IMR(NONFATAL)	 |	\
		IMR(FATAL)	 |	\
		IMR(INTX)	 |	\
		IMR(MSI)	 |	\
		IMR(SLV_UNSUPP)	 |	\
		IMR(SLV_UNEXP)	 |	\
		IMR(SLV_COMPL)	 |	\
		IMR(SLV_ERRP)	 |	\
		IMR(SLV_CMPABT)	 |	\
		IMR(SLV_ILLBUR)	 |	\
		IMR(MST_DECERR)	 |	\
		IMR(MST_SLVERR)		\
	)

#define XILINX_PCIE_DMA_IMR_ALL_MASK	0x0ff30fe9
#define XILINX_PCIE_DMA_IDR_ALL_MASK	0xffffffff
#define XILINX_PCIE_DMA_IDRN_MASK	GENMASK(19, 16)

/* Root Port Error Register definitions */
#define XILINX_PCIE_DMA_RPEFR_ERR_VALID	BIT(18)
#define XILINX_PCIE_DMA_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_PCIE_DMA_RPEFR_ALL_MASK	0xffffffff

/* Root Port Interrupt Register definitions */
#define XILINX_PCIE_DMA_IDRN_SHIFT	16

/* Root Port Status/control Register definitions */
#define XILINX_PCIE_DMA_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP	BIT(11)
#define QDMA_BRIDGE_BASE_OFF		0xcd8

/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS	64

enum xilinx_pl_dma_version {
	XDMA,
	QDMA,
};

/**
 * struct xilinx_pl_dma_variant - PL DMA PCIe variant information
 * @version: DMA version
 */
struct xilinx_pl_dma_variant {
	enum xilinx_pl_dma_version version;
};

struct xilinx_msi {
	struct irq_domain	*msi_domain;
	unsigned long		*bitmap;
	struct irq_domain	*dev_domain;
	struct mutex		lock;		/* Protect bitmap variable */
	int			irq_msi0;
	int			irq_msi1;
};

/**
 * struct pl_dma_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: IO Mapped Register Base
 * @cfg_base: IO Mapped Configuration Base
 * @irq: Interrupt number
 * @cfg: Holds mappings of config space window
 * @phys_reg_base: Physical address of reg base
 * @intx_domain: Legacy IRQ domain pointer
 * @pldma_domain: PL DMA IRQ domain pointer
 * @resources: Bus Resources
 * @msi: MSI information
 * @intx_irq: INTx error interrupt number
 * @lock: Lock protecting shared register access
 * @variant: PL DMA PCIe version check pointer
 */
struct pl_dma_pcie {
	struct device			*dev;
	void __iomem			*reg_base;
	void __iomem			*cfg_base;
	int				irq;
	struct pci_config_window	*cfg;
	phys_addr_t			phys_reg_base;
	struct irq_domain		*intx_domain;
	struct irq_domain		*pldma_domain;
	struct list_head		resources;
	struct xilinx_msi		msi;
	int				intx_irq;
	raw_spinlock_t			lock;
	const struct xilinx_pl_dma_variant	*variant;
};

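/*
 * Register accessors: on the QDMA variant the bridge register block sits at
 * QDMA_BRIDGE_BASE_OFF within the mapped register space, so reads and writes
 * are offset accordingly for that variant.
 */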
static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
{
	if (port->variant->version == QDMA)
		return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);

	return readl(port->reg_base + reg);
}

static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
{
	if (port->variant->version == QDMA)
		writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
	else
		writel(val, port->reg_base + reg);
}

static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
{
	return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
		XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? true : false;
}

static void xilinx_pl_dma_pcie_clear_err_interrupts(struct pl_dma_pcie *port)
{
	unsigned long val = pcie_read(port, XILINX_PCIE_DMA_REG_RPEFR);

	if (val & XILINX_PCIE_DMA_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_PCIE_DMA_RPEFR_REQ_ID);
		pcie_write(port, XILINX_PCIE_DMA_RPEFR_ALL_MASK,
			   XILINX_PCIE_DMA_REG_RPEFR);
	}
}

static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
					    unsigned int devfn)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!pci_is_root_bus(bus)) {
		/*
		 * Checking whether the link is up is the last line of
		 * defense, and this check is inherently racy by definition.
		 * Sending a PIO request to a downstream device when the link
		 * is down causes an unrecoverable error, and a reset of the
		 * entire PCIe controller will be needed. We can reduce the
		 * likelihood of that unrecoverable error by checking whether
		 * the link is up, but we can't completely prevent it because
		 * the link may go down between the link-up check and the PIO
		 * request.
		 */
		if (!xilinx_pl_dma_pcie_link_up(port))
			return false;
	} else if (devfn > 0) {
		/* Only one device down on each root port */
		return false;
	}

	return true;
}

static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
		return NULL;

	if (port->variant->version == QDMA)
		return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);

	return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}

/* PCIe operations */
static struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
	.pci_ops = {
		.map_bus = xilinx_pl_dma_pcie_map_bus,
		.read	 = pci_generic_config_read,
		.write	 = pci_generic_config_write,
	}
};

static void xilinx_pl_dma_pcie_enable_msi(struct pl_dma_pcie *port)
{
	phys_addr_t msi_addr = port->phys_reg_base;

	pcie_write(port, upper_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE1);
	pcie_write(port, lower_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE2);
}

static void xilinx_mask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_unmask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_leg_irq_chip = {
	.name		= "pl_dma:INTx",
	.irq_mask	= xilinx_mask_intx_irq,
	.irq_unmask	= xilinx_unmask_intx_irq,
};

static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain,
				       unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_leg_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_pl_dma_pcie_intx_map,
};

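/*
 * MSI decode handlers: each set bit in an MSI status register identifies a
 * received MSI vector. The bit is acknowledged by writing it back, then the
 * corresponding Linux IRQ is looked up in the MSI dev_domain; the "high"
 * handler services vectors 32-63 and the "low" handler vectors 0-31.
 */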
static irqreturn_t xilinx_pl_dma_pcie_msi_handler_high(int irq, void *args)
{
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;
	struct pl_dma_pcie *port = args;

	msi = &port->msi;

	while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_HI)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_HI);
			virq = irq_find_mapping(msi->dev_domain, bit + 32);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t xilinx_pl_dma_pcie_msi_handler_low(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;

	msi = &port->msi;

	while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_LOW)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_LOW);
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t xilinx_pl_dma_pcie_event_flow(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	unsigned long val;
	int i;

	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDR);
	val &= pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(port->pldma_domain, i);

	pcie_write(port, val, XILINX_PCIE_DMA_REG_IDR);

	return IRQ_HANDLED;
}

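/*
 * Table mapping event-domain hwirq numbers to a symbolic name (used when
 * requesting the per-event IRQ) and a human-readable description (used when
 * reporting the event).
 */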
#define _IC(x, s) \
	[XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char	*sym;
	const char	*str;
} intr_cause[32] = {
	_IC(LINK_DOWN,		"Link Down"),
	_IC(HOT_RESET,		"Hot reset"),
	_IC(CFG_TIMEOUT,	"ECAM access timeout"),
	_IC(CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
	_IC(SLV_UNSUPP,		"Slave unsupported request"),
	_IC(SLV_UNEXP,		"Slave unexpected completion"),
	_IC(SLV_COMPL,		"Slave completion timeout"),
	_IC(SLV_ERRP,		"Slave Error Poison"),
	_IC(SLV_CMPABT,		"Slave Completer Abort"),
	_IC(SLV_ILLBUR,		"Slave Illegal Burst"),
	_IC(MST_DECERR,		"Master decode error"),
	_IC(MST_SLVERR,		"Master slave error"),
};

static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
{
	struct pl_dma_pcie *port = (struct pl_dma_pcie *)dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->pldma_domain, irq);
	switch (d->hwirq) {
	case XILINX_PCIE_INTR_CORRECTABLE:
	case XILINX_PCIE_INTR_NONFATAL:
	case XILINX_PCIE_INTR_FATAL:
		xilinx_pl_dma_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

static struct irq_chip xilinx_msi_irq_chip = {
	.name		= "pl_dma:PCIe MSI",
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info xilinx_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
	.chip	= &xilinx_msi_irq_chip,
};

static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = pcie->phys_reg_base;

	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}

static struct irq_chip xilinx_irq_chip = {
	.name = "pl_dma:MSI",
	.irq_compose_msi_msg = xilinx_compose_msi_msg,
};

static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct pl_dma_pcie *pcie = domain->host_data;
	struct xilinx_msi *msi = &pcie->msi;
	int bit, i;

	mutex_lock(&msi->lock);
	bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
				      get_count_order(nr_irqs));
	if (bit < 0) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}
	mutex_unlock(&msi->lock);

	return 0;
}

static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct xilinx_msi *msi = &pcie->msi;

	mutex_lock(&msi->lock);
	bitmap_release_region(msi->bitmap, data->hwirq,
			      get_count_order(nr_irqs));
	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
	.alloc	= xilinx_irq_domain_alloc,
	.free	= xilinx_irq_domain_free,
};

static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
{
	struct xilinx_msi *msi = &port->msi;

	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (msi->dev_domain) {
		irq_domain_remove(msi->dev_domain);
		msi->dev_domain = NULL;
	}

	if (msi->msi_domain) {
		irq_domain_remove(msi->msi_domain);
		msi->msi_domain = NULL;
	}
}

static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct xilinx_msi *msi = &port->msi;
	int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
	struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);

	msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
						&dev_msi_domain_ops, port);
	if (!msi->dev_domain)
		goto out;

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &xilinx_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain)
		goto out;

	mutex_init(&msi->lock);
	msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!msi->bitmap)
		goto out;

	raw_spin_lock_init(&port->lock);
	xilinx_pl_dma_pcie_enable_msi(port);

	return 0;

out:
	xilinx_pl_dma_pcie_free_irq_domains(port);
	dev_err(dev, "Failed to allocate MSI IRQ domains\n");

	return -ENOMEM;
}

/*
 * INTx error interrupts are Xilinx controller specific interrupts, used to
 * notify the user about errors such as cfg timeout, slave unsupported request,
 * fatal and non fatal error, etc.
 */
static irqreturn_t xilinx_pl_dma_pcie_intx_flow(int irq, void *args)
{
	unsigned long val;
	int i;
	struct pl_dma_pcie *port = args;

	val = FIELD_GET(XILINX_PCIE_DMA_IDRN_MASK,
			pcie_read(port, XILINX_PCIE_DMA_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_domain_irq(port->intx_domain, i);

	return IRQ_HANDLED;
}

static void xilinx_pl_dma_pcie_mask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_pl_dma_pcie_unmask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_pl_dma_pcie_event_irq_chip = {
	.name		= "pl_dma:RC-Event",
	.irq_mask	= xilinx_pl_dma_pcie_mask_event_irq,
	.irq_unmask	= xilinx_pl_dma_pcie_unmask_event_irq,
};

static int xilinx_pl_dma_pcie_event_map(struct irq_domain *domain,
					unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_pl_dma_pcie_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_pl_dma_pcie_event_map,
};

/**
 * xilinx_pl_dma_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure.
 */
static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	int ret;

	pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32,
						   &event_domain_ops, port);
	if (!port->pldma_domain)
		return -ENOMEM;

	irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	ret = xilinx_pl_dma_pcie_init_msi_irq_domain(port);
	if (ret != 0) {
		irq_domain_remove(port->intx_domain);
		return ret;
	}

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
}

static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq, err;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->pldma_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq,
				       xilinx_pl_dma_pcie_intr_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->pldma_domain,
					    XILINX_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request INTx IRQ %d\n", port->intx_irq);
		return err;
	}

	err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request event IRQ %d\n", port->irq);
		return err;
	}

	return 0;
}

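/*
 * Root port bring-up: report the link state, mask and clear all bridge event
 * interrupts, unmask the MSI decode registers and finally set the bridge
 * enable bit.
 */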
static void xilinx_pl_dma_pcie_init_port(struct pl_dma_pcie *port)
{
	if (xilinx_pl_dma_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_IDR) &
		   XILINX_PCIE_DMA_IMR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IDR);

	/* Needed for MSI DECODE MODE */
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_HI_MASK);

	/* Set the Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_RPSC) |
		   XILINX_PCIE_DMA_REG_RPSC_BEN,
		   XILINX_PCIE_DMA_REG_RPSC);
}

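/*
 * MSI decode mode uses two dedicated interrupt lines, named "msi0" and "msi1"
 * in the device tree, wired to the low (vectors 0-31) and high (vectors 32-63)
 * MSI handlers respectively.
 */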
static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
	if (port->msi.irq_msi0 <= 0)
		return port->msi.irq_msi0;

	ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
	if (port->msi.irq_msi1 <= 0)
		return port->msi.irq_msi1;

	ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	return 0;
}

static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
				       struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	int err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Missing \"reg\" property\n");
		return -ENXIO;
	}
	port->phys_reg_base = res->start;

	port->cfg = pci_ecam_create(dev, res, bus_range, &xilinx_pl_dma_pcie_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

	port->reg_base = port->cfg->win;

	if (port->variant->version == QDMA) {
		port->cfg_base = port->cfg->win;
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
		port->reg_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(port->reg_base))
			return PTR_ERR(port->reg_base);
		port->phys_reg_base = res->start;
	}

	err = xilinx_request_msi_irq(port);
	if (err) {
		pci_ecam_free(port->cfg);
		return err;
	}

	return 0;
}

static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pl_dma_pcie *port;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);
	port->dev = dev;

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus)
		return -ENODEV;

	port->variant = of_device_get_match_data(dev);

	err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	xilinx_pl_dma_pcie_init_port(port);

	err = xilinx_pl_dma_pcie_init_irq_domain(port);
	if (err)
		goto err_irq_domain;

	err = xilinx_pl_dma_pcie_setup_irq(port);
	if (err)
		goto err_host_bridge;

	bridge->sysdata = port;
	bridge->ops = &xilinx_pl_dma_pcie_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	xilinx_pl_dma_pcie_free_irq_domains(port);

err_irq_domain:
	pci_ecam_free(port->cfg);
	return err;
}

static const struct xilinx_pl_dma_variant xdma_host = {
	.version = XDMA,
};

static const struct xilinx_pl_dma_variant qdma_host = {
	.version = QDMA,
};

static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
	{
		.compatible = "xlnx,xdma-host-3.00",
		.data = &xdma_host,
	},
	{
		.compatible = "xlnx,qdma-host-3.00",
		.data = &qdma_host,
	},
	{}
};

static struct platform_driver xilinx_pl_dma_pcie_driver = {
	.driver = {
		.name = "xilinx-xdma-pcie",
		.of_match_table = xilinx_pl_dma_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pl_dma_pcie_probe,
};

builtin_platform_driver(xilinx_pl_dma_pcie_driver);