// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3
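
/*
 * CDNS_PCIE_EP_IRQ_PCI_ADDR_{NONE,LEGACY} are sentinels kept in
 * ep->irq_pci_addr to track what outbound region 0 (reserved for IRQ
 * writes) currently maps.  Real MSI/MSI-X message addresses are at
 * least DWORD-aligned, so these values cannot collide with them.
 */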
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}
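
/*
 * Program BAR @epf_bar of function @fn: compute a power-of-two aperture
 * (minimum 128 bytes), pick the matching BAR control type (I/O, 32/64-bit
 * memory, prefetchable or not), then write the inbound address translation
 * registers and the per-function BAR configuration register.
 */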
static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(pcie, reg, cfg);

	epf->epf_bar[bar] = epf_bar;

	return 0;
}
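
/*
 * Undo cdns_pcie_ep_set_bar(): mark the BAR as disabled in the per-function
 * BAR configuration register and clear its inbound address translation.
 */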
static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
	cdns_pcie_writel(pcie, reg, cfg);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	epf->epf_bar[bar] = NULL;
}
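
/*
 * Map a CPU address to a PCI bus address: grab the first free outbound
 * region (region 0 is reserved for IRQ writes) and program it with the
 * requested translation.
 */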
static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
				 u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	/* ob_region_map is a single unsigned long, hence BITS_PER_LONG bits. */
	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}
static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}
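
/*
 * Advertise the number of supported MSI vectors: write the Multiple Message
 * Capable field of the MSI Message Control register, advertise 64-bit
 * message addressing and clear per-vector masking support.
 */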
static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}
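
/*
 * Return the Multiple Message Enable field (log2 of the number of vectors
 * granted by the host), or an error if the host has not enabled MSI.
 */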
static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}
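
/*
 * Return the MSI-X Table Size field, or an error if the host has not
 * enabled MSI-X.
 */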
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}
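
/*
 * Configure the MSI-X capability: table size, table offset/BIR and PBA
 * offset/BIR.  The PBA is placed right after the table in the same BAR.
 */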
static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
				 enum pci_barno bir, u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSIX BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset.  BAR must match MSIX BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}
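
/*
 * Emulate an INTx pin by sending ASSERT_INTx/DEASSERT_INTx message TLPs
 * through outbound region 0, keeping the Interrupt Status bit of the
 * function's PCI_STATUS register in sync with the pending INTx state.
 */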
static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
				     u8 intx, bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}
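
/*
 * Raise a legacy (INTx) interrupt: assert the pin, wait, then deassert it,
 * unless the host has disabled INTx via the command register.
 */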
static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}
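
/*
 * Raise MSI @interrupt_num: read the message address and data programmed by
 * the host from the function's MSI capability, map outbound region 0 to the
 * (256-byte aligned) message address if needed, then write the message data.
 */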
static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}
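
/*
 * Raise MSI-X @interrupt_num: fetch the message address and data from the
 * MSI-X table, which lives in one of the function's BARs, then write the
 * message through outbound region 0 as for MSI.
 */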
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = tbl_offset & PCI_MSIX_TABLE_BIR;
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	epf = &ep->epf[fn];
	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}
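
/*
 * Enable the physical functions that have endpoint function drivers bound
 * to them and bring up the PCIe link.
 */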
static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	struct pci_epf *epf;
	u32 cfg;
	int ret;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
{
	return &cdns_pcie_epc_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.set_msix	= cdns_pcie_ep_set_msix,
	.get_msix	= cdns_pcie_ep_get_msix,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.start		= cdns_pcie_ep_start,
	.get_features	= cdns_pcie_ep_get_features,
};
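
/**
 * cdns_pcie_ep_setup() - Probe-time setup of the endpoint controller
 * @ep: the endpoint controller to initialize
 *
 * Map the controller registers, reserve the "mem" address space used for
 * outbound accesses, create the pci_epc device, initialize its memory
 * space and reserve a window (outbound region 0) for raising interrupts
 * towards the host.
 */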
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct resource *res;
	struct pci_epc *epc;
	int ret;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);
	spin_lock_init(&ep->lock);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}