drivers/pci/cadence/pcie-cadence-ep.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (legacy
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
 */
struct cdns_pcie_ep {
	struct cdns_pcie	pcie;
	u32			max_regions;
	unsigned long		ob_region_map;
	phys_addr_t		*ob_addr;
	phys_addr_t		irq_phys_addr;
	void __iomem		*irq_cpu_addr;
	u64			irq_pci_addr;
	u8			irq_pci_fn;
	u8			irq_pending;
};

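/*
 * Note on @irq_pci_addr: it either caches the PCI address currently mapped
 * by the IRQ-dedicated outbound region, or holds one of the
 * CDNS_PCIE_EP_IRQ_PCI_ADDR_* sentinels above. A cached MSI address always
 * has its low eight bits cleared (see cdns_pcie_ep_send_msi_irq()), so the
 * odd sentinel values 0x1 and 0x3 can never collide with a real mapping.
 */
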
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * The vendor ID can only be modified from function 0; all other
	 * functions use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, enum pci_barno bar,
				dma_addr_t bar_phys, size_t size, int flags)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
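	/*
	 * For example, a 600-byte BAR request is first rounded up to
	 * sz = 1024, so aperture = ilog2(1024) - 7 = 3 and the controller
	 * advertises a 2^(3 + 7) = 1 KiB BAR.
	 */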

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(pcie, reg, cfg);

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
				   enum pci_barno bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 reg, cfg, b, ctrl;

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
	cdns_pcie_writel(pcie, reg, cfg);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
				 u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;
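
	/*
	 * The last outbound region (ep->max_regions - 1) is reserved for
	 * MSI/legacy IRQ writes (see cdns_pcie_ep_assert_intx() and
	 * cdns_pcie_ep_send_msi_irq()), hence the "- 1" in the check below.
	 */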
	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

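/*
 * In the MSI Message Control register, Multiple Message Capable
 * (PCI_MSI_FLAGS_QMASK) occupies bits 3:1 and Multiple Message Enable
 * (PCI_MSI_FLAGS_QSIZE) bits 6:4; both fields encode a power of two, so a
 * value of 3 means 2^3 = 8 vectors. Hence the "mmc << 1" above and the
 * ">> 4" in cdns_pcie_ep_get_msi() below.
 */
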
static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mmc, mme;

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
				     u8 intx, bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r = ep->max_regions - 1;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* Last region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

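	/*
	 * Keep the Interrupt Status bit of the PCI Status register in sync
	 * with irq_pending: the XOR test below is true exactly when the bit
	 * and the "any INTx pending" state disagree, so the bit is set on
	 * the first assertion and cleared after the last deassertion.
	 */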
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
	 * from drivers/pci/dwc/pci-dra7xx.c
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
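
	/*
	 * Example: with mme = 2 the host has enabled four vectors, so
	 * data_mask is 0x3; if the host programmed a message data base of
	 * 0x40, raising interrupt_num 3 sends the data value 0x42.
	 */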

	/* Get the PCI address to which the data must be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* Last region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type, u8 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
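		/* Legacy IRQs are always sent as INTA (intx 0) here. */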
		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf *epf;
	u32 cfg;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);
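	/*
	 * For example, with endpoint functions 0 and 2 bound to the
	 * controller, cfg is 0b101 at this point.
	 */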
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);

	/*
	 * The PCIe links are automatically established by the controller
	 * once and for all at power-up: software can neither start nor stop
	 * those links later at runtime.
	 *
	 * So we only have to notify the EP core that our links are already
	 * established. However, we don't call pci_epc_linkup() directly
	 * because we already hold epc->lock.
	 */
	list_for_each_entry(epf, &epc->pci_epf, list)
		pci_epf_linkup(epf);

	return 0;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.start		= cdns_pcie_ep_start,
};

static const struct of_device_id cdns_pcie_ep_of_match[] = {
	{ .compatible = "cdns,cdns-pcie-ep" },

	{ },
};

static int cdns_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct cdns_pcie_ep *ep;
	struct cdns_pcie *pcie;
	struct pci_epc *epc;
	struct resource *res;
	int ret;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pcie = &ep->pcie;
	pcie->is_rc = false;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
	pcie->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ret = of_property_read_u32(np, "cdns,max-outbound-regions",
				   &ep->max_regions);
	if (ret < 0) {
		dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
		return ret;
	}
	ep->ob_addr = devm_kzalloc(dev, ep->max_regions * sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		goto err_get_sync;
	}

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		ret = PTR_ERR(epc);
		goto err_init;
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res));
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		goto err_init;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

 err_init:
	pm_runtime_put_sync(dev);

 err_get_sync:
	pm_runtime_disable(dev);

	return ret;
}

static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);

	/* The PCIe controller can't be disabled. */
}

static struct platform_driver cdns_pcie_ep_driver = {
	.driver = {
		.name = "cdns-pcie-ep",
		.of_match_table = cdns_pcie_ep_of_match,
	},
	.probe = cdns_pcie_ep_probe,
	.shutdown = cdns_pcie_ep_shutdown,
};
builtin_platform_driver(cdns_pcie_ep_driver);