drivers/pci/controller/pcie-rockchip-ep.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Rockchip AXI PCIe endpoint controller driver
 *
 * Copyright (c) 2018 Rockchip, Inc.
 *
 * Author: Shawn Lin <shawn.lin@rock-chips.com>
 *         Simon Xue <xxm@rock-chips.com>
 */

#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>

#include "pcie-rockchip.h"

/**
 * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
 * @rockchip: Rockchip PCIe controller
 * @epc: PCI EPC device
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 *                 dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *                the sending of a memory write (MSI) / normal message (INTX
 *                IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
 *                dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *              the MSI/INTX IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted INTX IRQs.
 * @perst_irq: IRQ used for the PERST# signal handling
 * @perst_asserted: True if the PERST# signal was asserted
 * @link_up: True if the PCI link is up
 * @link_training: Work item to execute PCI link training
 */
struct rockchip_pcie_ep {
        struct rockchip_pcie    rockchip;
        struct pci_epc          *epc;
        u32                     max_regions;
        unsigned long           ob_region_map;
        phys_addr_t             *ob_addr;
        phys_addr_t             irq_phys_addr;
        void __iomem            *irq_cpu_addr;
        u64                     irq_pci_addr;
        u8                      irq_pci_fn;
        u8                      irq_pending;
        int                     perst_irq;
        bool                    perst_asserted;
        bool                    link_up;
        struct delayed_work     link_training;
};

static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
                                          u32 region)
{
        rockchip_pcie_write(rockchip, 0,
                            ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
        rockchip_pcie_write(rockchip, 0,
                            ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
        rockchip_pcie_write(rockchip, 0,
                            ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
        rockchip_pcie_write(rockchip, 0,
                            ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
}

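/*
 * Number of low-order address bits passed through (not translated by) an
 * outbound ATU entry: the highest bit position at which pci_addr and
 * pci_addr + size - 1 differ, clamped to the hardware limits.
 */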
static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip,
                                            u64 pci_addr, size_t size)
{
        int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1));

        return clamp(num_pass_bits,
                     ROCKCHIP_PCIE_AT_MIN_NUM_BITS,
                     ROCKCHIP_PCIE_AT_MAX_NUM_BITS);
}

static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
                                         u32 r, u64 cpu_addr, u64 pci_addr,
                                         size_t size)
{
        int num_pass_bits;
        u32 addr0, addr1, desc0;

        num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip,
                                                         pci_addr, size);

        addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
                (lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
        addr1 = upper_32_bits(pci_addr);
        desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE;

        /* PCI bus address region */
        rockchip_pcie_write(rockchip, addr0,
                            ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
        rockchip_pcie_write(rockchip, addr1,
                            ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
        rockchip_pcie_write(rockchip, desc0,
                            ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
        rockchip_pcie_write(rockchip, 0,
                            ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
}

static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
                                         struct pci_epf_header *hdr)
{
        u32 reg;
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;

        /* All functions share the same vendor ID with function 0 */
        if (fn == 0) {
                rockchip_pcie_write(rockchip,
                                    hdr->vendorid | hdr->subsys_vendor_id << 16,
                                    PCIE_CORE_CONFIG_VENDOR);
        }

        reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
        reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
        rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);

        rockchip_pcie_write(rockchip,
                            hdr->revid |
                            hdr->progif_code << 8 |
                            hdr->subclass_code << 16 |
                            hdr->baseclass_code << 24,
                            ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
        rockchip_pcie_write(rockchip, hdr->cache_line_size,
                            ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                            PCI_CACHE_LINE_SIZE);
        rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
                            ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                            PCI_SUBSYSTEM_VENDOR_ID);
        rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
                            ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                            PCI_INTERRUPT_LINE);

        return 0;
}

static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
                                    struct pci_epf_bar *epf_bar)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;
        dma_addr_t bar_phys = epf_bar->phys_addr;
        enum pci_barno bar = epf_bar->barno;
        int flags = epf_bar->flags;
        u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
        u64 sz;

        /* BAR size is 2^(aperture + 7) */
        sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);

        /*
         * roundup_pow_of_two() returns an unsigned long, which is not suited
         * for 64bit values.
         */
        sz = 1ULL << fls64(sz - 1);
        aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

        if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
                ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
        } else {
                bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
                bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);

                if (is_64bits && (bar & 1))
                        return -EINVAL;

                if (is_64bits && is_prefetch)
                        ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
                else if (is_prefetch)
                        ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
                else if (is_64bits)
                        ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
                else
                        ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
        }

        if (bar < BAR_4) {
                reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
                b = bar;
        } else {
                reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
                b = bar - BAR_4;
        }

        addr0 = lower_32_bits(bar_phys);
        addr1 = upper_32_bits(bar_phys);

        cfg = rockchip_pcie_read(rockchip, reg);
        cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
                 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
        cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
                ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));

        rockchip_pcie_write(rockchip, cfg, reg);
        rockchip_pcie_write(rockchip, addr0,
                            ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
        rockchip_pcie_write(rockchip, addr1,
                            ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));

        return 0;
}

static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
                                       struct pci_epf_bar *epf_bar)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;
        u32 reg, cfg, b, ctrl;
        enum pci_barno bar = epf_bar->barno;

        if (bar < BAR_4) {
                reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
                b = bar;
        } else {
                reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
                b = bar - BAR_4;
        }

        ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
        cfg = rockchip_pcie_read(rockchip, reg);
        cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
                 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
        cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);

        rockchip_pcie_write(rockchip, cfg, reg);
        rockchip_pcie_write(rockchip, 0x0,
                            ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
        rockchip_pcie_write(rockchip, 0x0,
                            ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}

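/*
 * Each outbound window is 1 MB in size and there are up to 32 of them, so
 * bits [24:20] of an AXI address select the outbound region it falls into.
 */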
static inline u32 rockchip_ob_region(phys_addr_t addr)
{
        return (addr >> ilog2(SZ_1M)) & 0x1f;
}

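/*
 * Return the ATU-aligned PCI base address for a mapping and report, through
 * @pci_size and @addr_offset, the size to map (clamped to the 1 MB outbound
 * window and aligned to the ATU granularity) and the offset of pci_addr
 * within that mapping.
 */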
static u64 rockchip_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
                                       size_t *pci_size, size_t *addr_offset)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        size_t size = *pci_size;
        u64 offset, mask;
        int num_bits;

        num_bits = rockchip_pcie_ep_ob_atu_num_bits(&ep->rockchip,
                                                    pci_addr, size);
        mask = (1ULL << num_bits) - 1;

        offset = pci_addr & mask;
        if (size + offset > SZ_1M)
                size = SZ_1M - offset;

        *pci_size = ALIGN(offset + size, ROCKCHIP_PCIE_AT_SIZE_ALIGN);
        *addr_offset = offset;

        return pci_addr & ~mask;
}

static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
                                     phys_addr_t addr, u64 pci_addr,
                                     size_t size)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *pcie = &ep->rockchip;
        u32 r = rockchip_ob_region(addr);

        if (test_bit(r, &ep->ob_region_map))
                return -EBUSY;

        rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);

        set_bit(r, &ep->ob_region_map);
        ep->ob_addr[r] = addr;

        return 0;
}

static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
                                        phys_addr_t addr)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;
        u32 r = rockchip_ob_region(addr);

        if (addr != ep->ob_addr[r] || !test_bit(r, &ep->ob_region_map))
                return;

        rockchip_pcie_clear_ep_ob_atu(rockchip, r);

        ep->ob_addr[r] = 0;
        clear_bit(r, &ep->ob_region_map);
}

static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
                                    u8 multi_msg_cap)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;
        u32 flags;

        flags = rockchip_pcie_read(rockchip,
                                   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                                   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
        flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
        flags |=
           (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
           (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
        flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
        rockchip_pcie_write(rockchip, flags,
                            ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                            ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
        return 0;
}

static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;
        u32 flags;

        flags = rockchip_pcie_read(rockchip,
                                   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                                   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
        if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
                return -EINVAL;

        return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
                ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
}

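/*
 * Assert or de-assert an INTX line. The controller exposes a single client
 * register for INTX control; irq_pending tracks which INTX vectors are
 * currently asserted.
 */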
static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
                                         u8 intx, bool do_assert)
{
        struct rockchip_pcie *rockchip = &ep->rockchip;

        intx &= 3;

        if (do_assert) {
                ep->irq_pending |= BIT(intx);
                rockchip_pcie_write(rockchip,
                                    PCIE_CLIENT_INT_IN_ASSERT |
                                    PCIE_CLIENT_INT_PEND_ST_PEND,
                                    PCIE_CLIENT_LEGACY_INT_CTRL);
        } else {
                ep->irq_pending &= ~BIT(intx);
                rockchip_pcie_write(rockchip,
                                    PCIE_CLIENT_INT_IN_DEASSERT |
                                    PCIE_CLIENT_INT_PEND_ST_NORMAL,
                                    PCIE_CLIENT_LEGACY_INT_CTRL);
        }
}

static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn,
                                          u8 intx)
{
        u16 cmd;

        cmd = rockchip_pcie_read(&ep->rockchip,
                                 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                                 ROCKCHIP_PCIE_EP_CMD_STATUS);

        if (cmd & PCI_COMMAND_INTX_DISABLE)
                return -EINVAL;

        /*
         * The TRM vaguely states that some delay, tied to a number of AHB
         * bus clock cycles, is needed between toggling INTx. Use a generous
         * 1 ms delay here to be safe.
         */
        rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
        mdelay(1);
        rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
        return 0;
}

static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
                                         u8 interrupt_num)
{
        struct rockchip_pcie *rockchip = &ep->rockchip;
        u32 flags, mme, data, data_mask;
        size_t irq_pci_size, offset;
        u64 irq_pci_addr;
        u8 msi_count;
        u64 pci_addr;

        /* Check MSI enable bit */
        flags = rockchip_pcie_read(&ep->rockchip,
                                   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                                   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
        if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
                return -EINVAL;

        /* Get MSI numbers from MME */
        mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
               ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
        msi_count = 1 << mme;
        if (!interrupt_num || interrupt_num > msi_count)
                return -EINVAL;

        /* Set MSI private data */
        data_mask = msi_count - 1;
        data = rockchip_pcie_read(rockchip,
                                  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                                  ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
                                  PCI_MSI_DATA_64);
        data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

        /* Get MSI PCI address */
        pci_addr = rockchip_pcie_read(rockchip,
                                      ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                                      ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
                                      PCI_MSI_ADDRESS_HI);
        pci_addr <<= 32;
        pci_addr |= rockchip_pcie_read(rockchip,
                                       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
                                       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
                                       PCI_MSI_ADDRESS_LO);

        /* Set the outbound region if needed. */
        irq_pci_size = ~PCIE_ADDR_MASK + 1;
        irq_pci_addr = rockchip_pcie_ep_align_addr(ep->epc,
                                                   pci_addr & PCIE_ADDR_MASK,
                                                   &irq_pci_size, &offset);
        if (unlikely(ep->irq_pci_addr != irq_pci_addr ||
                     ep->irq_pci_fn != fn)) {
                rockchip_pcie_prog_ep_ob_atu(rockchip, fn,
                                             rockchip_ob_region(ep->irq_phys_addr),
                                             ep->irq_phys_addr,
                                             irq_pci_addr, irq_pci_size);
                ep->irq_pci_addr = irq_pci_addr;
                ep->irq_pci_fn = fn;
        }

        writew(data, ep->irq_cpu_addr + offset + (pci_addr & ~PCIE_ADDR_MASK));
        return 0;
}

static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
                                      unsigned int type, u16 interrupt_num)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);

        switch (type) {
        case PCI_IRQ_INTX:
                return rockchip_pcie_ep_send_intx_irq(ep, fn, 0);
        case PCI_IRQ_MSI:
                return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
        default:
                return -EINVAL;
        }
}

static int rockchip_pcie_ep_start(struct pci_epc *epc)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;
        struct pci_epf *epf;
        u32 cfg;

        cfg = BIT(0);
        list_for_each_entry(epf, &epc->pci_epf, list)
                cfg |= BIT(epf->func_no);

        rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);

        if (rockchip->perst_gpio)
                enable_irq(ep->perst_irq);

        /* Enable configuration and start link training */
        rockchip_pcie_write(rockchip,
                            PCIE_CLIENT_LINK_TRAIN_ENABLE |
                            PCIE_CLIENT_CONF_ENABLE,
                            PCIE_CLIENT_CONFIG);

        if (!rockchip->perst_gpio)
                schedule_delayed_work(&ep->link_training, 0);

        return 0;
}

static void rockchip_pcie_ep_stop(struct pci_epc *epc)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;

        if (rockchip->perst_gpio) {
                ep->perst_asserted = true;
                disable_irq(ep->perst_irq);
        }

        cancel_delayed_work_sync(&ep->link_training);

        /* Stop link training and disable configuration */
        rockchip_pcie_write(rockchip,
                            PCIE_CLIENT_CONF_DISABLE |
                            PCIE_CLIENT_LINK_TRAIN_DISABLE,
                            PCIE_CLIENT_CONFIG);
}

static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip)
{
        u32 status;

        status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_LCS);
        status |= PCI_EXP_LNKCTL_RL;
        rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_LCS);
}

static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip)
{
        u32 val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);

        return PCIE_LINK_UP(val);
}

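/*
 * Delayed work handler polling for link training completion. It reschedules
 * itself until the link is up, retrains to Gen2 when that speed was
 * requested, and then notifies the endpoint function(s) via pci_epc_linkup().
 */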
static void rockchip_pcie_ep_link_training(struct work_struct *work)
{
        struct rockchip_pcie_ep *ep =
                container_of(work, struct rockchip_pcie_ep, link_training.work);
        struct rockchip_pcie *rockchip = &ep->rockchip;
        struct device *dev = rockchip->dev;
        u32 val;
        int ret;

        /* Enable Gen1 training and wait for its completion */
        ret = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
                                 val, PCIE_LINK_TRAINING_DONE(val), 50,
                                 LINK_TRAIN_TIMEOUT);
        if (ret)
                goto again;

        /* Make sure that the link is up */
        ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
                                 val, PCIE_LINK_UP(val), 50,
                                 LINK_TRAIN_TIMEOUT);
        if (ret)
                goto again;

        /*
         * Check the current speed: if gen2 speed was requested and we are not
         * at gen2 speed yet, retrain again for gen2.
         */
        val = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
        if (!PCIE_LINK_IS_GEN2(val) && rockchip->link_gen == 2) {
                /* Enable retrain for gen2 */
                rockchip_pcie_ep_retrain_link(rockchip);
                readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
                                   val, PCIE_LINK_IS_GEN2(val), 50,
                                   LINK_TRAIN_TIMEOUT);
        }

        /* Check again that the link is up */
        if (!rockchip_pcie_ep_link_up(rockchip))
                goto again;

        /*
         * If PERST# was asserted while polling the link, do not notify
         * the function.
         */
        if (ep->perst_asserted)
                return;

        val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS0);
        dev_info(dev,
                 "link up (negotiated speed: %sGT/s, width: x%lu)\n",
                 (val & PCIE_CLIENT_NEG_LINK_SPEED) ? "5" : "2.5",
                 ((val & PCIE_CLIENT_NEG_LINK_WIDTH_MASK) >>
                  PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT) << 1);

        /* Notify the function */
        pci_epc_linkup(ep->epc);
        ep->link_up = true;

        return;

again:
        schedule_delayed_work(&ep->link_training, msecs_to_jiffies(5));
}

static void rockchip_pcie_ep_perst_assert(struct rockchip_pcie_ep *ep)
{
        struct rockchip_pcie *rockchip = &ep->rockchip;

        dev_dbg(rockchip->dev, "PERST# asserted, link down\n");

        if (ep->perst_asserted)
                return;

        ep->perst_asserted = true;

        cancel_delayed_work_sync(&ep->link_training);

        if (ep->link_up) {
                pci_epc_linkdown(ep->epc);
                ep->link_up = false;
        }
}

static void rockchip_pcie_ep_perst_deassert(struct rockchip_pcie_ep *ep)
{
        struct rockchip_pcie *rockchip = &ep->rockchip;

        dev_dbg(rockchip->dev, "PERST# de-asserted, starting link training\n");

        if (!ep->perst_asserted)
                return;

        ep->perst_asserted = false;

        /* Enable link re-training */
        rockchip_pcie_ep_retrain_link(rockchip);

        /* Start link training */
        schedule_delayed_work(&ep->link_training, 0);
}

static irqreturn_t rockchip_pcie_ep_perst_irq_thread(int irq, void *data)
{
        struct pci_epc *epc = data;
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;
        u32 perst = gpiod_get_value(rockchip->perst_gpio);

        if (perst)
                rockchip_pcie_ep_perst_assert(ep);
        else
                rockchip_pcie_ep_perst_deassert(ep);

        irq_set_irq_type(ep->perst_irq,
                         (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));

        return IRQ_HANDLED;
}

static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc)
{
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
        struct rockchip_pcie *rockchip = &ep->rockchip;
        struct device *dev = rockchip->dev;
        int ret;

        if (!rockchip->perst_gpio)
                return 0;

        /* PCIe reset interrupt */
        ep->perst_irq = gpiod_to_irq(rockchip->perst_gpio);
        if (ep->perst_irq < 0) {
                dev_err(dev,
                        "failed to get IRQ for PERST# GPIO: %d\n",
                        ep->perst_irq);

                return ep->perst_irq;
        }

        /*
         * The perst_gpio is active low, so when it is inactive on start, it
         * is high and will trigger the perst_irq handler. So treat this
         * initial IRQ as a dummy one by faking the host asserting PERST#.
         */
        ep->perst_asserted = true;
        irq_set_status_flags(ep->perst_irq, IRQ_NOAUTOEN);
        ret = devm_request_threaded_irq(dev, ep->perst_irq, NULL,
                                        rockchip_pcie_ep_perst_irq_thread,
                                        IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                                        "pcie-ep-perst", epc);
        if (ret) {
                dev_err(dev,
                        "failed to request IRQ for PERST# GPIO: %d\n",
                        ret);

                return ret;
        }

        return 0;
}

static const struct pci_epc_features rockchip_pcie_epc_features = {
        .linkup_notifier = true,
        .msi_capable = true,
        .msix_capable = false,
        .align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};

static const struct pci_epc_features*
rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
        return &rockchip_pcie_epc_features;
}

static const struct pci_epc_ops rockchip_pcie_epc_ops = {
        .write_header = rockchip_pcie_ep_write_header,
        .set_bar = rockchip_pcie_ep_set_bar,
        .clear_bar = rockchip_pcie_ep_clear_bar,
        .align_addr = rockchip_pcie_ep_align_addr,
        .map_addr = rockchip_pcie_ep_map_addr,
        .unmap_addr = rockchip_pcie_ep_unmap_addr,
        .set_msi = rockchip_pcie_ep_set_msi,
        .get_msi = rockchip_pcie_ep_get_msi,
        .raise_irq = rockchip_pcie_ep_raise_irq,
        .start = rockchip_pcie_ep_start,
        .stop = rockchip_pcie_ep_stop,
        .get_features = rockchip_pcie_ep_get_features,
};

static int rockchip_pcie_ep_get_resources(struct rockchip_pcie *rockchip,
                                          struct rockchip_pcie_ep *ep)
{
        struct device *dev = rockchip->dev;
        int err;

        err = rockchip_pcie_parse_dt(rockchip);
        if (err)
                return err;

        err = rockchip_pcie_get_phys(rockchip);
        if (err)
                return err;

        err = of_property_read_u32(dev->of_node,
                                   "rockchip,max-outbound-regions",
                                   &ep->max_regions);
        if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
                ep->max_regions = MAX_REGION_LIMIT;

        ep->ob_region_map = 0;

        err = of_property_read_u8(dev->of_node, "max-functions",
                                  &ep->epc->max_functions);
        if (err < 0)
                ep->epc->max_functions = 1;

        return 0;
}

static const struct of_device_id rockchip_pcie_ep_of_match[] = {
        { .compatible = "rockchip,rk3399-pcie-ep"},
        {},
};

static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
{
        struct rockchip_pcie *rockchip = &ep->rockchip;
        struct device *dev = rockchip->dev;
        struct pci_epc_mem_window *windows = NULL;
        int err, i;

        ep->ob_addr = devm_kcalloc(dev, ep->max_regions, sizeof(*ep->ob_addr),
                                   GFP_KERNEL);

        if (!ep->ob_addr)
                return -ENOMEM;

        windows = devm_kcalloc(dev, ep->max_regions,
                               sizeof(struct pci_epc_mem_window), GFP_KERNEL);
        if (!windows)
                return -ENOMEM;

        for (i = 0; i < ep->max_regions; i++) {
                windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
                windows[i].size = SZ_1M;
                windows[i].page_size = SZ_1M;
        }

        err = pci_epc_multi_mem_init(ep->epc, windows, ep->max_regions);
        devm_kfree(dev, windows);

        if (err < 0) {
                dev_err(dev, "failed to initialize the memory space\n");
                return err;
        }

        ep->irq_cpu_addr = pci_epc_mem_alloc_addr(ep->epc, &ep->irq_phys_addr,
                                                  SZ_1M);
        if (!ep->irq_cpu_addr) {
                dev_err(dev, "failed to reserve memory space for MSI\n");
                err = -ENOMEM;
                goto err_epc_mem_exit;
        }

        ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;

        return 0;

err_epc_mem_exit:
        pci_epc_mem_exit(ep->epc);

        return err;
}

static void rockchip_pcie_ep_exit_ob_mem(struct rockchip_pcie_ep *ep)
{
        pci_epc_mem_exit(ep->epc);
}

static void rockchip_pcie_ep_hide_broken_msix_cap(struct rockchip_pcie *rockchip)
{
        u32 cfg_msi, cfg_msix_cp;

        /*
         * MSI-X is not supported but the controller still advertises the MSI-X
         * capability by default, which can lead to the Root Complex side
         * allocating MSI-X vectors which cannot be used. Avoid this by skipping
         * the MSI-X capability entry in the PCIe capabilities linked-list: get
         * the next pointer from the MSI-X entry and set that in the MSI
         * capability entry (which is the previous entry). This way the MSI-X
         * entry is skipped (left out of the linked-list) and not advertised.
         */
        cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
                                     ROCKCHIP_PCIE_EP_MSI_CTRL_REG);

        cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK;

        cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
                                         ROCKCHIP_PCIE_EP_MSIX_CAP_REG) &
                      ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK;

        cfg_msi |= cfg_msix_cp;

        rockchip_pcie_write(rockchip, cfg_msi,
                            PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
}

static int rockchip_pcie_ep_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rockchip_pcie_ep *ep;
        struct rockchip_pcie *rockchip;
        struct pci_epc *epc;
        int err;

        ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
        if (!ep)
                return -ENOMEM;

        rockchip = &ep->rockchip;
        rockchip->is_rc = false;
        rockchip->dev = dev;
        INIT_DELAYED_WORK(&ep->link_training, rockchip_pcie_ep_link_training);

        epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
        if (IS_ERR(epc)) {
                dev_err(dev, "failed to create EPC device\n");
                return PTR_ERR(epc);
        }

        ep->epc = epc;
        epc_set_drvdata(epc, ep);

        err = rockchip_pcie_ep_get_resources(rockchip, ep);
        if (err)
                return err;

        err = rockchip_pcie_ep_init_ob_mem(ep);
        if (err)
                return err;

        err = rockchip_pcie_enable_clocks(rockchip);
        if (err)
                goto err_exit_ob_mem;

        err = rockchip_pcie_init_port(rockchip);
        if (err)
                goto err_disable_clocks;

        rockchip_pcie_ep_hide_broken_msix_cap(rockchip);

        /* Only enable function 0 by default */
        rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);

        pci_epc_init_notify(epc);

        err = rockchip_pcie_ep_setup_irq(epc);
        if (err < 0)
                goto err_uninit_port;

        return 0;

err_uninit_port:
        rockchip_pcie_deinit_phys(rockchip);
err_disable_clocks:
        rockchip_pcie_disable_clocks(rockchip);
err_exit_ob_mem:
        rockchip_pcie_ep_exit_ob_mem(ep);
        return err;
}

static struct platform_driver rockchip_pcie_ep_driver = {
        .driver = {
                .name = "rockchip-pcie-ep",
                .of_match_table = rockchip_pcie_ep_of_match,
        },
        .probe = rockchip_pcie_ep_probe,
};

builtin_platform_driver(rockchip_pcie_ep_driver);