// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
 * Copyright (c) 2019, GigaIO Networks, Inc
 */

#include "dmaengine.h"

#include <linux/circ_buf.h>
#include <linux/dmaengine.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Logan Gunthorpe");

#define PLX_REG_DESC_RING_ADDR			0x214
#define PLX_REG_DESC_RING_ADDR_HI		0x218
#define PLX_REG_DESC_RING_NEXT_ADDR		0x21C
#define PLX_REG_DESC_RING_COUNT			0x220
#define PLX_REG_DESC_RING_LAST_ADDR		0x224
#define PLX_REG_DESC_RING_LAST_SIZE		0x228
#define PLX_REG_PREF_LIMIT			0x234
#define PLX_REG_CTRL				0x238
#define PLX_REG_CTRL2				0x23A
#define PLX_REG_INTR_CTRL			0x23C
#define PLX_REG_INTR_STATUS			0x23E

#define PLX_REG_PREF_LIMIT_PREF_FOUR		8

#define PLX_REG_CTRL_GRACEFUL_PAUSE		BIT(0)
#define PLX_REG_CTRL_ABORT			BIT(1)
#define PLX_REG_CTRL_WRITE_BACK_EN		BIT(2)
#define PLX_REG_CTRL_START			BIT(3)
#define PLX_REG_CTRL_RING_STOP_MODE		BIT(4)
#define PLX_REG_CTRL_DESC_MODE_BLOCK		(0 << 5)
#define PLX_REG_CTRL_DESC_MODE_ON_CHIP		(1 << 5)
#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP		(2 << 5)
#define PLX_REG_CTRL_DESC_INVALID		BIT(8)
#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE	BIT(9)
#define PLX_REG_CTRL_ABORT_DONE			BIT(10)
#define PLX_REG_CTRL_IMM_PAUSE_DONE		BIT(12)
#define PLX_REG_CTRL_IN_PROGRESS		BIT(30)

#define PLX_REG_CTRL_RESET_VAL	(PLX_REG_CTRL_DESC_INVALID | \
				 PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
				 PLX_REG_CTRL_ABORT_DONE | \
				 PLX_REG_CTRL_IMM_PAUSE_DONE)

#define PLX_REG_CTRL_START_VAL	(PLX_REG_CTRL_WRITE_BACK_EN | \
				 PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
				 PLX_REG_CTRL_START | \
				 PLX_REG_CTRL_RESET_VAL)

#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B		0
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B	1
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B	2
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B	3
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB		4
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB		5
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B		7

#define PLX_REG_INTR_CRTL_ERROR_EN		BIT(0)
#define PLX_REG_INTR_CRTL_INV_DESC_EN		BIT(1)
#define PLX_REG_INTR_CRTL_ABORT_DONE_EN		BIT(3)
#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN		BIT(4)
#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN	BIT(5)

#define PLX_REG_INTR_STATUS_ERROR		BIT(0)
#define PLX_REG_INTR_STATUS_INV_DESC		BIT(1)
#define PLX_REG_INTR_STATUS_DESC_DONE		BIT(2)
#define PLX_REG_INTR_CRTL_ABORT_DONE		BIT(3)
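
/*
 * Hardware descriptor layout. Addresses are 48 bits wide and are split
 * into a 32-bit low half and a 16-bit high half, matching the
 * cpu_to_le32()/cpu_to_le16() stores in plx_dma_prep_memcpy(). The exact
 * ordering of the address fields below is an assumption reconstructed
 * from that usage, not taken from hardware documentation.
 */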
struct plx_dma_hw_std_desc {
	__le32 flags_and_size;
	__le16 dst_addr_hi;
	__le16 src_addr_hi;
	__le32 dst_addr_lo;
	__le32 src_addr_lo;
};

#define PLX_DESC_SIZE_MASK		0x7ffffff
#define PLX_DESC_FLAG_VALID		BIT(31)
#define PLX_DESC_FLAG_INT_WHEN_DONE	BIT(30)

#define PLX_DESC_WB_SUCCESS		BIT(30)
#define PLX_DESC_WB_RD_FAIL		BIT(29)
#define PLX_DESC_WB_WR_FAIL		BIT(28)

#define PLX_DMA_RING_COUNT	2048

struct plx_dma_desc {
	struct dma_async_tx_descriptor txd;
	struct plx_dma_hw_std_desc *hw;
	u32 orig_size;
};

struct plx_dma_dev {
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	struct pci_dev __rcu *pdev;
	void __iomem *bar;
	struct tasklet_struct desc_task;

	spinlock_t ring_lock;
	bool ring_active;
	int head;
	int tail;
	struct plx_dma_hw_std_desc *hw_ring;
	dma_addr_t hw_ring_dma;
	struct plx_dma_desc **desc_ring;
};

static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
{
	return container_of(c, struct plx_dma_dev, dma_chan);
}

static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct plx_dma_desc, txd);
}
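
/*
 * The ring size is a power of two, so the free-running head and tail
 * indices are wrapped onto the ring with a simple mask here, and
 * CIRC_SPACE() on the raw values tells us whether a slot is free.
 */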
static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
{
	return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
}
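
/*
 * Walk the ring from tail to head, completing any descriptor the
 * hardware has finished with. The engine clears PLX_DESC_FLAG_VALID and
 * writes back status and remaining size when it completes a descriptor,
 * so we stop at the first descriptor that is still marked valid.
 */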
static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;
	u32 flags;

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));

		if (flags & PLX_DESC_FLAG_VALID)
			break;

		res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);

		if (flags & PLX_DESC_WB_SUCCESS)
			res.result = DMA_TRANS_NOERROR;
		else if (flags & PLX_DESC_WB_WR_FAIL)
			res.result = DMA_TRANS_WRITE_FAILED;
		else
			res.result = DMA_TRANS_READ_FAILED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}
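
/*
 * Called once the engine has been stopped: complete everything that
 * plx_dma_process_desc() could not, reporting DMA_TRANS_ABORTED with the
 * full original size as the residue.
 */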
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;

	plx_dma_process_desc(plxdev);

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		res.residue = desc->orig_size;
		res.result = DMA_TRANS_ABORTED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}
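
/*
 * Ask the hardware for a graceful pause and give it up to a second to
 * acknowledge before complaining, then clear the descriptor ring
 * registers so the engine no longer references the ring.
 */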
static void __plx_dma_stop(struct plx_dma_dev *plxdev)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	u32 val;

	val = readl(plxdev->bar + PLX_REG_CTRL);
	if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
		return;

	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	while (!time_after(jiffies, timeout)) {
		val = readl(plxdev->bar + PLX_REG_CTRL);
		if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
			break;

		usleep_range(1000, 2000);
	}

	if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
		dev_err(plxdev->dma_dev.dev,
			"Timeout waiting for graceful pause!\n");

	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
}
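
/*
 * plxdev->pdev is RCU protected so that a surprise removal can clear it
 * out from under us: the BAR is only touched while a reader can still
 * see a live pdev, otherwise the device (and its BAR mapping) may
 * already be gone.
 */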
static void plx_dma_stop(struct plx_dma_dev *plxdev)
{
	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		return;
	}

	__plx_dma_stop(plxdev);

	rcu_read_unlock();
}

static void plx_dma_desc_task(struct tasklet_struct *t)
{
	struct plx_dma_dev *plxdev = from_tasklet(plxdev, t, desc_task);

	plx_dma_process_desc(plxdev);
}
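
/*
 * Reserve the next ring slot and fill in the hardware descriptor, but
 * do not set the valid bit yet. On success the ring_lock is deliberately
 * left held; plx_dma_tx_submit() sets the valid bit and releases it,
 * which keeps prep and submit atomic with respect to other submitters.
 */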
static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
		dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
		unsigned long flags)
	__acquires(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
	struct plx_dma_desc *plxdesc;

	spin_lock_bh(&plxdev->ring_lock);
	if (!plxdev->ring_active)
		goto err_unlock;

	if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
		goto err_unlock;

	if (len > PLX_DESC_SIZE_MASK)
		goto err_unlock;

	plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
	plxdev->head++;

	plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
	plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
	plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
	plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));

	plxdesc->orig_size = len;

	if (flags & DMA_PREP_INTERRUPT)
		len |= PLX_DESC_FLAG_INT_WHEN_DONE;

	plxdesc->hw->flags_and_size = cpu_to_le32(len);
	plxdesc->txd.flags = flags;

	/* return with the lock held, it will be released in tx_submit */

	return &plxdesc->txd;

err_unlock:
	/*
	 * Keep sparse happy by restoring an even lock count on
	 * this lock.
	 */
	__acquire(plxdev->ring_lock);

	spin_unlock_bh(&plxdev->ring_lock);
	return NULL;
}

static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
	__releases(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
	struct plx_dma_desc *plxdesc = to_plx_desc(desc);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(desc);

	/*
	 * Ensure the descriptor updates are visible to the dma device
	 * before setting the valid bit.
	 */
	wmb();

	plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);

	spin_unlock_bh(&plxdev->ring_lock);

	return cookie;
}

static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	plx_dma_process_desc(plxdev);

	return dma_cookie_status(chan, cookie, txstate);
}

static void plx_dma_issue_pending(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);

	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Ensure the valid bits are visible before starting the
	 * DMA engine.
	 */
	wmb();

	writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);

	rcu_read_unlock();
}
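
/*
 * The interrupt handler only acknowledges the status bits and kicks the
 * tasklet; actual completion processing happens in plx_dma_desc_task()
 * so that client callbacks are not run in hard interrupt context.
 */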
static irqreturn_t plx_dma_isr(int irq, void *devid)
{
	struct plx_dma_dev *plxdev = devid;
	u32 status;

	status = readw(plxdev->bar + PLX_REG_INTR_STATUS);

	if (!status)
		return IRQ_NONE;

	if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
		tasklet_schedule(&plxdev->desc_task);

	writew(status, plxdev->bar + PLX_REG_INTR_STATUS);

	return IRQ_HANDLED;
}

static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
{
	struct plx_dma_desc *desc;
	int i;

	plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
				    sizeof(*plxdev->desc_ring), GFP_KERNEL);
	if (!plxdev->desc_ring)
		return -ENOMEM;

	for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			goto free_and_exit;

		dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
		desc->txd.tx_submit = plx_dma_tx_submit;
		desc->hw = &plxdev->hw_ring[i];

		plxdev->desc_ring[i] = desc;
	}

	return 0;

free_and_exit:
	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);
	kfree(plxdev->desc_ring);
	return -ENOMEM;
}
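
/*
 * Allocate the coherent hardware descriptor ring and the parallel array
 * of software descriptors, then program the ring address and count
 * registers. On success, dmaengine expects the number of usable
 * descriptors as the return value.
 */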
static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	int rc;

	plxdev->head = plxdev->tail = 0;
	plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
					     &plxdev->hw_ring_dma, GFP_KERNEL);
	if (!plxdev->hw_ring)
		return -ENOMEM;

	rc = plx_dma_alloc_desc(plxdev);
	if (rc)
		goto out_free_hw_ring;

	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		rc = -ENODEV;
		goto out_free_hw_ring;
	}

	writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
	writel(lower_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(upper_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(lower_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
	writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);

	plxdev->ring_active = true;

	rcu_read_unlock();

	return PLX_DMA_RING_COUNT;

out_free_hw_ring:
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);
	return rc;
}
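
/*
 * Teardown order matters here: mark the ring inactive so no new
 * descriptors are prepped, pause the engine, make sure the interrupt
 * and tasklet have finished, abort whatever is still outstanding and
 * only then free the rings.
 */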
static void plx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	struct pci_dev *pdev;
	int irq = -1;
	int i;

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	plx_dma_stop(plxdev);

	rcu_read_lock();
	pdev = rcu_dereference(plxdev->pdev);
	if (pdev)
		irq = pci_irq_vector(pdev, 0);
	rcu_read_unlock();

	if (irq > 0)
		synchronize_irq(irq);

	tasklet_kill(&plxdev->desc_task);

	plx_dma_abort_desc(plxdev);

	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);

	kfree(plxdev->desc_ring);
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);
}
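
/*
 * Called by the dmaengine core once the last reference to the dma
 * device is dropped; this is what finally frees the plx_dma_dev
 * allocated in plx_dma_create().
 */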
static void plx_dma_release(struct dma_device *dma_dev)
{
	struct plx_dma_dev *plxdev =
		container_of(dma_dev, struct plx_dma_dev, dma_dev);

	put_device(dma_dev->dev);
	kfree(plxdev);
}

static int plx_dma_create(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev;
	struct dma_device *dma;
	struct dma_chan *chan;
	int rc;

	plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
	if (!plxdev)
		return -ENOMEM;

	rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
			 KBUILD_MODNAME, plxdev);
	if (rc) {
		kfree(plxdev);
		return rc;
	}

	spin_lock_init(&plxdev->ring_lock);
	tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);

	RCU_INIT_POINTER(plxdev->pdev, pdev);
	plxdev->bar = pcim_iomap_table(pdev)[0];

	dma = &plxdev->dma_dev;
	dma->chancnt = 1;
	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma->dev = get_device(&pdev->dev);

	dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
	dma->device_free_chan_resources = plx_dma_free_chan_resources;
	dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
	dma->device_issue_pending = plx_dma_issue_pending;
	dma->device_tx_status = plx_dma_tx_status;
	dma->device_release = plx_dma_release;

	chan = &plxdev->dma_chan;
	chan->device = dma;
	dma_cookie_init(chan);
	list_add_tail(&chan->device_node, &dma->channels);

	rc = dma_async_device_register(dma);
	if (rc) {
		pci_err(pdev, "Failed to register dma device: %d\n", rc);
		free_irq(pci_irq_vector(pdev, 0), plxdev);
		kfree(plxdev);
		return rc;
	}

	pci_set_drvdata(pdev, plxdev);

	return 0;
}
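
/*
 * The engine generates 48-bit bus addresses (16-bit high halves in the
 * descriptors), hence the DMA_BIT_MASK(48) with a 32-bit fallback below.
 */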
static int plx_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (rc <= 0)
		return rc;

	pci_set_master(pdev);

	rc = plx_dma_create(pdev);
	if (rc)
		goto err_free_irq_vectors;

	pci_info(pdev, "PLX DMA Channel Registered\n");

	return 0;

err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}
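
/*
 * On remove, free the irq first so the ISR can no longer run, then
 * clear the RCU-protected pdev pointer and wait for readers so that
 * concurrent dmaengine calls see the device as gone and stop touching
 * the BAR; the remove path itself can then safely quiesce the hardware
 * before unregistering.
 */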
static void plx_dma_remove(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);

	free_irq(pci_irq_vector(pdev, 0), plxdev);

	rcu_assign_pointer(plxdev->pdev, NULL);
	synchronize_rcu();

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	__plx_dma_stop(plxdev);
	plx_dma_abort_desc(plxdev);

	plxdev->bar = NULL;
	dma_async_device_unregister(&plxdev->dma_dev);

	pci_free_irq_vectors(pdev);
}

static const struct pci_device_id plx_dma_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_PLX,
		.device		= 0x87D0,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= PCI_CLASS_SYSTEM_OTHER << 8,
		.class_mask	= 0xFFFFFFFF,
	},
	{0}
};
MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);

static struct pci_driver plx_dma_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= plx_dma_pci_tbl,
	.probe		= plx_dma_probe,
	.remove		= plx_dma_remove,
};
module_pci_driver(plx_dma_pci_driver);
);