/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#define MSGDMA_MAX_TRANS_LEN U32_MAX
#define MSGDMA_DESC_NUM 1024
/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *                 bit 23:16 read burst
 *                 bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *          bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
        u32 read_addr_lo;
        u32 write_addr_lo;
        u32 len;
        u32 burst_seq_num;
        u32 stride;
        u32 read_addr_hi;
        u32 write_addr_hi;
        u32 control;
};
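/*
 * Example (illustrative only, addresses and length are made up): a single
 * 4096-byte memory-to-memory descriptor as msgdma_prep_memcpy() below would
 * build it, with both addresses incrementing and burst_seq_num left at 0,
 * which selects the maximum burst length:
 *
 *      struct msgdma_extended_desc d = {
 *              .read_addr_lo   = lower_32_bits(src),
 *              .read_addr_hi   = upper_32_bits(src),
 *              .write_addr_lo  = lower_32_bits(dst),
 *              .write_addr_hi  = upper_32_bits(dst),
 *              .len            = 4096,
 *              .burst_seq_num  = 0,
 *              .stride         = MSGDMA_DESC_STRIDE_RW,
 *              .control        = MSGDMA_DESC_CTL_TR_ERR_IRQ |
 *                                MSGDMA_DESC_CTL_END_ON_LEN |
 *                                MSGDMA_DESC_CTL_TR_COMP_IRQ |
 *                                MSGDMA_DESC_CTL_GO,
 *      };
 */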
/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO and hardware processing
 */
#define MSGDMA_DESC_CTL_GO BIT(31)
/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST        (MSGDMA_DESC_CTL_GEN_SOP |      \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE       (MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST         (MSGDMA_DESC_CTL_GEN_EOP |      \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE       (MSGDMA_DESC_CTL_GEN_SOP |      \
                                         MSGDMA_DESC_CTL_GEN_EOP |      \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE       (MSGDMA_DESC_CTL_END_ON_EOP |   \
                                         MSGDMA_DESC_CTL_END_ON_LEN |   \
                                         MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
                                         MSGDMA_DESC_CTL_EARLY_IRQ |    \
                                         MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
                                         MSGDMA_DESC_CTL_EARLY_DONE)
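/*
 * Example (illustrative only, not used by this driver as-is): a streaming
 * frame split across three descriptors uses the FIRST/MIDDLE/LAST
 * combinations, so SOP is generated with the first buffer, EOP with the
 * last one, and only the last descriptor raises a transfer-complete
 * interrupt:
 *
 *      desc[0].control = MSGDMA_DESC_CTL_TX_FIRST;
 *      desc[1].control = MSGDMA_DESC_CTL_TX_MIDDLE;
 *      desc[2].control = MSGDMA_DESC_CTL_TX_LAST;
 */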
/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD 0x00000001
#define MSGDMA_DESC_STRIDE_WR 0x00010000
#define MSGDMA_DESC_STRIDE_RW 0x00010001
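/*
 * The write stride lives in bits 31:16 of the stride word and the read
 * stride in bits 15:0; a stride of 1 advances the address by one word per
 * transfer, while a stride of 0 keeps it fixed. For example,
 * msgdma_prep_slave_sg() below uses
 *
 *      stride = MSGDMA_DESC_STRIDE_RD;
 *
 * for DMA_MEM_TO_DEV, so the read (memory) address increments while the
 * write (device) address stays fixed.
 */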
/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS 0x00          /* Read / Clear */
#define MSGDMA_CSR_CONTROL 0x04         /* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL 0x08   /* 31:16 - write fill level */
                                        /* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL 0x0c /* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM 0x10      /* 31:16 - write seq number */
                                        /* 15:00 - read seq number */
/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4)
#define MSGDMA_CSR_STAT_STOPPED BIT(5)
#define MSGDMA_CSR_STAT_RESETTING BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8)
#define MSGDMA_CSR_STAT_IRQ BIT(9)
#define MSGDMA_CSR_STAT_MASK GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ GENMASK(8, 0)

#define DESC_EMPTY      (MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
                         MSGDMA_CSR_STAT_RESP_BUF_EMPTY)
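/*
 * Example (sketch, not used by this driver as-is): waiting for the
 * dispatcher to drain completely, e.g. before a controlled shutdown:
 *
 *      u32 status;
 *
 *      do {
 *              status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
 *      } while ((status & DESC_EMPTY) != DESC_EMPTY ||
 *               (status & MSGDMA_CSR_STAT_BUSY));
 */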
/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP BIT(0)
#define MSGDMA_CSR_CTL_RESET BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5)
/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v) (((v) & 0xffff0000) >> 16)
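/*
 * Example (sketch): both fill levels are packed into one CSR word and can
 * be read with a single access:
 *
 *      u32 lvl = ioread32(mdev->csr + MSGDMA_CSR_RW_FILL_LEVEL);
 *      u16 wr_lvl = MSGDMA_CSR_WR_FILL_LEVEL_GET(lvl);
 *      u16 rd_lvl = MSGDMA_CSR_RD_FILL_LEVEL_GET(lvl);
 */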
/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED 0x00
#define MSGDMA_RESP_STATUS 0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM BIT(8)
#define MSGDMA_RESP_ERR_MASK 0xff
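/*
 * Example (sketch): popping one entry from the response FIFO. Reading both
 * longwords purges the entry, and the error bits live in the low byte of
 * the status word (this mirrors what msgdma_tasklet() does below):
 *
 *      u32 size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
 *      u32 status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
 *      u8 err = status & MSGDMA_RESP_ERR_MASK;
 */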
/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node for inserting this descriptor into one of the channel lists
 * @tx_list: list of additional descriptors chained to this transaction
 */
struct msgdma_sw_desc {
        struct dma_async_tx_descriptor async_tx;
        struct msgdma_extended_desc hw_desc;
        struct list_head node;
        struct list_head tx_list;
};
/*
 * struct msgdma_device - DMA device structure
 */
struct msgdma_device {
        spinlock_t lock;
        struct device *dev;
        struct tasklet_struct irq_tasklet;
        struct list_head pending_list;
        struct list_head free_list;
        struct list_head active_list;
        struct list_head done_list;
        u32 desc_free_cnt;
        bool idle;

        struct dma_device dmadev;
        struct dma_chan dmachan;
        dma_addr_t hw_desq;
        struct msgdma_sw_desc *sw_desq;
        unsigned int npendings;

        struct dma_slave_config slave_cfg;

        int irq;

        /* mSGDMA controller */
        void __iomem *csr;

        /* mSGDMA descriptors */
        void __iomem *desc;

        /* mSGDMA response */
        void __iomem *resp;
};
#define to_mdev(chan) container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx)
/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
        list_del(&desc->node);
        spin_unlock_irqrestore(&mdev->lock, flags);

        INIT_LIST_HEAD(&desc->tx_list);

        return desc;
}
/**
 * msgdma_free_descriptor - Free descriptor
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
                                   struct msgdma_sw_desc *desc)
{
        struct msgdma_sw_desc *child, *next;

        mdev->desc_free_cnt++;
        list_add_tail(&desc->node, &mdev->free_list);
        list_for_each_entry_safe(child, next, &desc->tx_list, node) {
                mdev->desc_free_cnt++;
                list_move_tail(&child->node, &mdev->free_list);
        }
}
/**
 * msgdma_free_desc_list - Free descriptors list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List to parse and delete the descriptor
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
                                  struct list_head *list)
{
        struct msgdma_sw_desc *desc, *next;

        list_for_each_entry_safe(desc, next, list, node)
                msgdma_free_descriptor(mdev, desc);
}
/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
                               dma_addr_t dst, dma_addr_t src, size_t len,
                               u32 stride)
{
        /* Set lower 32bits of src & dst addresses in the descriptor */
        desc->read_addr_lo = lower_32_bits(src);
        desc->write_addr_lo = lower_32_bits(dst);

        /* Set upper 32bits of src & dst addresses in the descriptor */
        desc->read_addr_hi = upper_32_bits(src);
        desc->write_addr_hi = upper_32_bits(dst);

        desc->len = len;
        desc->stride = stride;
        desc->burst_seq_num = 0;        /* 0 will result in max burst length */

        /*
         * Don't set interrupt on xfer end yet, this will be done later
         * for the "last" descriptor
         */
        desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
                        MSGDMA_DESC_CTL_END_ON_LEN;
}
/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
        desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}
/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct msgdma_device *mdev = to_mdev(tx->chan);
        struct msgdma_sw_desc *new;
        dma_cookie_t cookie;
        unsigned long flags;

        new = tx_to_desc(tx);
        spin_lock_irqsave(&mdev->lock, flags);
        cookie = dma_cookie_assign(tx);

        list_add_tail(&new->node, &mdev->pending_list);
        spin_unlock_irqrestore(&mdev->lock, flags);

        return cookie;
}
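/*
 * Client-side usage sketch (illustrative, not part of this driver): a
 * dmaengine consumer reaches msgdma_tx_submit() and msgdma_issue_pending()
 * through the generic dmaengine API:
 *
 *      struct dma_async_tx_descriptor *tx;
 *      dma_cookie_t cookie;
 *
 *      tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
 *      cookie = dmaengine_submit(tx);
 *      dma_async_issue_pending(chan);
 */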
/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
                   dma_addr_t dma_src, size_t len, ulong flags)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct msgdma_sw_desc *new, *first = NULL;
        struct msgdma_extended_desc *desc;
        size_t copy;
        u32 desc_cnt;
        unsigned long irqflags;

        desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
        spin_unlock_irqrestore(&mdev->lock, irqflags);

        do {
                /* Allocate and populate the descriptor */
                new = msgdma_get_descriptor(mdev);

                copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
                desc = &new->hw_desc;
                msgdma_desc_config(desc, dma_dst, dma_src, copy,
                                   MSGDMA_DESC_STRIDE_RW);
                len -= copy;
                dma_src += copy;
                dma_dst += copy;
                if (!first)
                        first = new;
                else
                        list_add_tail(&new->node, &first->tx_list);
        } while (len);

        msgdma_desc_config_eod(desc);
        async_tx_ack(&first->async_tx);
        first->async_tx.flags = flags;

        return &first->async_tx;
}
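/*
 * Worked example: with MSGDMA_MAX_TRANS_LEN == U32_MAX, a 10 GiB copy needs
 * DIV_ROUND_UP(10ULL << 30, U32_MAX) == 3 descriptors. The first becomes
 * "first", the other two are chained on first->tx_list, and only the last
 * configured descriptor gets MSGDMA_DESC_CTL_TR_COMP_IRQ set via
 * msgdma_desc_config_eod(), so the whole transaction completes with a
 * single interrupt.
 */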
/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 *
 * @dchan: DMA channel
 * @sgl: Destination scatter list
 * @sg_len: Number of entries in destination scatter list
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
                     unsigned int sg_len, enum dma_transfer_direction dir,
                     unsigned long flags, void *context)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct dma_slave_config *cfg = &mdev->slave_cfg;
        struct msgdma_sw_desc *new, *first = NULL;
        struct msgdma_extended_desc *desc = NULL;
        size_t len, avail;
        dma_addr_t dma_dst, dma_src;
        u32 desc_cnt = 0, i;
        struct scatterlist *sg;
        u32 stride;
        unsigned long irqflags;

        for_each_sg(sgl, sg, sg_len, i)
                desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
        spin_unlock_irqrestore(&mdev->lock, irqflags);

        avail = sg_dma_len(sgl);

        /* Run until we are out of scatterlist entries */
        while (true) {
                /* Allocate and populate the descriptor */
                new = msgdma_get_descriptor(mdev);

                desc = &new->hw_desc;
                len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

                if (dir == DMA_MEM_TO_DEV) {
                        dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
                        dma_dst = cfg->dst_addr;
                        stride = MSGDMA_DESC_STRIDE_RD;
                } else {
                        dma_src = cfg->src_addr;
                        dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
                        stride = MSGDMA_DESC_STRIDE_WR;
                }
                msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
                avail -= len;

                if (!first)
                        first = new;
                else
                        list_add_tail(&new->node, &first->tx_list);

                /* Fetch the next scatterlist entry */
                if (avail == 0) {
                        if (sg_len == 0)
                                break;
                        sgl = sg_next(sgl);
                        if (sgl == NULL)
                                break;
                        sg_len--;
                        avail = sg_dma_len(sgl);
                }
        }

        msgdma_desc_config_eod(desc);
        first->async_tx.flags = flags;

        return &first->async_tx;
}
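/*
 * Client-side usage sketch (illustrative; fifo_addr is a hypothetical
 * device FIFO address): a peripheral driver programs the fixed device-side
 * address via msgdma_dma_config() below, then prepares the transaction:
 *
 *      struct dma_slave_config cfg = {
 *              .direction = DMA_MEM_TO_DEV,
 *              .dst_addr = fifo_addr,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *      };
 *      struct dma_async_tx_descriptor *tx;
 *
 *      dmaengine_slave_config(chan, &cfg);
 *      tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *                                   DMA_PREP_INTERRUPT);
 */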
static int msgdma_dma_config(struct dma_chan *dchan,
                             struct dma_slave_config *config)
{
        struct msgdma_device *mdev = to_mdev(dchan);

        memcpy(&mdev->slave_cfg, config, sizeof(*config));

        return 0;
}
static void msgdma_reset(struct msgdma_device *mdev)
{
        u32 val;
        int ret;

        /* Reset mSGDMA */
        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
        iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

        ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
                                 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
                                 1, 10000);
        if (ret)
                dev_err(mdev->dev, "DMA channel did not reset\n");

        /* Clear all status bits */
        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

        /* Enable the DMA controller including interrupts */
        iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
                  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

        mdev->idle = true;
}
static void msgdma_copy_one(struct msgdma_device *mdev,
                            struct msgdma_sw_desc *desc)
{
        void __iomem *hw_desc = mdev->desc;

        /*
         * Check that the DESC FIFO is not full. If it is full, we need to
         * wait for at least one entry to become free again
         */
        while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
               MSGDMA_CSR_STAT_DESC_BUF_FULL)
                mdelay(1);

        /*
         * The descriptor needs to get copied into the descriptor FIFO
         * of the DMA controller. The descriptor will get flushed to the
         * FIFO, once the last word (control word) is written. Since we
         * are not 100% sure that memcpy() writes all words in the "correct"
         * order (address from low to high) on all architectures, we make
         * sure this control word is written last by coding it separately
         * and adding some write-barriers here.
         */
        memcpy((void __force *)hw_desc, &desc->hw_desc,
               sizeof(desc->hw_desc) - sizeof(u32));

        /* Write control word last to flush this descriptor into the FIFO */
        mdev->idle = false;
        wmb();
        iowrite32(desc->hw_desc.control, hw_desc +
                  offsetof(struct msgdma_extended_desc, control));
        wmb();
}
/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
                                     struct msgdma_sw_desc *desc)
{
        struct msgdma_sw_desc *sdesc, *next;

        msgdma_copy_one(mdev, desc);

        list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
                msgdma_copy_one(mdev, sdesc);
}
/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;

        if (!mdev->idle)
                return;

        desc = list_first_entry_or_null(&mdev->pending_list,
                                        struct msgdma_sw_desc, node);
        if (!desc)
                return;

        list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
        msgdma_copy_desc_to_fifo(mdev, desc);
}
/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
        struct msgdma_device *mdev = to_mdev(chan);
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        msgdma_start_transfer(mdev);
        spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc, *next;

        list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
                dma_async_tx_callback callback;
                void *callback_param;

                list_del(&desc->node);

                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;
                if (callback) {
                        spin_unlock(&mdev->lock);
                        callback(callback_param);
                        spin_lock(&mdev->lock);
                }

                /* Run any dependencies, then free the descriptor */
                msgdma_free_descriptor(mdev, desc);
        }
}
/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;

        desc = list_first_entry_or_null(&mdev->active_list,
                                        struct msgdma_sw_desc, node);
        if (!desc)
                return;
        list_del(&desc->node);
        dma_cookie_complete(&desc->async_tx);
        list_add_tail(&desc->node, &mdev->done_list);
}
/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
        msgdma_free_desc_list(mdev, &mdev->active_list);
        msgdma_free_desc_list(mdev, &mdev->pending_list);
        msgdma_free_desc_list(mdev, &mdev->done_list);
}
/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        msgdma_free_descriptors(mdev);
        spin_unlock_irqrestore(&mdev->lock, flags);
        kfree(mdev->sw_desq);
}
/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct msgdma_sw_desc *desc;
        int i;

        mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
        if (!mdev->sw_desq)
                return -ENOMEM;

        mdev->idle = true;
        mdev->desc_free_cnt = MSGDMA_DESC_NUM;

        INIT_LIST_HEAD(&mdev->free_list);

        for (i = 0; i < MSGDMA_DESC_NUM; i++) {
                desc = mdev->sw_desq + i;
                dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
                desc->async_tx.tx_submit = msgdma_tx_submit;
                list_add_tail(&desc->node, &mdev->free_list);
        }

        return MSGDMA_DESC_NUM;
}
/**
 * msgdma_tasklet - Schedule completion tasklet
 * @data: Pointer to the Altera mSGDMA channel structure
 */
static void msgdma_tasklet(unsigned long data)
{
        struct msgdma_device *mdev = (struct msgdma_device *)data;
        u32 count;
        u32 __maybe_unused size;
        u32 __maybe_unused status;
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);

        /* Read number of responses that are available */
        count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
        dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
                __func__, __LINE__, count);

        while (count--) {
                /*
                 * Read both longwords to purge this response from the FIFO.
                 * On Avalon-MM implementations, size and status do not
                 * have any real values, like transferred bytes or error
                 * bits. So we need to just drop these values.
                 */
                size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
                status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

                msgdma_complete_descriptor(mdev);
                msgdma_chan_desc_cleanup(mdev);
        }

        spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
        struct msgdma_device *mdev = data;
        u32 status;

        status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
        if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
                /* Start next transfer if the DMA controller is idle */
                spin_lock(&mdev->lock);
                mdev->idle = true;
                msgdma_start_transfer(mdev);
                spin_unlock(&mdev->lock);
        }

        tasklet_schedule(&mdev->irq_tasklet);

        /* Clear interrupt in mSGDMA controller */
        iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

        return IRQ_HANDLED;
}
/**
 * msgdma_dev_remove - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
        if (!mdev)
                return;

        devm_free_irq(mdev->dev, mdev->irq, mdev);
        tasklet_kill(&mdev->irq_tasklet);
        list_del(&mdev->dmachan.device_node);
}
static int request_and_map(struct platform_device *pdev, const char *name,
                           struct resource **res, void __iomem **ptr)
{
        struct resource *region;
        struct device *device = &pdev->dev;

        *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (*res == NULL) {
                dev_err(device, "resource %s not defined\n", name);
                return -ENODEV;
        }

        region = devm_request_mem_region(device, (*res)->start,
                                         resource_size(*res),
                                         dev_name(device));
        if (region == NULL) {
                dev_err(device, "unable to request %s\n", name);
                return -EBUSY;
        }

        *ptr = devm_ioremap_nocache(device, region->start,
                                    resource_size(region));
        if (*ptr == NULL) {
                dev_err(device, "ioremap_nocache of %s failed!", name);
                return -ENOMEM;
        }

        return 0;
}
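/*
 * Illustrative device-tree node (a sketch with made-up addresses; the
 * compatible string is an assumption, check the binding documentation):
 * request_and_map() above looks up the three memory regions by the names
 * "csr", "desc" and "resp":
 *
 *      msgdma0: dma-controller@ff200000 {
 *              compatible = "altr,socfpga-msgdma";
 *              reg = <0xff200000 0x20>,
 *                    <0xff200020 0x20>,
 *                    <0xff200040 0x8>;
 *              reg-names = "csr", "desc", "resp";
 *              interrupts = <0 40 4>;
 *      };
 */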
/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
        struct msgdma_device *mdev;
        struct dma_device *dma_dev;
        struct resource *dma_res;
        int ret;

        mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
        if (!mdev)
                return -ENOMEM;

        mdev->dev = &pdev->dev;

        /* Map CSR space */
        ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
        if (ret)
                return ret;

        /* Map (extended) descriptor space */
        ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
        if (ret)
                return ret;

        /* Map response space */
        ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, mdev);

        /* Get interrupt nr from platform data */
        mdev->irq = platform_get_irq(pdev, 0);
        if (mdev->irq < 0)
                return -ENXIO;

        ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
                               0, dev_name(&pdev->dev), mdev);
        if (ret)
                return ret;

        tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);

        dma_cookie_init(&mdev->dmachan);

        spin_lock_init(&mdev->lock);

        INIT_LIST_HEAD(&mdev->active_list);
        INIT_LIST_HEAD(&mdev->pending_list);
        INIT_LIST_HEAD(&mdev->done_list);
        INIT_LIST_HEAD(&mdev->free_list);

        dma_dev = &mdev->dmadev;

        /* Set DMA capabilities */
        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

        dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
                              BIT(DMA_MEM_TO_MEM);
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        /* Init DMA link list */
        INIT_LIST_HEAD(&dma_dev->channels);

        /* Set base routines */
        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = msgdma_issue_pending;
        dma_dev->dev = &pdev->dev;

        dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
        dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
        dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
        dma_dev->device_config = msgdma_dma_config;

        dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

        mdev->dmachan.device = dma_dev;
        list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

        /* Set DMA mask to 64 bits */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (ret)
                        goto fail;
        }

        msgdma_reset(mdev);

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto fail;

        dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

        return 0;

fail:
        msgdma_dev_remove(mdev);

        return ret;
}
/**
 * msgdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int msgdma_remove(struct platform_device *pdev)
{
        struct msgdma_device *mdev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&mdev->dmadev);
        msgdma_dev_remove(mdev);

        dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");

        return 0;
}
static struct platform_driver msgdma_driver = {
        .driver = {
                .name = "altera-msgdma",
        },
        .probe = msgdma_probe,
        .remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);
MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");