/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#define TSI721_DMA_TX_QUEUE_SZ	16	/* number of transaction descriptors */

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

static inline
struct tsi721_tx_desc *tsi721_dma_first_active(
				struct tsi721_bdma_chan *bdma_chan)
{
	return list_first_entry(&bdma_chan->active_list,
				struct tsi721_tx_desc, desc_node);
}

static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64		*sts_ptr;
	dma_addr_t	bd_phys;
	dma_addr_t	sts_phys;
	int		sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	dev_dbg(dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n",
				bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n",
				bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector,
				(void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}
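
/*
 * Ring layout note: tsi721_bdma_ch_init() allocates bd_num + 1 hardware
 * descriptors; the extra element is a DTYPE3 "link" descriptor whose
 * next-pointer holds the ring base address, so the channel wraps back to
 * the first descriptor automatically.  The status FIFO is rounded up to a
 * power of two (at least TSI721_DMA_MINSTSSZ) to match the size encoding
 * expected by TSI721_DMAC_DSSZ.
 */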

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;

	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	tsi721_bdma_handler(bdma_chan);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start DMA with no BDs ready\n");
		return;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id,
		bdma_chan->wr_count_next);

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}
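
/*
 * The TSI721_DMAC_DWRCNT write above acts as a doorbell: the channel
 * fetches descriptors as soon as the write count moves ahead of its
 * internal read count.  The ioread32() that follows is a read-back,
 * presumably to flush the posted PCIe write before wr_count is updated.
 */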

static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (bd_ptr == NULL)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}
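
/*
 * Address encoding note (as implemented above): the two least significant
 * bits of the RapidIO address land in bits 31:30 of the bcount word, the
 * address itself is stored right-shifted by two in raddr_lo/raddr_hi, and
 * rio_addr_u contributes the top two bits of a 66-bit address.  This
 * follows the layout of the Tsi721 DTYPE1 (NREAD/NWRITE) descriptor.
 */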

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (bd_ptr == NULL)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}

static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}
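
/*
 * Each status FIFO line consists of eight 64-bit entries; the hardware
 * writes non-zero completion records into them.  The loop above zeroes
 * consumed entries line by line and then publishes the new read pointer
 * through TSI721_DMAC_DSRP so the hardware can reuse the space.
 */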

/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to use non-idle channel\n");
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n",
		__func__, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n",
			i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			dev_err(dchan->device->dev,
				"%s: SG entry %d is too large\n", __func__, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev,
				"%s: prev desc final len: %d\n",
				__func__, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			dev_dbg(dchan->device->dev,
				"%s: HW descriptor ring is full @ %d\n",
				__func__, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			dev_err(dchan->device->dev,
				"Failed to build desc: err=%d\n", err);
			break;
		}

		dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n",
			bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev,
				"%s: last desc final len: %d\n",
				__func__, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}
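
/*
 * Partial submission: if the hardware ring fills before the scatterlist
 * is exhausted, desc->sg and desc->sg_len are updated to reference the
 * remaining entries and the submitted portion is started; the completion
 * tasklet later resubmits the remainder.  A transaction is complete only
 * once desc->sg_len reaches zero.
 */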

static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *desc;
	int err;

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);

	/*
	 * If there are any new transactions in the queue add them
	 * into the processing list
	 */
	if (!list_empty(&bdma_chan->queue))
		list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);

	/* Start new transaction (if available) */
	if (!list_empty(&bdma_chan->active_list)) {
		desc = tsi721_dma_first_active(bdma_chan);
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			dev_dbg(bdma_chan->dchan.device->dev,
				"ERR: tsi721_submit_sg failed with err=%d\n",
				err);
		}
	}

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__);
}

static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
		__func__, bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
			__func__, bdma_chan->id, dmac_sts);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMAC%d descriptor status FIFO is full\n",
			__func__, bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = tsi721_dma_first_active(bdma_chan);

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_move(&desc->desc_node, &bdma_chan->free_list);
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
			spin_lock(&bdma_chan->lock);
		}

		tsi721_advance_work(bdma_chan);
		spin_unlock(&bdma_chan->lock);
	}

	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}
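
/*
 * The completion callback is deliberately invoked with bdma_chan->lock
 * dropped: the descriptor is moved to the free list first, the lock is
 * released, and only then is the client callback called, so the callback
 * may submit new transactions without deadlocking on the channel lock.
 */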

static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: wrong state of descriptor %p\n", __func__, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	int i;

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base)
		return TSI721_DMA_TX_QUEUE_SZ;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		dev_err(dchan->device->dev, "Unable to initialize data DMA"
			" channel %d, aborting\n", bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
			GFP_KERNEL);
	if (!desc) {
		dev_err(dchan->device->dev,
			"Failed to allocate logical descriptors\n");
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return TSI721_DMA_TX_QUEUE_SZ;
}
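
/*
 * The value returned to the dmaengine core is the number of logical
 * transaction slots (TSI721_DMA_TX_QUEUE_SZ), which is independent of
 * the number of hardware buffer descriptors (dma_desc_per_channel); a
 * single logical transaction may expand into many hardware descriptors.
 */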

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base == NULL)
		return;

	BUG_ON(!list_empty(&bdma_chan->active_list));
	BUG_ON(!list_empty(&bdma_chan->queue));

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);

	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		spin_lock_bh(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock_bh(&bdma_chan->lock);
	}
}

static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
		return NULL;
	}

	dev_dbg(dchan->device->dev, "%s: %s\n", __func__,
		(dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		dev_err(dchan->device->dev,
			"%s: Unsupported DMA direction option\n", __func__);
		return NULL;
	}

	spin_lock_bh(&bdma_chan->lock);

	list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->desc_node);
			desc->destid = rext->destid;
			desc->rio_addr = rext->rio_addr;
			desc->rio_addr_u = 0;
			desc->rtype = rtype;
			desc->sg_len	= sg_len;
			desc->sg	= sgl;
			txd		= &desc->txd;
			txd->flags	= flags;
			break;
		}
	}

	spin_unlock_bh(&bdma_chan->lock);

	return txd;
}

static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	u32 dmac_int;
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENOSYS;

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
	}

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}
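
/*
 * DMA_TERMINATE_ALL suspends the channel via TSI721_DMAC_CTL_SUSP and
 * busy-waits for the SUSP status bit with no timeout; suspension is
 * assumed to complete promptly once requested, as there is no recovery
 * path here if the hardware fails to stop.
 */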

int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if (i == TSI721_DMACH_MAINT)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		INIT_LIST_HEAD(&bdma_chan->active_list);
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_control = tsi721_device_control;

	err = dma_async_device_register(&mport->dma);
	if (err)
		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

	return err;
}