/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright 2011 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>

#include "tsi721.h"
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}
static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}
static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}
static inline
struct tsi721_tx_desc *tsi721_dma_first_active(
				struct tsi721_bdma_chan *bdma_chan)
{
	return list_first_entry(&bdma_chan->active_list,
				struct tsi721_tx_desc, desc_node);
}
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64		*sts_ptr;
	dma_addr_t	bd_phys;
	dma_addr_t	sts_phys;
	int		sts_size;
	int		bd_num = bdma_chan->bd_num;

	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);

	/* Allocate space for DMA descriptors */
	bd_ptr = dma_zalloc_coherent(dev,
				bd_num * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
					bd_num : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				&sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				bd_num * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	dev_dbg(dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring */
	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
						TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}
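/*
 * Note on the ring layout established above (editor's summary, inferred
 * from this file): of the bd_num hardware descriptors, only bd_num - 1
 * carry transfer requests. The last slot is permanently configured as a
 * DTYPE3 "link" descriptor whose next-pointer is the bus address of the
 * ring base, so the engine wraps around the ring automatically. The
 * status FIFO must hold at least TSI721_DMA_MINSTSSZ entries and is
 * rounded up to a power of two, matching the log2-style size encoding
 * expected by TSI721_DMAC_DSSZ_SIZE().
 */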
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT,
		bdma_chan->regs + TSI721_DMAC_CTL);

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;

	return 0;
}
static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}
static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_schedule(&bdma_chan->tasklet);
}
#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	tsi721_bdma_handler(bdma_chan);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
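/*
 * Editor's note: each BDMA channel owns two MSI-X vectors, indexed as
 * TSI721_VECT_DMA0_DONE + id and TSI721_VECT_DMA0_INT + id. Both are
 * bound to tsi721_bdma_msix() when channel resources are allocated, so
 * either interrupt source funnels into tsi721_bdma_handler() above.
 */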
/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start DMA with no BDs ready\n");
		return;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"tx_chan: %p, chan: %d, regs: %p\n",
		bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}
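/*
 * Editor's note (interpretation of the hardware protocol): the DWRCNT
 * write acts as a doorbell telling the engine how many descriptors have
 * been made available in total; the engine processes descriptors until
 * its internal read count catches up. The ioread32() after the doorbell
 * write flushes the posted PCIe write so the engine sees it promptly.
 */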
static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
			    struct tsi721_tx_desc *desc)
{
	dev_dbg(bdma_chan->dchan.device->dev,
		"Put desc: %p into free list\n", desc);

	if (desc) {
		spin_lock_bh(&bdma_chan->lock);
		list_splice_init(&desc->tx_list, &bdma_chan->free_list);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->wr_count_next = bdma_chan->wr_count;
		spin_unlock_bh(&bdma_chan->lock);
	}
}
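/*
 * Editor's note: resetting wr_count_next back to wr_count here discards
 * the hardware descriptor slots claimed by tsi721_desc_get() for the
 * failed chain. None of them have been announced to the engine yet
 * (that only happens in tsi721_start_dma()), so they can be reused.
 */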
static
struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *tx_desc, *_tx_desc;
	struct tsi721_tx_desc *ret = NULL;
	int i;

	spin_lock_bh(&bdma_chan->lock);
	list_for_each_entry_safe(tx_desc, _tx_desc,
				 &bdma_chan->free_list, desc_node) {
		if (async_tx_test_ack(&tx_desc->txd)) {
			list_del(&tx_desc->desc_node);
			ret = tx_desc;
			break;
		}
		dev_dbg(bdma_chan->dchan.device->dev,
			"desc %p not ACKed\n", tx_desc);
	}

	if (ret == NULL) {
		dev_dbg(bdma_chan->dchan.device->dev,
			"%s: unable to obtain tx descriptor\n", __func__);
		goto err_out;
	}

	i = bdma_chan->wr_count_next % bdma_chan->bd_num;
	if (i == bdma_chan->bd_num - 1) {
		i = 0;
		bdma_chan->wr_count_next++; /* skip link descriptor */
	}

	bdma_chan->wr_count_next++;
	tx_desc->txd.phys = bdma_chan->bd_phys +
				i * sizeof(struct tsi721_dma_desc);
	tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
err_out:
	spin_unlock_bh(&bdma_chan->lock);

	return ret;
}
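/*
 * Worked example of the index arithmetic above, assuming the default
 * bd_num of 64 set in tsi721_register_dma(): slots 0..62 hold transfer
 * descriptors and slot 63 is the DTYPE3 link descriptor. When
 * wr_count_next % 64 == 63, the slot index wraps to 0 and wr_count_next
 * is bumped an extra time, so the running count later handed to
 * TSI721_DMAC_DWRCNT also accounts for the link descriptor the engine
 * walks through.
 */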
static int
tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
	struct tsi721_tx_desc *desc, struct scatterlist *sg,
	enum dma_rtype rtype, u32 sys_size)
{
	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
	u64 rio_addr;

	if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
		dev_err(bdma_chan->dchan.device->dev,
			"SG element is too large\n");
		return -EINVAL;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
		(u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
		sg_dma_len(sg));

	dev_dbg(bdma_chan->dchan.device->dev,
		"bd_ptr = %p did=%d raddr=0x%llx\n",
		bd_ptr, desc->destid, desc->rio_addr);

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
					(rtype << 19) | desc->destid);
	if (desc->interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
					(sys_size << 26) | sg_dma_len(sg));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}
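/*
 * Address packing above, spelled out (editor's reading of the code):
 * RapidIO addresses can be up to 66 bits wide. The two least significant
 * bits of rio_addr land in the top of the bcount word, the two extension
 * bits (rio_addr_u) go into bits 63:62 of the packed remote address, and
 * the remaining bits, shifted right by 2, fill raddr_lo/raddr_hi.
 * TSI721_DMAD_IOF requests a completion interrupt and is set only on the
 * final descriptor of a chain when the client asked for
 * DMA_PREP_INTERRUPT.
 */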
static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
				      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &bdma_chan->free_list);
	list_move(&desc->desc_node, &bdma_chan->free_list);
	bdma_chan->completed_cookie = txd->cookie;

	if (callback)
		callback(param);
}
static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!tsi721_dma_is_idle(bdma_chan));

	if (!list_empty(&bdma_chan->queue))
		tsi721_start_dma(bdma_chan);

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_chain_complete(bdma_chan, desc);
}
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}
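/*
 * Layout assumed by the scan above (editor's reading): the status FIFO
 * is an array of sts_size blocks (struct tsi721_dma_sts), each holding
 * eight 64-bit completion entries, so srd_ptr indexes whole blocks while
 * j walks the flat u64 view. A zero entry marks the first slot the
 * hardware has not written yet; everything before it is cleared and the
 * block-granular read pointer is handed back via TSI721_DMAC_DSRP.
 */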
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
	if (list_empty(&bdma_chan->active_list) ||
	    list_is_singular(&bdma_chan->active_list)) {
		dev_dbg(bdma_chan->dchan.device->dev,
			"%s: Active_list empty\n", __func__);
		tsi721_dma_complete_all(bdma_chan);
	} else {
		dev_dbg(bdma_chan->dchan.device->dev,
			"%s: Active_list NOT empty\n", __func__);
		tsi721_dma_chain_complete(bdma_chan,
					tsi721_dma_first_active(bdma_chan));
		tsi721_start_dma(bdma_chan);
	}
}
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
		__func__, bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
			__func__, bdma_chan->id, dmac_sts);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMAC%d descriptor status FIFO is full\n",
			__func__, bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock(&bdma_chan->lock);
	}

	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&bdma_chan->lock);

	cookie = txd->chan->cookie;
	if (++cookie < 0)
		cookie = 1;
	txd->chan->cookie = cookie;
	txd->cookie = cookie;

	if (list_empty(&bdma_chan->active_list)) {
		list_add_tail(&desc->desc_node, &bdma_chan->active_list);
		tsi721_start_dma(bdma_chan);
	} else {
		list_add_tail(&desc->desc_node, &bdma_chan->queue);
	}

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}
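/*
 * Cookie assignment above follows the dmaengine convention of this era:
 * cookies increase monotonically per channel and wrap back to 1 on
 * signed overflow, since negative values are reserved for error states
 * (e.g. the -EBUSY placed on a chain's first descriptor before submit).
 */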
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(dchan->device);
#endif
	struct tsi721_tx_desc *desc = NULL;
	LIST_HEAD(tmp_list);
	int i;
	int rc;

	if (bdma_chan->bd_base)
		return bdma_chan->bd_num - 1;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan)) {
		dev_err(dchan->device->dev, "Unable to initialize data DMA"
			" channel %d, aborting\n", bdma_chan->id);
		return -ENOMEM;
	}

	/* Allocate matching number of logical descriptors */
	desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
			GFP_KERNEL);
	if (!desc) {
		dev_err(dchan->device->dev,
			"Failed to allocate logical descriptors\n");
		rc = -ENOMEM;
		goto err_out;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < bdma_chan->bd_num - 1; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		INIT_LIST_HEAD(&desc[i].tx_list);
		list_add_tail(&desc[i].desc_node, &tmp_list);
	}

	spin_lock_bh(&bdma_chan->lock);
	list_splice(&tmp_list, &bdma_chan->free_list);
	bdma_chan->completed_cookie = dchan->cookie = 1;
	spin_unlock_bh(&bdma_chan->lock);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				 tsi721_bdma_msix, 0,
				 priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].irq_name,
				 (void *)bdma_chan);
		if (rc) {
			dev_dbg(dchan->device->dev,
				"Unable to allocate MSI-X interrupt for "
				"BDMA%d-DONE\n", bdma_chan->id);
			rc = -EIO;
			goto err_out;
		}

		rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
					    bdma_chan->id].vector,
				 tsi721_bdma_msix, 0,
				 priv->msix[TSI721_VECT_DMA0_INT +
					    bdma_chan->id].irq_name,
				 (void *)bdma_chan);
		if (rc) {
			dev_dbg(dchan->device->dev,
				"Unable to allocate MSI-X interrupt for "
				"BDMA%d-INT\n", bdma_chan->id);
			free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				 (void *)bdma_chan);
			rc = -EIO;
			goto err_out;
		}
	}
#endif /* CONFIG_PCI_MSI */

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return bdma_chan->bd_num - 1;

err_out:
	kfree(desc);
	tsi721_bdma_ch_free(bdma_chan);
	return rc;
}
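/*
 * The value returned to dmaengine above is bd_num - 1, not bd_num: one
 * slot of the hardware ring is permanently occupied by the DTYPE3 link
 * descriptor, so only bd_num - 1 logical tx descriptors can ever be
 * outstanding on a channel.
 */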
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_device *priv = to_tsi721(dchan->device);
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (bdma_chan->bd_base == NULL)
		return;

	BUG_ON(!list_empty(&bdma_chan->active_list));
	BUG_ON(!list_empty(&bdma_chan->queue));

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);

	tasklet_kill(&bdma_chan->tasklet);

	spin_lock_bh(&bdma_chan->lock);
	list_splice_init(&bdma_chan->free_list, &list);
	spin_unlock_bh(&bdma_chan->lock);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_bdma_ch_free(bdma_chan);
	kfree(bdma_chan->tx_desc);
}
static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	int ret;

	spin_lock_bh(&bdma_chan->lock);
	last_completed = bdma_chan->completed_cookie;
	last_used = dchan->cookie;
	spin_unlock_bh(&bdma_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	dev_dbg(dchan->device->dev,
		"%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
		__func__, ret, last_completed, last_used);

	return ret;
}
static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (tsi721_dma_is_idle(bdma_chan)) {
		spin_lock_bh(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock_bh(&bdma_chan->lock);
	} else
		dev_dbg(dchan->device->dev,
			"%s: DMA channel still busy\n", __func__);
}
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	struct tsi721_tx_desc *first = NULL;
	struct scatterlist *sg;
	struct rio_dma_ext *rext = tinfo;
	u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
	unsigned int i;
	u32 sys_size = dma_to_mport(dchan->device)->sys_size;
	enum dma_rtype rtype;

	if (!sgl || !sg_len) {
		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		dev_err(dchan->device->dev,
			"%s: Unsupported DMA direction option\n", __func__);
		return NULL;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		int err;

		dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
		desc = tsi721_desc_get(bdma_chan);
		if (!desc) {
			dev_err(dchan->device->dev,
				"Not enough descriptors available\n");
			goto err_desc_get;
		}

		if (sg_is_last(sg))
			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
		else
			desc->interrupt = false;

		desc->destid = rext->destid;
		desc->rio_addr = rio_addr;
		desc->rio_addr_u = 0;

		err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
		if (err) {
			dev_err(dchan->device->dev,
				"Failed to build desc: %d\n", err);
			goto err_desc_get;
		}

		rio_addr += sg_dma_len(sg);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->desc_node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	tsi721_desc_put(bdma_chan, first);
	return NULL;
}
static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
				 unsigned long arg)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&bdma_chan->lock);

	/* make sure to stop the transfer */
	iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_chain_complete(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}
int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = TSI721_DMA_MAXCH;
	int err;
	struct rio_mport *mport = priv->mport;

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.chancnt = nr_channels;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if (i == TSI721_DMACH_MAINT)
			continue;

		bdma_chan->bd_num = 64;
		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		INIT_LIST_HEAD(&bdma_chan->active_list);
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
	}

	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_control = tsi721_device_control;

	err = dma_async_device_register(&mport->dma);
	if (err)
		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

	return err;
}