/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright 2011 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>

#include "tsi721.h"
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}
static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}
static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}
static inline
struct tsi721_tx_desc *tsi721_dma_first_active(
				struct tsi721_bdma_chan *bdma_chan)
{
	return list_first_entry(&bdma_chan->active_list,
				struct tsi721_tx_desc, desc_node);
}
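/*
 * tsi721_bdma_ch_init - initialize a BDMA channel
 * @bdma_chan: BDMA channel to initialize
 *
 * Allocates the hardware descriptor ring and the descriptor status FIFO
 * from coherent DMA memory and programs their base addresses into the
 * channel registers. The last ring entry is a DTYPE3 link descriptor
 * pointing back to the ring base, which closes the ring.
 */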
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64		*sts_ptr;
	dma_addr_t	bd_phys;
	dma_addr_t	sts_phys;
	int		sts_size;
	int		bd_num = bdma_chan->bd_num;

	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);

	/* Allocate space for DMA descriptors */
	bd_ptr = dma_zalloc_coherent(dev,
				bd_num * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
					bd_num : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				&sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				bd_num * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	dev_dbg(dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring */
	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;

	return 0;
}
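/*
 * tsi721_bdma_ch_free - release BDMA channel resources
 * @bdma_chan: BDMA channel to release
 *
 * Refuses to free the ring while the channel is still running
 * (TSI721_DMAC_STS_RUN is set); otherwise returns the channel to init
 * state and frees the coherent memory backing the descriptor ring and
 * the status FIFO.
 */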
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT,
		bdma_chan->regs + TSI721_DMAC_CTL);

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;

	return 0;
}
static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}
static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}
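/*
 * tsi721_bdma_handler - interrupt dispatch for a BDMA channel
 * @bdma_chan: BDMA channel reporting an interrupt
 *
 * Called from the device-level interrupt paths (INTx and the MSI-X
 * handler below). Masks further channel interrupts and defers event
 * processing to the channel tasklet, which re-enables them when done.
 */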
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);

	tasklet_schedule(&bdma_chan->tasklet);
}
#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	tsi721_bdma_handler(bdma_chan);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start DMA with no BDs ready\n");
		return;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"tx_chan: %p, chan: %d, regs: %p\n",
		bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}
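/*
 * tsi721_desc_put - return a software descriptor to the channel free list
 * @bdma_chan: owning BDMA channel
 * @desc: head of the (possibly chained) descriptor group to recycle
 *
 * Also rolls wr_count_next back to wr_count, releasing any hardware
 * descriptor slots claimed by tsi721_desc_get() that were never handed
 * to the channel.
 */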
static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
			    struct tsi721_tx_desc *desc)
{
	dev_dbg(bdma_chan->dchan.device->dev,
		"Put desc: %p into free list\n", desc);

	if (desc) {
		spin_lock_bh(&bdma_chan->lock);
		list_splice_init(&desc->tx_list, &bdma_chan->free_list);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->wr_count_next = bdma_chan->wr_count;
		spin_unlock_bh(&bdma_chan->lock);
	}
}
static
struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *tx_desc, *_tx_desc;
	struct tsi721_tx_desc *ret = NULL;
	int i;

	spin_lock_bh(&bdma_chan->lock);
	list_for_each_entry_safe(tx_desc, _tx_desc,
				 &bdma_chan->free_list, desc_node) {
		if (async_tx_test_ack(&tx_desc->txd)) {
			list_del(&tx_desc->desc_node);
			ret = tx_desc;
			break;
		}
		dev_dbg(bdma_chan->dchan.device->dev,
			"desc %p not ACKed\n", tx_desc);
	}

	if (ret) {
		/* Claim the matching slot in the HW descriptor ring */
		i = bdma_chan->wr_count_next % bdma_chan->bd_num;
		if (i == bdma_chan->bd_num - 1) {
			i = 0;
			bdma_chan->wr_count_next++; /* skip link descriptor */
		}

		bdma_chan->wr_count_next++;
		ret->txd.phys = bdma_chan->bd_phys +
					i * sizeof(struct tsi721_dma_desc);
		ret->hw_desc =
			&((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
	}

	spin_unlock_bh(&bdma_chan->lock);

	return ret;
}
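/*
 * tsi721_fill_desc - construct one Type 1 (DTYPE1) hardware descriptor
 * for a single SG element. Field packing, as programmed below:
 * DTYPE1 goes into the top three bits of type_id (<<29) together with
 * the request type (<<19) and the destination ID; bcount carries the
 * two LSBs of the RapidIO address (<<30), the system size bit (<<26)
 * and the byte count; the remaining RapidIO address bits are written
 * right-shifted by two into raddr_lo/raddr_hi, with the upper address
 * bits from rio_addr_u placed in bits 63:62.
 */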
static int
tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
	struct tsi721_tx_desc *desc, struct scatterlist *sg,
	enum dma_rtype rtype, u32 sys_size)
{
	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
	u64 rio_addr;

	if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
		dev_err(bdma_chan->dchan.device->dev,
			"SG element is too large\n");
		return -EINVAL;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
		(u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
		sg_dma_len(sg));

	dev_dbg(bdma_chan->dchan.device->dev,
		"bd_ptr = %p did=%d raddr=0x%llx\n",
		bd_ptr, desc->destid, desc->rio_addr);

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
					(rtype << 19) | desc->destid);
	if (desc->interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
					(sys_size << 26) | sg_dma_len(sg));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}
static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
				      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &bdma_chan->free_list);
	list_move(&desc->desc_node, &bdma_chan->free_list);
	bdma_chan->completed_cookie = txd->cookie;

	if (callback)
		callback(param);
}
static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!tsi721_dma_is_idle(bdma_chan));

	if (!list_empty(&bdma_chan->queue))
		tsi721_start_dma(bdma_chan);

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_chain_complete(bdma_chan, desc);
}
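/*
 * tsi721_clr_stat - reap the descriptor status FIFO
 * @bdma_chan: BDMA channel to service
 *
 * Each status FIFO entry is a block of eight 64-bit words holding
 * addresses of completed hardware descriptors; a zero word terminates
 * the valid entries. Consumed blocks are zeroed and the FIFO read
 * pointer register (DSRP) is advanced to match.
 */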
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	/* Update FIFO pointer */
	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
	if (list_empty(&bdma_chan->active_list) ||
		list_is_singular(&bdma_chan->active_list)) {
		dev_dbg(bdma_chan->dchan.device->dev,
			"%s: Active_list empty\n", __func__);
		tsi721_dma_complete_all(bdma_chan);
	} else {
		dev_dbg(bdma_chan->dchan.device->dev,
			"%s: Active_list NOT empty\n", __func__);
		tsi721_dma_chain_complete(bdma_chan,
					tsi721_dma_first_active(bdma_chan));
		tsi721_start_dma(bdma_chan);
	}
}
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
		__func__, bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
			__func__, bdma_chan->id, dmac_sts);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMAC%d descriptor status FIFO is full\n",
			__func__, bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock(&bdma_chan->lock);
	}

	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}
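/*
 * tsi721_tx_submit - submit a prepared descriptor to the channel
 * @txd: dmaengine descriptor being submitted
 *
 * Assigns the next positive cookie to the transaction, then either
 * starts the transfer immediately (active list empty) or parks the
 * descriptor on the pending queue for tsi721_advance_work() to pick up.
 */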
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&bdma_chan->lock);

	cookie = txd->chan->cookie;
	if (++cookie < 0)
		cookie = 1;
	txd->chan->cookie = cookie;
	txd->cookie = cookie;

	if (list_empty(&bdma_chan->active_list)) {
		list_add_tail(&desc->desc_node, &bdma_chan->active_list);
		tsi721_start_dma(bdma_chan);
	} else {
		list_add_tail(&desc->desc_node, &bdma_chan->queue);
	}

	spin_unlock_bh(&bdma_chan->lock);

	return cookie;
}
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(dchan->device);
#endif
	struct tsi721_tx_desc *desc = NULL;
	LIST_HEAD(tmp_list);
	int i;
	int rc;

	if (bdma_chan->bd_base)
		return bdma_chan->bd_num - 1;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan)) {
		dev_err(dchan->device->dev, "Unable to initialize data DMA"
			" channel %d, aborting\n", bdma_chan->id);
		return -ENOMEM;
	}

	/* Allocate matching number of logical descriptors */
	desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
			GFP_KERNEL);
	if (!desc) {
		dev_err(dchan->device->dev,
			"Failed to allocate logical descriptors\n");
		rc = -ENOMEM;
		goto err_out;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < bdma_chan->bd_num - 1; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		INIT_LIST_HEAD(&desc[i].tx_list);
		list_add_tail(&desc[i].desc_node, &tmp_list);
	}

	spin_lock_bh(&bdma_chan->lock);
	list_splice(&tmp_list, &bdma_chan->free_list);
	bdma_chan->completed_cookie = dchan->cookie = 1;
	spin_unlock_bh(&bdma_chan->lock);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(
			priv->msix[TSI721_VECT_DMA0_DONE +
				   bdma_chan->id].vector,
			tsi721_bdma_msix, 0,
			priv->msix[TSI721_VECT_DMA0_DONE +
				   bdma_chan->id].irq_name,
			(void *)bdma_chan);

		if (rc) {
			dev_dbg(dchan->device->dev,
				"Unable to allocate MSI-X interrupt for "
				"BDMA%d-DONE\n", bdma_chan->id);
			goto err_out;
		}

		rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
					    bdma_chan->id].vector,
				tsi721_bdma_msix, 0,
				priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].irq_name,
				(void *)bdma_chan);

		if (rc) {
			dev_dbg(dchan->device->dev,
				"Unable to allocate MSI-X interrupt for "
				"BDMA%d-INT\n", bdma_chan->id);
			free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				 (void *)bdma_chan);
			rc = -EIO;
			goto err_out;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tasklet_enable(&bdma_chan->tasklet);
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return bdma_chan->bd_num - 1;

err_out:
	kfree(desc);
	tsi721_bdma_ch_free(bdma_chan);
	return rc;
}
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(dchan->device);
#endif
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (bdma_chan->bd_base == NULL)
		return;

	BUG_ON(!list_empty(&bdma_chan->active_list));
	BUG_ON(!list_empty(&bdma_chan->queue));

	tasklet_disable(&bdma_chan->tasklet);

	spin_lock_bh(&bdma_chan->lock);
	list_splice_init(&bdma_chan->free_list, &list);
	spin_unlock_bh(&bdma_chan->lock);

	tsi721_bdma_interrupt_enable(bdma_chan, 0);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_bdma_ch_free(bdma_chan);
	kfree(bdma_chan->tx_desc);
}
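/*
 * tsi721_tx_status - report completion status of a cookie
 *
 * Snapshots the last completed and last used cookies under the channel
 * lock and lets dma_async_is_complete() classify the requested cookie.
 * No residue accounting is done; residue is always reported as zero.
 */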
static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	int ret;

	spin_lock_bh(&bdma_chan->lock);
	last_completed = bdma_chan->completed_cookie;
	last_used = dchan->cookie;
	spin_unlock_bh(&bdma_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	dev_dbg(dchan->device->dev,
		"%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
		__func__, ret, last_completed, last_used);

	return ret;
}
static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (tsi721_dma_is_idle(bdma_chan)) {
		spin_lock_bh(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock_bh(&bdma_chan->lock);
	} else
		dev_dbg(dchan->device->dev,
			"%s: DMA channel still busy\n", __func__);
}
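/*
 * tsi721_prep_rio_sg - prepare an SG list transfer to/from a RapidIO
 * target. This hook is wired to device_prep_slave_sg; the opaque last
 * argument carries a struct rio_dma_ext describing the RapidIO side
 * (destination ID, target address, write type).
 *
 * Illustrative client sketch only, not part of this driver: it assumes
 * the dmaengine_prep_rio_sg() wrapper from the RapidIO DMA patch set
 * and uses placeholder names (my_destid, my_sgl, my_sg_len, my_done_cb).
 * The rio_addr value is an example target window offset:
 *
 *	struct rio_dma_ext rext = {
 *		.destid   = my_destid,
 *		.rio_addr = 0x10000,
 *		.wr_type  = RDW_ALL_NWRITE,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_rio_sg(dchan, my_sgl, my_sg_len,
 *				   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT,
 *				   &rext);
 *	if (tx) {
 *		tx->callback = my_done_cb;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(dchan);
 *	}
 */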
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	struct tsi721_tx_desc *first = NULL;
	struct scatterlist *sg;
	struct rio_dma_ext *rext = tinfo;
	u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
	unsigned int i;
	u32 sys_size = dma_to_mport(dchan->device)->sys_size;
	enum dma_rtype rtype;

	if (!sgl || !sg_len) {
		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		dev_err(dchan->device->dev,
			"%s: Unsupported DMA direction option\n", __func__);
		return NULL;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		int err;

		dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
		desc = tsi721_desc_get(bdma_chan);
		if (!desc) {
			dev_err(dchan->device->dev,
				"Not enough descriptors available\n");
			goto err_desc_get;
		}

		if (sg_is_last(sg))
			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
		else
			desc->interrupt = false;

		desc->destid = rext->destid;
		desc->rio_addr = rio_addr;
		desc->rio_addr_u = 0;

		err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
		if (err) {
			dev_err(dchan->device->dev,
				"Failed to build desc: %d\n", err);
			goto err_desc_get;
		}

		rio_addr += sg_dma_len(sg);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->desc_node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	tsi721_desc_put(bdma_chan, first);
	return NULL;
}
static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENOSYS;

	spin_lock_bh(&bdma_chan->lock);

	/* make sure to stop the transfer */
	iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_chain_complete(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}
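/*
 * tsi721_register_dma - register Tsi721 BDMA channels with the dmaengine
 * @priv: Tsi721 device private structure
 *
 * All BDMA channels except the one reserved for maintenance transactions
 * (TSI721_DMACH_MAINT) are exported as DMA_SLAVE/DMA_PRIVATE channels of
 * the mport's dma_device. Channel tasklets start disabled and are only
 * enabled when a client allocates channel resources.
 */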
int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = TSI721_DMA_MAXCH;
	int err;
	struct rio_mport *mport = priv->mport;

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.chancnt = nr_channels;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if (i == TSI721_DMACH_MAINT)
			continue;

		bdma_chan->bd_num = 64;
		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;

		spin_lock_init(&bdma_chan->lock);

		INIT_LIST_HEAD(&bdma_chan->active_list);
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		tasklet_disable(&bdma_chan->tasklet);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
	}

	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_control = tsi721_device_control;

	err = dma_async_device_register(&mport->dma);
	if (err)
		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

	return err;
}