/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <asm/dmaengine.h>

#include "shdma.h"
/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2
/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
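
/*
 * Register access helpers: sh_dc->base and shdev->chan_reg are u32 __iomem
 * pointers (declared in shdma.h), so the byte offsets of SAR, DAR, TCR,
 * CHCR and DMAOR are divided by sizeof(u32) to turn them into element
 * offsets. For example, a register at byte offset 0x20 is reached as
 * base + 0x20 / sizeof(u32) == base + 8 u32 elements, which is the same
 * byte address once scaled by the pointer type.
 */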
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}
static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}
/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}
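
/*
 * The CHCR transfer size (TS) field may be split into a low and a high
 * part; the masks and shifts live in platform data, and pdata->ts_shift[]
 * maps the recombined field value to log2(transfer size). As a purely
 * illustrative example (the values are board-specific, not fixed by this
 * driver): with ts_low_mask = 0x18 and ts_low_shift = 3, a CHCR with 0x10
 * in those bits yields index (0x10 >> 3) = 2, and a ts_shift[2] of 2
 * would select 4-byte transfer units.
 */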
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}
static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* While the DMA channel is busy, CHCR must not be changed */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}
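
/*
 * Each 16-bit DMARS register holds the 8-bit MID/RID values of two
 * channels, so dmae_set_dmars() below performs a read-modify-write:
 * chan_pdata->dmars_bit is the bit position of this channel's byte
 * (typically 0 or 8), and the mask 0xff00 >> shift preserves the sibling
 * channel's byte. E.g. with shift == 8, the register keeps bits 7:0
 * unchanged and receives val in bits 15:8.
 */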
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
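
/*
 * A minimal client-side sketch of how the dmaengine API reaches the
 * callbacks below (assumptions, not shown here: "chan" was obtained via
 * dma_request_channel() and dst/src/len are already DMA-mapped):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = tx->tx_submit(tx);		   -> sh_dmae_tx_submit()
 *	chan->device->device_issue_pending(chan);  -> sh_chan_xfer_ld_queue()
 *
 * Completion is then signalled through sh_dmae_interrupt() and
 * dmae_do_tasklet().
 */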
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}
/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}
static struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == slave_id)
			return pdata->slave + i;

	return NULL;
}
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
		if (!cfg)
			return -EINVAL;

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
			return -EBUSY;

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated)
		pm_runtime_put(sh_chan->dev);

	return sh_chan->descs_allocated;
}
/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}
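
/*
 * Each hardware descriptor moves at most SH_DMA_TCR_MAX + 1 bytes (the
 * 16MB controller limit noted at the top of this file), so longer requests
 * are split into a chain of chunks by sh_dmae_add_desc() and
 * sh_dmae_prep_sg(). For example, a 40MB memcpy would be prepared as three
 * chunks of 16MB + 16MB + 8MB, all sharing one cookie at submit time.
 */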
/*
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the prepared descriptor or NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		*first = new;
		new->async_tx.cookie = -EBUSY;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}
/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
			       direction, flags);
}
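
/*
 * On termination, sh_dmae_terminate_all() below halts the channel and
 * records in desc->partial an estimate of how many bytes of the current
 * descriptor were already transferred, derived from the remaining count
 * left in the TCR register, so that a client could account for (or
 * resubmit) the remainder.
 */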
static void sh_dmae_terminate_all(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	if (!chan)
		return;

	dmae_halt(sh_chan);

	spin_lock_bh(&sh_chan->desc_lock);
	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);
}
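
/*
 * Descriptor life cycle, as managed by the cleanup below (see
 * enum sh_dmae_desc_status): DESC_IDLE on the free list, DESC_PREPARED
 * after prep, DESC_SUBMITTED on ld_queue after tx_submit(),
 * DESC_COMPLETED once the tasklet has matched it, DESC_WAITING after the
 * callback ran but before the client acks, and back to DESC_IDLE when
 * acked (or on forced cleanup with all == true).
 */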
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}
/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}
static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}
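
/*
 * Completion status is derived purely from cookie ordering via
 * dma_async_is_complete(cookie, last_complete, last_used); a cookie that
 * is neither complete nor still found on ld_queue must belong to an
 * aborted transfer, which sh_dmae_is_complete() reports as DMA_ERROR.
 */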
static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we have
	 * to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}
#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan) {
			struct sh_desc *desc;
			/* Stop the channel */
			dmae_halt(sh_chan);
			/* Complete all queued descriptors */
			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
				struct dma_async_tx_descriptor *tx = &desc->async_tx;
				desc->mark = DESC_IDLE;
				if (tx->callback)
					tx->callback(tx->callback_param);
			}
			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		}
	}
	sh_dmae_rst(shdev);

	return IRQ_HANDLED;
}
#endif
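
/*
 * The tasklet matches the completed chunk by address: when a transfer
 * ends, SAR/DAR have advanced to the end of the chunk, so a submitted
 * descriptor with hw.sar + hw.tcr == SAR (or hw.dar + hw.tcr == DAR in
 * the DMA_FROM_DEVICE case) is the one that just finished.
 */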
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor manage list */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);

			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}
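
/*
 * A sketch of the resources the probe below expects, with hypothetical
 * addresses and IRQ numbers (real values are SoC-specific and normally
 * live in the board/CPU setup code, not in this driver):
 *
 *	static struct resource sh_dmae_resources[] = {
 *		{ .start = 0xfe008020, .end = 0xfe00808f,
 *		  .flags = IORESOURCE_MEM },	// channel registers + DMAOR
 *		{ .start = 0xfe009000, .end = 0xfe00900b,
 *		  .flags = IORESOURCE_MEM },	// optional DMARS
 *		{ .start = 34, .end = 34,
 *		  .flags = IORESOURCE_IRQ },	// error IRQ (SH4)
 *		{ .start = 48, .end = 53,
 *		  .flags = IORESOURCE_IRQ },	// channel IRQ range
 *	};
 *
 * The IRQ resource rules are enumerated inside sh_dmae_probe() below.
 */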
static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional, if absent, this controller cannot do slave DMA */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_terminate_all = sh_dmae_terminate_all;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 32 bytes requires 32-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num)
		goto eirqres;

	/* Create DMA Channel */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	pm_runtime_put(&pdev->dev);

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	pm_runtime_put(&pdev->dev);
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}
static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}
static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}
static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "sh-dma-engine",
	},
};
static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);
static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);
1189 MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1190 MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1191 MODULE_LICENSE("GPL");