/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA size is 16MB.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/platform_device.h>

#include <asm/dma-sh.h>
#include "shdma.h"

/* DMA descriptor control */
#define DESC_LAST	(-1)
#define DESC_COMP	(1)
#define DESC_NCOMP	(0)

#define NR_DESCS_PER_CHANNEL 32

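/*
 * Note: sh_dmae_alloc_chan_resources() below preallocates this many
 * descriptors per channel, and sh_dmae_prep_memcpy() consumes one
 * descriptor per 16MB chunk (SH_DMA_TCR_MAX), so a single prepared copy
 * can cover at most roughly NR_DESCS_PER_CHANNEL * 16MB.
 */
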
/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 *
 * This driver uses 4-byte burst mode.
 * If you want to change the mode, change the value of RS_DEFAULT,
 * e.g. 1-byte burst mode -> (RS_DUAL & ~TS_32).
 */
#define RS_DEFAULT	(RS_DUAL)

#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

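/*
 * Both register accessors address the per-channel register block:
 * dma_base_addr[] (from <asm/dma-sh.h>) gives the base of channel "id",
 * and "reg" is the offset of SAR, DAR, TCR or CHCR within that block.
 * For example, reading the current source address of a channel:
 *
 *	u32 sar = sh_dmae_readl(sh_chan, SAR);
 */
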
static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = RS_DEFAULT; /* default is DUAL mode */

	sh_dmae_writel(sh_chan, chcr, CHCR);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(int id)
{
	unsigned short dmaor = dmaor_read_reg(id);

	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(id, dmaor);
}

static int sh_dmae_rst(int id)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(id);
	dmaor = (dmaor_read_reg(id) | DMAOR_INIT);

	dmaor_write_reg(id, dmaor);
	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_DE) {
		if (!(chcr & CHCR_TE))
			return -EBUSY; /* working */
	}
	return 0; /* waiting */
}

static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
{
	sh_dmae_writel(sh_chan, hw.sar, SAR);
	sh_dmae_writel(sh_chan, hw.dar, DAR);
	sh_dmae_writel(sh_chan,
			(hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
}

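/*
 * Note: hw.tcr is kept in bytes, while the TCR register counts transfer
 * units, so the byte count is shifted down by the transfer size currently
 * programmed in CHCR (see calc_xmit_shift() above) before being written.
 */
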
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= (CHCR_DE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	int ret = dmae_is_idle(sh_chan);

	/* While the DMA channel is working, CHCR must not be written */
	if (ret)
		return ret;

	sh_dmae_writel(sh_chan, val, CHCR);
	return 0;
}

#define DMARS1_ADDR	0x04
#define DMARS2_ADDR	0x08
#define DMARS_SHIFT	8
#define DMARS_CHAN_MSK	0x01
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	u32 addr;
	int shift = 0;
	int ret = dmae_is_idle(sh_chan);

	if (ret)
		return ret;

	/* Odd channels use the upper byte of their DMARS register */
	if (sh_chan->id & DMARS_CHAN_MSK)
		shift = DMARS_SHIFT;

	switch (sh_chan->id) {
	/* DMARS0 */
	case 0:
	case 1:
		addr = SH_DMARS_BASE;
		break;
	/* DMARS1 */
	case 2:
	case 3:
		addr = (SH_DMARS_BASE + DMARS1_ADDR);
		break;
	/* DMARS2 */
	case 4:
	case 5:
		addr = (SH_DMARS_BASE + DMARS2_ADDR);
		break;
	default:
		return -EINVAL;
	}

	/* Write our byte while preserving the other channel's field */
	ctrl_outw((val << shift) |
		(ctrl_inw(addr) & (shift ? 0x00FF : 0xFF00)),
		addr);

	return 0;
}

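/*
 * Each 16-bit DMARS register carries the request source (MID/RID) for a
 * pair of channels, one per byte. A slave client would normally program it
 * through the hooks installed in sh_dmae_chan_probe() below; a rough,
 * hypothetical sketch, where mid_rid and chcr_val are peripheral-specific
 * values taken from the SoC datasheet:
 *
 *	if (!sh_chan->set_dmars(sh_chan, mid_rid))
 *		sh_chan->set_chcr(sh_chan, chcr_val);
 */
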
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx);
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	/*
	 * The last descriptor keeps the -EBUSY marker set by prep_memcpy();
	 * if this chain has only one descriptor it is also the last one,
	 * so don't overwrite it.
	 */
	if (desc->async_tx.cookie != -EBUSY)
		desc->async_tx.cookie = cookie;
	sh_chan->common.cookie = desc->async_tx.cookie;

	list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc, *_desc, *ret = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return ret;
}

static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
{
	if (!desc)
		return;

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&desc->tx_list, &sh_chan->ld_free);
	list_add(&desc->node, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->async_tx.flags = DMA_CTRL_ACK;
		INIT_LIST_HEAD(&desc->tx_list);
		sh_dmae_put_desc(sh_chan, desc);

		spin_lock_bh(&sh_chan->desc_lock);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&sh_chan->ld_queue));
	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct sh_desc *first = NULL, *prev = NULL, *new;
	size_t copy_size;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	do {
		/* Allocate a link descriptor from the free list */
		new = sh_dmae_get_desc(sh_chan);
		if (!new) {
			dev_err(sh_chan->dev,
					"No free memory for link descriptor\n");
			goto err_get_desc;
		}

		/* Each descriptor covers at most SH_DMA_TCR_MAX bytes */
		copy_size = min(len, (size_t)SH_DMA_TCR_MAX);

		new->hw.sar = dma_src;
		new->hw.dar = dma_dest;
		new->hw.tcr = copy_size;
		if (!first)
			first = new;

		new->mark = DESC_NCOMP;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy_size;
		dma_src += copy_size;
		dma_dest += copy_size;
		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY; /* Last desc */

	return &first->async_tx;

err_get_desc:
	sh_dmae_put_desc(sh_chan, first);
	return NULL;
}

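/*
 * From a dmaengine client's point of view, this prep routine is reached
 * through the generic API; a minimal, hypothetical usage sketch (error
 * handling omitted, "chan" obtained via dma_request_channel()):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
 *						   DMA_CTRL_ACK);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *						!= DMA_SUCCESS)
 *		cpu_relax();
 */
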
/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc, *_desc;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		/* Stop at the first descriptor that has not completed yet */
		if (desc->mark == DESC_NCOMP)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_splice_init(&desc->tx_list, &sh_chan->ld_free);

		dev_dbg(sh_chan->dev, "link descriptor %p will be recycled.\n",
				desc);

		list_move(&desc->node, &sh_chan->ld_free);
		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_bh(&sh_chan->desc_lock);
			dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_bh(&sh_chan->desc_lock);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct list_head *ld_node;
	struct sh_dmae_regs hw;

	/* If the channel is still working, leave it alone */
	if (dmae_is_idle(sh_chan))
		return;

	/* Find the first descriptor that has not been transferred yet */
	for (ld_node = sh_chan->ld_queue.next;
		(ld_node != &sh_chan->ld_queue)
			&& (to_sh_desc(ld_node)->mark == DESC_COMP);
		ld_node = ld_node->next)
		cpu_relax();

	if (ld_node != &sh_chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		hw = to_sh_desc(ld_node)->hw;
		dmae_set_reg(sh_chan, hw);
		dmae_start(sh_chan);
	}
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	sh_dmae_chan_ld_cleanup(sh_chan);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	if (last_complete == -EBUSY)
		last_complete = last_used;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	int err;
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;

	/* IRQ Multi */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		int cnt = 0;

		switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		case DMTE6_IRQ:
			cnt++;
			/* fall through */
#endif
		case DMTE0_IRQ:
			if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
				disable_irq(irq);
				return IRQ_HANDLED;
			}
		default:
			return IRQ_NONE;
		}
	} else {
		/* reset dma controller */
		err = sh_dmae_rst(0);
		if (err)
			return IRQ_NONE;

		if (shdev->pdata.mode & SHDMA_DMAOR1) {
			err = sh_dmae_rst(1);
			if (err)
				return IRQ_NONE;
		}
		disable_irq(irq);
		return IRQ_HANDLED;
	}
}
#endif

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc, *_desc, *cur_desc = NULL;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);

	/* SAR now points just past the descriptor that has completed */
	list_for_each_entry_safe(desc, _desc,
					&sh_chan->ld_queue, node) {
		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
			cur_desc = desc;
			break;
		}
	}

	if (cur_desc) {
		switch (cur_desc->async_tx.cookie) {
		case 0: /* other desc data */
			break;
		case -EBUSY: /* last desc */
			sh_chan->completed_cookie =
				cur_desc->async_tx.cookie;
			break;
		default: /* first desc ( 0 < ) */
			sh_chan->completed_cookie =
				cur_desc->async_tx.cookie - 1;
			break;
		}
		cur_desc->mark = DESC_COMP;
	}

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan);
}

static unsigned int get_dmae_irq(unsigned int id)
{
	unsigned int irq = 0;

	if (id < ARRAY_SIZE(dmte_irq_map))
		irq = dmte_irq_map[id];
	return irq;
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
{
	int err;
	unsigned int irq = get_dmae_irq(id);
	unsigned long irqflags = IRQF_DISABLED;
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
			(unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
			&shdev->common.channels);
	shdev->common.chancnt++;

	/* In mixed IRQ mode several channels share one IRQ line */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
#if defined(DMTE6_IRQ)
		if (irq >= DMTE6_IRQ)
			irq = DMTE6_IRQ;
		else
#endif
			irq = DMTE0_IRQ;
	}

	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			"sh-dmae%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt,
			irqflags, new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
				"with return %d\n", id, err);
		goto err_no_irq;
	}

	/* CHCR register control function */
	new_sh_chan->set_chcr = dmae_set_chcr;
	/* DMARS register control function */
	new_sh_chan->set_dmars = dmae_set_dmars;

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *shchan = shdev->chan[i];

			if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
				free_irq(dmte_irq_map[i], shchan);

			list_del(&shchan->common.device_node);
			kfree(shchan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	int err = 0, cnt, ecnt;
	unsigned long irqflags = IRQF_DISABLED;
#if defined(CONFIG_CPU_SH4)
	int eirq[] = { DMAE0_IRQ,
#if defined(DMAE1_IRQ)
			DMAE1_IRQ
#endif
		};
#endif
	struct sh_dmae_device *shdev;

	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		err = -ENOMEM;
		goto shdev_err;
	}

	/* get platform data */
	if (!pdev->dev.platform_data) {
		err = -ENODEV;
		goto rst_err;
	}

	memcpy(&shdev->pdata, pdev->dev.platform_data,
			sizeof(struct sh_dmae_pdata));

	/* reset dma controller */
	err = sh_dmae_rst(0);
	if (err)
		goto rst_err;

	/* SH7780/85/23 has DMAOR1 */
	if (shdev->pdata.mode & SHDMA_DMAOR1) {
		err = sh_dmae_rst(1);
		if (err)
			goto rst_err;
	}

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
	shdev->common.dev = &pdev->dev;

#if defined(CONFIG_CPU_SH4)
	/* Non Mix IRQ mode SH7722/SH7730 etc... */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
		eirq[0] = DMTE0_IRQ;
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		eirq[1] = DMTE6_IRQ;
#endif
	}

	for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
		err = request_irq(eirq[ecnt], sh_dmae_err,
				irqflags, "DMAC Address Error", shdev);
		if (err) {
			dev_err(&pdev->dev, "DMA device request_irq "
				"error (irq %d) with return %d\n",
				eirq[ecnt], err);
			goto eirq_err;
		}
	}
#endif /* CONFIG_CPU_SH4 */

	/* Create DMA Channels */
	for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
		err = sh_dmae_chan_probe(shdev, cnt);
		if (err)
			goto chan_probe_err;
	}

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

eirq_err:
	for (ecnt-- ; ecnt >= 0; ecnt--)
		free_irq(eirq[ecnt], shdev);

rst_err:
	kfree(shdev);

shdev_err:
	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&shdev->common);

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		free_irq(DMTE0_IRQ, shdev);
#if defined(DMTE6_IRQ)
		free_irq(DMTE6_IRQ, shdev);
#endif
	}

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
		free_irq(DMAE0_IRQ, shdev);
#if defined(DMAE1_IRQ)
		free_irq(DMAE1_IRQ, shdev);
#endif
	}
	kfree(shdev);

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	sh_dmae_ctl_stop(0);
	if (shdev->pdata.mode & SHDMA_DMAOR1)
		sh_dmae_ctl_stop(1);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");