/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"
#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER	256
#define SH_DMA_TCR_MAX		(16 * 1024 * 1024 - 1)
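/*
 * Note: SH_DMA_TCR_MAX bounds a single hardware descriptor. Each channel's
 * max_xfer_len is set to SH_DMA_TCR_MAX + 1 (16 MiB) in sh_dmae_chan_probe(),
 * and sh_dmae_desc_setup() clamps every descriptor to that limit.
 */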
/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);
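/*
 * Writers (probe/remove) take sh_dmae_lock around list_add_tail_rcu() /
 * list_del_rcu() on sh_dmae_devices; the NMI notifier below walks the list
 * with list_for_each_entry_rcu() and never takes the spinlock.
 */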
static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, shdev->chan_reg +
		     shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);

	return __raw_readw(addr);
}
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}
static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}
/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				chclr_write(sh_chan, 0);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}
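/*
 * The CHCR transfer-size (TS) field is split into a low and a high bit group;
 * the masks/shifts and the TS-code -> log2(size) lookup table (ts_shift[])
 * come from platform data. calc_xmit_shift() and log2size_to_chcr() below
 * translate in both directions, falling back to index 0 for unknown codes.
 */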
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}
static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}
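/*
 * DMARS carries the 8-bit MID/RID peripheral request code for a channel; its
 * position inside the 16-bit register word is given by chan_pdata->dmars_bit,
 * hence the (0xff00 >> shift) mask below, which preserves the neighbouring
 * channel's field while updating ours.
 */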
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}
static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}
static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}
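/*
 * Slave lookup walks the sh_dmae_slave_config table supplied in platform
 * data. A typical entry pairs a slave_id with the peripheral FIFO address,
 * CHCR settings and DMARS MID/RID code, roughly (all values below are purely
 * illustrative, not taken from any real SoC):
 *
 *	{ .slave_id = SLAVE_ID_SCIF0_TX, .addr = 0xffe0000c,
 *	  .chcr = 0x00001030, .mid_rid = 0x21 },
 */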
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int slave_id)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
		if (cfg->slave_id == slave_id)
			return cfg;

	return NULL;
}
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENODEV;

	if (!try)
		sh_chan->config = cfg;

	return 0;
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}
static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}
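/*
 * Note: sh_dmae_desc_setup() clamps *len to max_xfer_len and reports the
 * adjusted length back to the shdma-base core, which keeps issuing further
 * descriptors until the whole request is covered, so oversized transfers are
 * simply split across several descriptors.
 */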
static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	/*
	 * hw.tcr is in bytes, while the TCR register holds the remaining
	 * count in transfer units, so scale the register value back up.
	 */
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so we have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}
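/*
 * sh_dmae_err() services the controller-level address-error interrupt: when
 * DMAOR_AE is set, the hardware does not report which channel faulted, so
 * the whole controller is halted and reinitialized via sh_dmae_reset().
 */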
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}
static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	kfree(sh_chan);
	return err;
}
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
		BUG_ON(!schan);

		shdma_free_irq(&sh_chan->shdma_chan);

		shdma_chan_remove(schan);
		kfree(sh_chan);
	}
	dma_dev->chancnt = 0;
}
static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}
static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif
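/*
 * Both runtime resume and system resume go through sh_dmae_rst(), and system
 * resume additionally reprograms each configured channel's CHCR/DMARS, since
 * that register state is not expected to survive a power-down of the
 * controller.
 */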
const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};
static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation; it may only be called
	 * after a successful slave configuration.
	 */
	return sh_chan->config->addr;
}
static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}
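/*
 * These callbacks plug this controller driver into the shared shdma-base
 * library, which implements the generic dmaengine interface (descriptor
 * pools, cookie bookkeeping, completion tasklets) on top of them.
 */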
static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};
static int __devinit sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;
	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
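	/*
	 * For example (illustrative only, not taken from a real board file),
	 * a controller with one error IRQ and a consecutive range of six
	 * channel IRQs could be described as:
	 *
	 *	{ .start = 0xfe008020, .end = 0xfe00828f, .flags = IORESOURCE_MEM },
	 *	{ .start = 0xfe009000, .end = 0xfe009003, .flags = IORESOURCE_MEM },
	 *	{ .start = ERR_IRQ, .end = ERR_IRQ,     .flags = IORESOURCE_IRQ },
	 *	{ .start = CH0_IRQ, .end = CH0_IRQ + 5, .flags = IORESOURCE_IRQ },
	 *
	 * where ERR_IRQ and CH0_IRQ stand in for SoC-specific interrupt numbers.
	 */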
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars),
					 pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}
	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	/* Default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;
	shdev->pdata = pdev->dev.platform_data;
	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);
	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}
	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);
chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
	synchronize_rcu();
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}
static int __devexit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(dma_dev);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();
	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}
static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
	},
	.remove		= __devexit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
};
static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);
static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);