/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have a hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 */

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "shdma.h"

#define SAR	0x00	/* Source Address Register */
#define DAR	0x04	/* Destination Address Register */
#define TCR	0x08	/* Transfer Count Register */
#define CHCR	0x0C	/* Channel Control Register */
#define DMAOR	0x40	/* DMA Operation Register */

#define TEND	0x18	/* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
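/*
 * SH_DMA_TCR_MAX reflects the 24-bit transfer count, i.e. the "MAX DMA size
 * is 16MB" limit noted in the header comment.
 */
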
/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     the channel
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
        const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
                sh_dc->shdma_chan.id;
        u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

        __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
        __raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
        return __raw_readl(sh_dc->base + reg);
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
        void __iomem *addr = shdev->chan_reg + DMAOR;

        if (shdev->pdata->dmaor_is_32bit)
                return __raw_readl(addr);

        return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
        void __iomem *addr = shdev->chan_reg + DMAOR;

        if (shdev->pdata->dmaor_is_32bit)
                __raw_writel(data, addr);
        else
                __raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

        __raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

        return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
        unsigned short dmaor;
        unsigned long flags;

        spin_lock_irqsave(&sh_dmae_lock, flags);

        dmaor = dmaor_read(shdev);
        dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

        spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
        unsigned short dmaor;
        unsigned long flags;

        spin_lock_irqsave(&sh_dmae_lock, flags);

        dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

        if (shdev->pdata->chclr_present) {
                int i;

                for (i = 0; i < shdev->pdata->channel_num; i++) {
                        struct sh_dmae_chan *sh_chan = shdev->chan[i];

                        if (sh_chan)
                                channel_clear(sh_chan);
                }
        }

        dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

        dmaor = dmaor_read(shdev);

        spin_unlock_irqrestore(&sh_dmae_lock, flags);

        if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
                dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
                return -EIO;
        }
        if (shdev->pdata->dmaor_init & ~dmaor)
                dev_warn(shdev->shdma_dev.dma_dev.dev,
                         "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
                         dmaor, shdev->pdata->dmaor_init);
        return 0;
}

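/*
 * A channel counts as busy while DE (DMA enable) is set but TE (transfer
 * end) has not yet been raised by the controller.
 */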
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = chcr_read(sh_chan);

        if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
                return true; /* working */

        return false; /* waiting */
}

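/*
 * The transfer-size (TS) field is split across two bit groups in CHCR on
 * some DMAC variants; the pdata masks and shifts reassemble the index into
 * the ts_shift[] table, which holds log2 of the transfer size in bytes.
 */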
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        const struct sh_dmae_pdata *pdata = shdev->pdata;
        int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
                ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

        if (cnt >= pdata->ts_shift_num)
                cnt = 0;

        return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        const struct sh_dmae_pdata *pdata = shdev->pdata;
        int i;

        for (i = 0; i < pdata->ts_shift_num; i++)
                if (pdata->ts_shift[i] == l2size)
                        break;

        if (i == pdata->ts_shift_num)
                i = 0;

        return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
                ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

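/*
 * TCR counts transfer-size units rather than bytes, hence the right shift
 * by xmit_shift when the register is programmed.
 */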
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
        sh_dmae_writel(sh_chan, hw->sar, SAR);
        sh_dmae_writel(sh_chan, hw->dar, DAR);
        sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        u32 chcr = chcr_read(sh_chan);

        if (shdev->pdata->needs_tend_set)
                sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

        chcr |= CHCR_DE | shdev->chcr_ie_bit;
        chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
        /*
         * Default configuration for dual address memory-memory transfer.
         */
        u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
                                                LOG2_DEFAULT_XFER_SIZE);

        sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
        chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
        /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
        chcr_write(sh_chan, val);

        return 0;
}

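/*
 * Each 16-bit DMARS register carries the MID/RID values of two channels;
 * the read-modify-write below preserves the neighbouring channel's field.
 */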
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        const struct sh_dmae_pdata *pdata = shdev->pdata;
        const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
        void __iomem *addr = shdev->dmars;
        unsigned int shift = chan_pdata->dmars_bit;

        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        /* in the case of a missing DMARS resource use first memory window */
        if (!addr)
                addr = shdev->chan_reg;
        addr += chan_pdata->dmars;

        __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
                     addr);

        return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
                               struct shdma_desc *sdesc)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
        dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
                sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
                sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
        /* Get the ld start address from ld_queue */
        dmae_set_reg(sh_chan, &sh_desc->hw);
        dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
                               int slave_id)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);

        if (slave_id >= 0) {
                const struct sh_dmae_slave_config *cfg =
                        sh_chan->config;

                dmae_set_dmars(sh_chan, cfg->mid_rid);
                dmae_set_chcr(sh_chan, cfg->chcr);
        } else {
                dmae_init(sh_chan);
        }
}

/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
        struct sh_dmae_chan *sh_chan, int match)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        const struct sh_dmae_pdata *pdata = shdev->pdata;
        const struct sh_dmae_slave_config *cfg;
        int i;

        if (!sh_chan->shdma_chan.dev->of_node) {
                if (match >= SH_DMA_SLAVE_NUMBER)
                        return NULL;

                for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
                        if (cfg->slave_id == match)
                                return cfg;
        } else {
                for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
                        if (cfg->mid_rid == match) {
                                sh_chan->shdma_chan.slave_id = i;
                                return cfg;
                        }
        }

        return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
                             int slave_id, dma_addr_t slave_addr, bool try)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
        if (!cfg)
                return -ENXIO;

        if (!try) {
                sh_chan->config = cfg;
                sh_chan->slave_addr = slave_addr ? : cfg->addr;
        }

        return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        u32 chcr = chcr_read(sh_chan);

        chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
        chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
                              struct shdma_desc *sdesc,
                              dma_addr_t src, dma_addr_t dst, size_t *len)
{
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);

        if (*len > schan->max_xfer_len)
                *len = schan->max_xfer_len;

        sh_desc->hw.sar = src;
        sh_desc->hw.dar = dst;
        sh_desc->hw.tcr = *len;

        return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);

        if (!(chcr_read(sh_chan) & CHCR_TE))
                return false;

        /* DMA stop */
        dmae_halt(sh_chan);

        return true;
}

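/*
 * Residue of a partially completed descriptor: the byte count originally
 * requested minus what is still outstanding in TCR, scaled back to bytes.
 */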
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
                                  struct shdma_desc *sdesc)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
        return sh_desc->hw.tcr -
                (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
        bool ret;

        /* halt the dma controller */
        sh_dmae_ctl_stop(shdev);

        /* We cannot detect, which channel caused the error, have to reset all */
        ret = shdma_reset(&shdev->shdma_dev);

        sh_dmae_rst(shdev);

        return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
        struct sh_dmae_device *shdev = data;

        if (!(dmaor_read(shdev) & DMAOR_AE))
                return IRQ_NONE;

        sh_dmae_reset(shdev);
        return IRQ_HANDLED;
}
#endif

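/*
 * Completion is detected by comparing the current SAR/DAR with the end
 * address the descriptor was programmed to reach.
 */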
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
                                   struct shdma_desc *sdesc)
{
        struct sh_dmae_chan *sh_chan = container_of(schan,
                                        struct sh_dmae_chan, shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
        u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
        u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

        return  (sdesc->direction == DMA_DEV_TO_MEM &&
                 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
                (sdesc->direction != DMA_DEV_TO_MEM &&
                 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
        /* Fast path out if NMIF is not asserted for this controller */
        if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
                return false;

        return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
                               unsigned long cmd, void *data)
{
        struct sh_dmae_device *shdev;
        int ret = NOTIFY_DONE;
        bool triggered;

        /*
         * Only concern ourselves with NMI events.
         *
         * Normally we would check the die chain value, but as this needs
         * to be architecture independent, check for NMI context instead.
         */
        if (!in_nmi())
                return ret;

        rcu_read_lock();
        list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
                /*
                 * Only stop if one of the controllers has NMIF asserted,
                 * we do not want to interfere with regular address error
                 * handling or NMI events that don't concern the DMACs.
                 */
                triggered = sh_dmae_nmi_notify(shdev);
                if (triggered == true)
                        ret = NOTIFY_OK;
        }
        rcu_read_unlock();

        return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
        .notifier_call  = sh_dmae_nmi_handler,

        /* Run before NMI debug handler and KGDB */
        .priority       = 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
                              int irq, unsigned long flags)
{
        const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
        struct shdma_dev *sdev = &shdev->shdma_dev;
        struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
        struct sh_dmae_chan *sh_chan;
        struct shdma_chan *schan;
        int err;

        sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
                               GFP_KERNEL);
        if (!sh_chan) {
                dev_err(sdev->dma_dev.dev,
                        "No free memory for allocating dma channels!\n");
                return -ENOMEM;
        }

        schan = &sh_chan->shdma_chan;
        schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

        shdma_chan_probe(sdev, schan, id);

        sh_chan->base = shdev->chan_reg + chan_pdata->offset;

        /* set up channel irq */
        if (pdev->id >= 0)
                snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
                         "sh-dmae%d.%d", pdev->id, id);
        else
                snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
                         "sh-dma%d", id);

        err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
        if (err) {
                dev_err(sdev->dma_dev.dev,
                        "DMA channel %d request_irq error %d\n",
                        id, err);
                goto err_no_irq;
        }

        shdev->chan[id] = sh_chan;
        return 0;

err_no_irq:
        /* remove from dmaengine device node */
        shdma_chan_remove(schan);
        return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
        struct shdma_chan *schan;
        int i;

        shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
                shdma_chan_remove(schan);
        }
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);

        sh_dmae_ctl_stop(shdev);

        return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);

        return sh_dmae_rst(shdev);
}

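/*
 * System sleep: the controller is stopped on suspend; on resume it is reset
 * and every channel that was in use gets its DMARS/CHCR configuration
 * reprogrammed.
 */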
#ifdef CONFIG_PM_SLEEP
static int sh_dmae_suspend(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);

        sh_dmae_ctl_stop(shdev);

        return 0;
}

static int sh_dmae_resume(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);
        int i, ret;

        ret = sh_dmae_rst(shdev);
        if (ret < 0)
                dev_err(dev, "Failed to reset!\n");

        for (i = 0; i < shdev->pdata->channel_num; i++) {
                struct sh_dmae_chan *sh_chan = shdev->chan[i];

                if (!sh_chan->shdma_chan.desc_num)
                        continue;

                if (sh_chan->shdma_chan.slave_id >= 0) {
                        const struct sh_dmae_slave_config *cfg = sh_chan->config;
                        dmae_set_dmars(sh_chan, cfg->mid_rid);
                        dmae_set_chcr(sh_chan, cfg->chcr);
                } else {
                        dmae_init(sh_chan);
                }
        }

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops sh_dmae_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
        SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
                           NULL)
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
        struct sh_dmae_chan *sh_chan = container_of(schan,
                                        struct sh_dmae_chan, shdma_chan);

        /*
         * Implicit BUG_ON(!sh_chan->config)
         * This is an exclusive slave DMA operation, may only be called after a
         * successful slave configuration.
         */
        return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
        return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
        .desc_completed = sh_dmae_desc_completed,
        .halt_channel = sh_dmae_halt,
        .channel_busy = sh_dmae_channel_busy,
        .slave_addr = sh_dmae_slave_addr,
        .desc_setup = sh_dmae_desc_setup,
        .set_slave = sh_dmae_set_slave,
        .setup_xfer = sh_dmae_setup_xfer,
        .start_xfer = sh_dmae_start_xfer,
        .embedded_desc = sh_dmae_embedded_desc,
        .chan_irq = sh_dmae_chan_irq,
        .get_partial = sh_dmae_get_partial,
};

static const struct of_device_id sh_dmae_of_match[] = {
        {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
        {}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static int sh_dmae_probe(struct platform_device *pdev)
{
        const enum dma_slave_buswidth widths =
                DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
                DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
                DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
        const struct sh_dmae_pdata *pdata;
        unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
        int chan_irq[SH_DMAE_MAX_CHANNELS];
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
        unsigned long irqflags = 0;
        int errirq;
#endif
        int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
        struct sh_dmae_device *shdev;
        struct dma_device *dma_dev;
        struct resource *chan, *dmars, *errirq_res, *chanirq_res;

        if (pdev->dev.of_node)
                pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
        else
                pdata = dev_get_platdata(&pdev->dev);

        /* get platform data */
        if (!pdata || !pdata->channel_num)
                return -ENODEV;

        chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* DMARS area is optional */
        dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        /*
         * IRQ resources:
         * 1. there always must be at least one IRQ IO-resource. On SH4 it is
         *    the error IRQ, in which case it is the only IRQ in this resource:
         *    start == end. If it is the only IRQ resource, all channels also
         *    use the same IRQ.
         * 2. DMA channel IRQ resources can be specified one per resource or in
         *    ranges (start != end)
         * 3. iff all events (channels and, optionally, error) on this
         *    controller use the same IRQ, only one IRQ resource can be
         *    specified, otherwise there must be one IRQ per channel, even if
         *    some of them are equal
         * 4. if all IRQs on this controller are equal or if some specific IRQs
         *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
         *    requested with the IRQF_SHARED flag
         */
        errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!chan || !errirq_res)
                return -ENODEV;

        shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
                             GFP_KERNEL);
        if (!shdev) {
                dev_err(&pdev->dev, "Not enough memory\n");
                return -ENOMEM;
        }

        dma_dev = &shdev->shdma_dev.dma_dev;

        shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
        if (IS_ERR(shdev->chan_reg))
                return PTR_ERR(shdev->chan_reg);
        if (dmars) {
                shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
                if (IS_ERR(shdev->dmars))
                        return PTR_ERR(shdev->dmars);
        }

        dma_dev->src_addr_widths = widths;
        dma_dev->dst_addr_widths = widths;
        dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        if (!pdata->slave_only)
                dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        if (pdata->slave && pdata->slave_num)
                dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

        /* Default transfer size of 4 bytes requires 4-byte alignment */
        dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

        shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
        shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
        err = shdma_init(&pdev->dev, &shdev->shdma_dev,
                         pdata->channel_num);
        if (err < 0)
                goto eshdma;

        /* platform data */
        shdev->pdata = pdata;

        if (pdata->chcr_offset)
                shdev->chcr_offset = pdata->chcr_offset;
        else
                shdev->chcr_offset = CHCR;

        if (pdata->chcr_ie_bit)
                shdev->chcr_ie_bit = pdata->chcr_ie_bit;
        else
                shdev->chcr_ie_bit = CHCR_IE;

        platform_set_drvdata(pdev, shdev);

        pm_runtime_enable(&pdev->dev);
        err = pm_runtime_get_sync(&pdev->dev);
        if (err < 0)
                dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

        spin_lock_irq(&sh_dmae_lock);
        list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
        spin_unlock_irq(&sh_dmae_lock);

        /* reset dma controller - only needed as a test */
        err = sh_dmae_rst(shdev);
        if (err)
                goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
        chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

        if (!chanirq_res)
                chanirq_res = errirq_res;
        else
                irqres++;

        if (chanirq_res == errirq_res ||
            (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
                irqflags = IRQF_SHARED;

        errirq = errirq_res->start;

        err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
                               "DMAC Address Error", shdev);
        if (err) {
                dev_err(&pdev->dev,
                        "DMA failed requesting irq #%d, error %d\n",
                        errirq, err);
                goto eirq_err;
        }

#else
        chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

        if (chanirq_res->start == chanirq_res->end &&
            !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
                /* Special case - all multiplexed */
                for (; irq_cnt < pdata->channel_num; irq_cnt++) {
                        if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
                                chan_irq[irq_cnt] = chanirq_res->start;
                                chan_flag[irq_cnt] = IRQF_SHARED;
                        } else {
                                irq_cap = 1;
                                break;
                        }
                }
        } else {
                do {
                        for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
                                if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
                                        irq_cap = 1;
                                        break;
                                }

                                if ((errirq_res->flags & IORESOURCE_BITS) ==
                                    IORESOURCE_IRQ_SHAREABLE)
                                        chan_flag[irq_cnt] = IRQF_SHARED;
                                else
                                        chan_flag[irq_cnt] = 0;
                                dev_dbg(&pdev->dev,
                                        "Found IRQ %d for channel %d\n",
                                        i, irq_cnt);
                                chan_irq[irq_cnt++] = i;
                        }

                        if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
                                break;

                        chanirq_res = platform_get_resource(pdev,
                                                IORESOURCE_IRQ, ++irqres);
                } while (irq_cnt < pdata->channel_num && chanirq_res);
        }

        /* Create DMA Channel */
        for (i = 0; i < irq_cnt; i++) {
                err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
                if (err)
                        goto chan_probe_err;
        }

        if (irq_cap)
                dev_notice(&pdev->dev, "Attempting to register %d DMA "
                           "channels when a maximum of %d are supported.\n",
                           pdata->channel_num, SH_DMAE_MAX_CHANNELS);

        pm_runtime_put(&pdev->dev);

        err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
        if (err < 0)
                goto edmadevreg;

        return err;

edmadevreg:
        pm_runtime_get(&pdev->dev);

chan_probe_err:
        sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
        spin_lock_irq(&sh_dmae_lock);
        list_del_rcu(&shdev->node);
        spin_unlock_irq(&sh_dmae_lock);

        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        shdma_cleanup(&shdev->shdma_dev);
eshdma:
        return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

        dma_async_device_unregister(dma_dev);

        spin_lock_irq(&sh_dmae_lock);
        list_del_rcu(&shdev->node);
        spin_unlock_irq(&sh_dmae_lock);

        pm_runtime_disable(&pdev->dev);

        sh_dmae_chan_remove(shdev);
        shdma_cleanup(&shdev->shdma_dev);

        return 0;
}

static struct platform_driver sh_dmae_driver = {
        .driver         = {
                .pm     = &sh_dmae_pm,
                .name   = SH_DMAE_DRV_NAME,
                .of_match_table = sh_dmae_of_match,
        },
        .remove         = sh_dmae_remove,
};

static int __init sh_dmae_init(void)
{
        /* Wire up NMI handling */
        int err = register_die_notifier(&sh_dmae_nmi_notifier);
        if (err)
                return err;

        return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
        platform_driver_unregister(&sh_dmae_driver);

        unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);