/*
 * Copyright (C) 2011-2013 Renesas Electronics Corporation
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This file is based on the drivers/dma/sh/shdma.c
 *
 * Renesas SuperH DMA Engine support
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - max DMA size is 16MB.
 */
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_data/dma-rcar-hpbdma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shdma-base.h>
#include <linux/slab.h>
/* DMA channel registers */
#define HPB_DMAE_DSAR0	0x00
#define HPB_DMAE_DDAR0	0x04
#define HPB_DMAE_DTCR0	0x08
#define HPB_DMAE_DSAR1	0x0C
#define HPB_DMAE_DDAR1	0x10
#define HPB_DMAE_DTCR1	0x14
#define HPB_DMAE_DSASR	0x18
#define HPB_DMAE_DDASR	0x1C
#define HPB_DMAE_DTCSR	0x20
#define HPB_DMAE_DPTR	0x24
#define HPB_DMAE_DCR	0x28
#define HPB_DMAE_DCMDR	0x2C
#define HPB_DMAE_DSTPR	0x30
#define HPB_DMAE_DSTSR	0x34
#define HPB_DMAE_DDBGR	0x38
#define HPB_DMAE_DDBGR2	0x3C
#define HPB_DMAE_CHAN(n)	(0x40 * (n))

/* DMA command register (DCMDR) bits */
#define HPB_DMAE_DCMDR_BDOUT	BIT(7)
#define HPB_DMAE_DCMDR_DQSPD	BIT(6)
#define HPB_DMAE_DCMDR_DQSPC	BIT(5)
#define HPB_DMAE_DCMDR_DMSPD	BIT(4)
#define HPB_DMAE_DCMDR_DMSPC	BIT(3)
#define HPB_DMAE_DCMDR_DQEND	BIT(2)
#define HPB_DMAE_DCMDR_DNXT	BIT(1)
#define HPB_DMAE_DCMDR_DMEN	BIT(0)

/* DMA forced stop register (DSTPR) bits */
#define HPB_DMAE_DSTPR_DMSTP	BIT(0)

/* DMA status register (DSTSR) bits */
#define HPB_DMAE_DSTSR_DQSTS	BIT(2)
#define HPB_DMAE_DSTSR_DMSTS	BIT(0)

/* DMA common registers */
#define HPB_DMAE_DTIMR		0x00
#define HPB_DMAE_DINTSR0	0x0C
#define HPB_DMAE_DINTSR1	0x10
#define HPB_DMAE_DINTCR0	0x14
#define HPB_DMAE_DINTCR1	0x18
#define HPB_DMAE_DINTMR0	0x1C
#define HPB_DMAE_DINTMR1	0x20
#define HPB_DMAE_DACTSR0	0x24
#define HPB_DMAE_DACTSR1	0x28
#define HPB_DMAE_HSRSTR(n)	(0x40 + (n) * 4)
#define HPB_DMAE_HPB_DMASPR(n)	(0x140 + (n) * 4)
#define HPB_DMAE_HPB_DMLVLR0	0x160
#define HPB_DMAE_HPB_DMLVLR1	0x164
#define HPB_DMAE_HPB_DMSHPT0	0x168
#define HPB_DMAE_HPB_DMSHPT1	0x16C

#define HPB_DMA_SLAVE_NUMBER	256
#define HPB_DMA_TCR_MAX		0x01000000	/* 16 MiB */
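
/*
 * hpb_dmae_desc_setup() clamps each descriptor to HPB_DMA_TCR_MAX, and
 * hpb_dmae_chan_probe() advertises the same value as the channel's
 * max_xfer_len, so the shdma-base core splits larger requests into
 * multiple descriptors. This is how the 16 MiB limit noted in the
 * header comment is enforced.
 */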
struct hpb_dmae_chan {
	struct shdma_chan shdma_chan;
	int xfer_mode;			/* DMA transfer mode */
#define XFER_SINGLE	1
#define XFER_DOUBLE	2
	unsigned plane_idx;		/* current DMA information set */
	bool first_desc;		/* first/next transfer */
	int xmit_shift;			/* log_2(bytes_per_xfer) */
	void __iomem *base;
	const struct hpb_dmae_slave_config *cfg;
	char dev_id[16];		/* unique name per DMAC of channel */
	dma_addr_t slave_addr;
};
struct hpb_dmae_device {
	struct shdma_dev shdma_dev;
	spinlock_t reg_lock;		/* comm_reg operation lock */
	struct hpb_dmae_pdata *pdata;
	void __iomem *chan_reg;
	void __iomem *comm_reg;
	void __iomem *reset_reg;
	void __iomem *mode_reg;
};
struct hpb_dmae_regs {
	u32 sar; /* SAR / source address */
	u32 dar; /* DAR / destination address */
	u32 tcr; /* TCR / transfer count */
};

struct hpb_desc {
	struct shdma_desc shdma_desc;
	struct hpb_dmae_regs hw;
	unsigned plane_idx;
};
#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
				struct hpb_dmae_device, shdma_dev.dma_dev)
static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
{
	iowrite32(data, hpb_dc->base + reg);
}
static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
{
	return ioread32(hpb_dc->base + reg);
}
static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
}
static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
}
static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 v;

	/* Interrupt status for channels 0-31 is in DINTSR0, 32+ in DINTSR1 */
	if (ch < 32)
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
	else
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
	return v & 0x1;
}
static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	/* Clear the interrupt for the given channel in DINTCR0/DINTCR1 */
	if (ch < 32)
		iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
	else
		iowrite32((0x1 << (ch - 32)),
			  hpbdev->comm_reg + HPB_DMAE_DINTCR1);
}
static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->mode_reg);
}
static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
{
	return ioread32(hpbdev->mode_reg);
}
static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 intreg;

	spin_lock_irq(&hpbdev->reg_lock);
	if (ch < 32) {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
		iowrite32(BIT(ch) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR0);
	} else {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
		iowrite32(BIT(ch - 32) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR1);
	}
	spin_unlock_irq(&hpbdev->reg_lock);
}
static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
{
	u32 rstr;
	int timeout = 10000;	/* 100 ms */

	spin_lock(&hpbdev->reg_lock);
	/* Assert the requested reset bits and poll until they read back set */
	rstr = ioread32(hpbdev->reset_reg);
	rstr |= data;
	iowrite32(rstr, hpbdev->reset_reg);
	do {
		rstr = ioread32(hpbdev->reset_reg);
		if ((rstr & data) == data)
			break;
		udelay(10);
	} while (timeout--);

	if (timeout < 0)
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"%s timeout\n", __func__);

	rstr &= ~data;
	iowrite32(rstr, hpbdev->reset_reg);
	spin_unlock(&hpbdev->reg_lock);
}
static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
				    u32 mask, u32 data)
{
	u32 mode;

	spin_lock_irq(&hpbdev->reg_lock);
	/* Read-modify-write of the async mode register */
	mode = asyncmdr_read(hpbdev);
	mode &= ~mask;
	mode |= data;
	asyncmdr_write(hpbdev, mode);
	spin_unlock_irq(&hpbdev->reg_lock);
}
static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
{
	dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
}
static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
{
	u32 ch;

	for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
		hsrstr_write(hpbdev, ch);
}
static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
	int i;

	switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
	case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
	default:
		i = XMIT_SZ_8BIT;
		break;
	case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
		i = XMIT_SZ_16BIT;
		break;
	case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
		i = XMIT_SZ_32BIT;
		break;
	}
	return pdata->ts_shift[i];
}
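
/*
 * xmit_shift is log2 of the bus access size selected in DCR: transfer
 * counts are programmed into DTCRn in units of accesses, so byte
 * lengths are shifted right by xmit_shift when written (see
 * hpb_dmae_set_reg()) and shifted left when a residue is converted
 * back to bytes (see hpb_dmae_get_partial()).
 */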
static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
			     struct hpb_dmae_regs *hw, unsigned plane)
{
	ch_reg_write(hpb_chan, hw->sar,
		     plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
	ch_reg_write(hpb_chan, hw->dar,
		     plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
	ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
		     plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
}
static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
{
	ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
		     HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
}
static void hpb_dmae_halt(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);

	chan->plane_idx = 0;
	chan->first_desc = true;
}
static const struct hpb_dmae_slave_config *
hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int i;

	if (slave_id >= HPB_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->num_slaves; i++)
		if (pdata->slaves[i].id == slave_id)
			return pdata->slaves + i;

	return NULL;
}
static void hpb_dmae_start_xfer(struct shdma_chan *schan,
				struct shdma_desc *sdesc)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	struct hpb_desc *desc = to_desc(sdesc);

	if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
		hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);

	desc->plane_idx = chan->plane_idx;
	hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
	hpb_dmae_start(chan, !chan->first_desc);

	if (chan->xfer_mode == XFER_DOUBLE) {
		chan->plane_idx ^= 1;
		chan->first_desc = false;
	}
}
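
/*
 * In XFER_DOUBLE mode the channel has two register planes
 * (DSAR0/DDAR0/DTCR0 and DSAR1/DDAR1/DTCR1): each start_xfer programs
 * the currently idle plane and toggles plane_idx, and every start
 * after the first is issued with DCMDR.DNXT set so that the hardware
 * continues into the newly programmed plane.
 */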
static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
				    struct shdma_desc *sdesc)
{
	/*
	 * This is correct since we always have at most a single
	 * outstanding DMA transfer per channel, and by the time
	 * we get the completion interrupt the transfer is completed.
	 * This will change if we ever use alternating DMA
	 * information sets and submit two descriptors at once.
	 */
	return true;
}
static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	int ch = chan->cfg->dma_ch;

	/* Check for a completed DMA transfer */
	if (dintsr_read(hpbdev, ch)) {
		/* Clear the interrupt status */
		dintcr_write(hpbdev, ch);
		return true;
	}
	return false;
}
static int hpb_dmae_desc_setup(struct shdma_chan *schan,
			       struct shdma_desc *sdesc,
			       dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct hpb_desc *desc = to_desc(sdesc);

	if (*len > (size_t)HPB_DMA_TCR_MAX)
		*len = (size_t)HPB_DMA_TCR_MAX;

	desc->hw.sar = src;
	desc->hw.dar = dst;
	desc->hw.tcr = *len;

	return 0;
}
static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct hpb_desc *desc = to_desc(sdesc);
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 tcr = ch_reg_read(chan, desc->plane_idx ?
				HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);

	return (desc->hw.tcr - tcr) << chan->xmit_shift;
}
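
/*
 * DTCRn counts down as the transfer progresses, so the difference from
 * the originally requested count, scaled to bytes via xmit_shift, is
 * reported as the amount already transferred when a transfer is
 * stopped part-way through.
 */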
static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);

	if (chan->xfer_mode == XFER_DOUBLE)
		return dstsr & HPB_DMAE_DSTSR_DQSTS;

	return dstsr & HPB_DMAE_DSTSR_DMSTS;
}
static int
hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
			      const struct hpb_dmae_slave_config *cfg)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	const struct hpb_dmae_channel *channel = pdata->channels;
	int slave_id = cfg->id;
	int i, err;

	for (i = 0; i < pdata->num_channels; i++, channel++) {
		if (channel->s_id == slave_id) {
			struct device *dev = hpb_chan->shdma_chan.dev;

			hpb_chan->base = hpbdev->chan_reg +
				HPB_DMAE_CHAN(cfg->dma_ch);

			dev_dbg(dev, "Detected Slave device\n");
			dev_dbg(dev, " -- slave_id       : 0x%x\n", slave_id);
			dev_dbg(dev, " -- cfg->dma_ch    : %d\n", cfg->dma_ch);
			dev_dbg(dev, " -- channel->ch_irq: %d\n",
				channel->ch_irq);
			break;
		}
	}

	err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
				IRQF_SHARED, hpb_chan->dev_id);
	if (err) {
		dev_err(hpb_chan->shdma_chan.dev,
			"DMA channel request_irq %d failed with error %d\n",
			channel->ch_irq, err);
		return err;
	}

	hpb_chan->plane_idx = 0;
	hpb_chan->first_desc = true;

	if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
		hpb_chan->xfer_mode = XFER_SINGLE;
	} else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
		   (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
		hpb_chan->xfer_mode = XFER_DOUBLE;
	} else {
		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
		return -EINVAL;
	}

	if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
		hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
	ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
	ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
	hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
	hpb_dmae_enable_int(hpbdev, cfg->dma_ch);

	return 0;
}
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
			      dma_addr_t slave_addr, bool try)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	const struct hpb_dmae_slave_config *sc =
		hpb_dmae_find_slave(chan, slave_id);

	if (!sc)
		return -ENODEV;
	if (try)
		return 0;
	chan->cfg = sc;
	chan->slave_addr = slave_addr ? : sc->addr;
	return hpb_dmae_alloc_chan_resources(chan, sc);
}
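
/*
 * The shdma-base core calls .set_slave twice: once with try = true
 * while only checking whether the channel can serve the slave (nothing
 * is committed), and then with try = false to actually bind it, which
 * is when the IRQ is requested and the channel registers are set up
 * via hpb_dmae_alloc_chan_resources().
 */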
static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
}
static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	return chan->slave_addr;
}
static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
{
	return &((struct hpb_desc *)buf)[i].shdma_desc;
}
static const struct shdma_ops hpb_dmae_ops = {
	.desc_completed	= hpb_dmae_desc_completed,
	.halt_channel	= hpb_dmae_halt,
	.channel_busy	= hpb_dmae_channel_busy,
	.slave_addr	= hpb_dmae_slave_addr,
	.desc_setup	= hpb_dmae_desc_setup,
	.set_slave	= hpb_dmae_set_slave,
	.setup_xfer	= hpb_dmae_setup_xfer,
	.start_xfer	= hpb_dmae_start_xfer,
	.embedded_desc	= hpb_dmae_embedded_desc,
	.chan_irq	= hpb_dmae_chan_irq,
	.get_partial	= hpb_dmae_get_partial,
};
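
/*
 * These callbacks are driven by the shdma-base core, which implements
 * the generic dmaengine interface (descriptor allocation, queueing and
 * completion handling) on top of them; this driver only handles the
 * HPB-specific register programming.
 */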
static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
{
	struct shdma_dev *sdev = &hpbdev->shdma_dev;
	struct platform_device *pdev =
		to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
	struct hpb_dmae_chan *new_hpb_chan;
	struct shdma_chan *schan;

	/* Alloc channel */
	new_hpb_chan = devm_kzalloc(&pdev->dev,
				    sizeof(struct hpb_dmae_chan), GFP_KERNEL);
	if (!new_hpb_chan) {
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"No free memory for allocating DMA channels!\n");
		return -ENOMEM;
	}

	schan = &new_hpb_chan->shdma_chan;
	schan->max_xfer_len = HPB_DMA_TCR_MAX;

	shdma_chan_probe(sdev, schan, id);

	if (pdev->id >= 0)
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dmae%d.%d", pdev->id, id);
	else
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dma.%d", id);

	return 0;
}
static int hpb_dmae_probe(struct platform_device *pdev)
{
	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
	struct hpb_dmae_device *hpbdev;
	struct dma_device *dma_dev;
	struct resource *chan, *comm, *rest, *mode, *irq_res;
	int err, i;

	/* Get platform data */
	if (!pdata || !pdata->num_channels)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
			      GFP_KERNEL);
	if (!hpbdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(hpbdev->chan_reg))
		return PTR_ERR(hpbdev->chan_reg);

	hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
	if (IS_ERR(hpbdev->comm_reg))
		return PTR_ERR(hpbdev->comm_reg);

	hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
	if (IS_ERR(hpbdev->reset_reg))
		return PTR_ERR(hpbdev->reset_reg);

	hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
	if (IS_ERR(hpbdev->mode_reg))
		return PTR_ERR(hpbdev->mode_reg);

	dma_dev = &hpbdev->shdma_dev.dma_dev;

	spin_lock_init(&hpbdev->reg_lock);

	/* Platform data */
	hpbdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	/* Reset DMA controller */
	hpb_dmae_reset(hpbdev);

	pm_runtime_put(&pdev->dev);

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
	err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
	if (err < 0)
		goto error;

	/* Create DMA channels */
	for (i = 0; i < pdata->num_channels; i++)
		hpb_dmae_chan_probe(hpbdev, i);

	platform_set_drvdata(pdev, hpbdev);
	err = dma_async_device_register(dma_dev);
	if (!err)
		return 0;

	shdma_cleanup(&hpbdev->shdma_dev);
error:
	pm_runtime_disable(&pdev->dev);
	return err;
}
static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
{
	struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}
static int hpb_dmae_remove(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);

	pm_runtime_disable(&pdev->dev);

	hpb_dmae_chan_remove(hpbdev);

	return 0;
}
static void hpb_dmae_shutdown(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);

	hpb_dmae_ctl_stop(hpbdev);
}
static struct platform_driver hpb_dmae_driver = {
	.probe		= hpb_dmae_probe,
	.remove		= hpb_dmae_remove,
	.shutdown	= hpb_dmae_shutdown,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "hpb-dma-engine",
	},
};
module_platform_driver(hpb_dmae_driver);
MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
MODULE_LICENSE("GPL");