/*
 * Copyright (C) 2011-2013 Renesas Electronics Corporation
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This file is based on the drivers/dma/sh/shdma.c
 *
 * Renesas SuperH DMA Engine support
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - max DMA size is 16MB.
 */
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_data/dma-rcar-hpbdma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shdma-base.h>
#include <linux/slab.h>
/* DMA channel registers */
#define HPB_DMAE_DSAR0	0x00
#define HPB_DMAE_DDAR0	0x04
#define HPB_DMAE_DTCR0	0x08
#define HPB_DMAE_DSAR1	0x0C
#define HPB_DMAE_DDAR1	0x10
#define HPB_DMAE_DTCR1	0x14
#define HPB_DMAE_DSASR	0x18
#define HPB_DMAE_DDASR	0x1C
#define HPB_DMAE_DTCSR	0x20
#define HPB_DMAE_DPTR	0x24
#define HPB_DMAE_DCR	0x28
#define HPB_DMAE_DCMDR	0x2C
#define HPB_DMAE_DSTPR	0x30
#define HPB_DMAE_DSTSR	0x34
#define HPB_DMAE_DDBGR	0x38
#define HPB_DMAE_DDBGR2	0x3C
#define HPB_DMAE_CHAN(n)	(0x40 * (n))
/* DMA command register (DCMDR) bits */
#define HPB_DMAE_DCMDR_BDOUT	BIT(7)
#define HPB_DMAE_DCMDR_DQSPD	BIT(6)
#define HPB_DMAE_DCMDR_DQSPC	BIT(5)
#define HPB_DMAE_DCMDR_DMSPD	BIT(4)
#define HPB_DMAE_DCMDR_DMSPC	BIT(3)
#define HPB_DMAE_DCMDR_DQEND	BIT(2)
#define HPB_DMAE_DCMDR_DNXT	BIT(1)
#define HPB_DMAE_DCMDR_DMEN	BIT(0)
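/*
 * Of these command bits, this driver uses DMEN and DNXT to start a
 * transfer (hpb_dmae_start()), DQEND together with the DSTPR DMSTP bit
 * to halt a channel (hpb_dmae_halt()), and DQSPD to stop the controller
 * on shutdown (hpb_dmae_ctl_stop()).  The remaining bits are listed for
 * completeness only.
 */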
/* DMA forced stop register (DSTPR) bits */
#define HPB_DMAE_DSTPR_DMSTP	BIT(0)

/* DMA status register (DSTSR) bits */
#define HPB_DMAE_DSTSR_DMSTS	BIT(0)
/* DMA common registers */
#define HPB_DMAE_DTIMR		0x00
#define HPB_DMAE_DINTSR0	0x0C
#define HPB_DMAE_DINTSR1	0x10
#define HPB_DMAE_DINTCR0	0x14
#define HPB_DMAE_DINTCR1	0x18
#define HPB_DMAE_DINTMR0	0x1C
#define HPB_DMAE_DINTMR1	0x20
#define HPB_DMAE_DACTSR0	0x24
#define HPB_DMAE_DACTSR1	0x28
#define HPB_DMAE_HSRSTR(n)	(0x40 + (n) * 4)
#define HPB_DMAE_HPB_DMASPR(n)	(0x140 + (n) * 4)
#define HPB_DMAE_HPB_DMLVLR0	0x160
#define HPB_DMAE_HPB_DMLVLR1	0x164
#define HPB_DMAE_HPB_DMSHPT0	0x168
#define HPB_DMAE_HPB_DMSHPT1	0x16C
#define HPB_DMA_SLAVE_NUMBER	256
#define HPB_DMA_TCR_MAX		0x01000000	/* 16 MiB */
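/*
 * HPB_DMA_TCR_MAX is the "max DMA size is 16MB" limit noted in the
 * header comment: hpb_dmae_desc_setup() clamps every descriptor to this
 * byte count, so larger requests are carried out as a series of
 * descriptors by the shdma-base core.
 */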
struct hpb_dmae_chan {
	struct shdma_chan shdma_chan;
	int xfer_mode;			/* DMA transfer mode */
#define XFER_SINGLE	1
#define XFER_DOUBLE	2
	unsigned plane_idx;		/* current DMA information set */
	bool first_desc;		/* first/next transfer */
	int xmit_shift;			/* log_2(bytes_per_xfer) */
	const struct hpb_dmae_slave_config *cfg;
	char dev_id[16];		/* unique name per DMAC of channel */
	dma_addr_t slave_addr;
	void __iomem *base;
};
struct hpb_dmae_device {
	struct shdma_dev shdma_dev;
	spinlock_t reg_lock;		/* comm_reg operation lock */
	struct hpb_dmae_pdata *pdata;
	void __iomem *chan_reg;
	void __iomem *comm_reg;
	void __iomem *reset_reg;
	void __iomem *mode_reg;
};
struct hpb_dmae_regs {
	u32 sar; /* SAR / source address */
	u32 dar; /* DAR / destination address */
	u32 tcr; /* TCR / transfer count */
};

struct hpb_desc {
	struct shdma_desc shdma_desc;
	struct hpb_dmae_regs hw;
	unsigned plane_idx;
};
#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
				struct hpb_dmae_device, shdma_dev.dma_dev)
static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
{
	iowrite32(data, hpb_dc->base + reg);
}

static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
{
	return ioread32(hpb_dc->base + reg);
}

static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
}

static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
}
static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 v;

	if (ch < 32)
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
	else
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
	return v & 0x1;
}

static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	if (ch < 32)
		iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
	else
		iowrite32((0x1 << (ch - 32)),
			  hpbdev->comm_reg + HPB_DMAE_DINTCR1);
}
static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->mode_reg);
}

static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
{
	return ioread32(hpbdev->mode_reg);
}
static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 intreg;

	spin_lock_irq(&hpbdev->reg_lock);
	if (ch < 32) {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
		iowrite32(BIT(ch) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR0);
	} else {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
		iowrite32(BIT(ch - 32) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR1);
	}
	spin_unlock_irq(&hpbdev->reg_lock);
}
static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
{
	u32 rstr;
	int timeout = 10000;	/* 100 ms */

	spin_lock(&hpbdev->reg_lock);
	rstr = ioread32(hpbdev->reset_reg);
	rstr |= data;
	iowrite32(rstr, hpbdev->reset_reg);
	do {
		rstr = ioread32(hpbdev->reset_reg);
		if ((rstr & data) == data)
			break;
		udelay(10);
	} while (timeout--);

	if (timeout < 0)
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"%s timeout\n", __func__);

	rstr &= ~data;
	iowrite32(rstr, hpbdev->reset_reg);
	spin_unlock(&hpbdev->reg_lock);
}
static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
				    u32 mask, u32 data)
{
	u32 mode;

	spin_lock_irq(&hpbdev->reg_lock);
	mode = asyncmdr_read(hpbdev);
	mode &= ~mask;
	mode |= data;
	asyncmdr_write(hpbdev, mode);
	spin_unlock_irq(&hpbdev->reg_lock);
}
static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
{
	dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
}
static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
{
	u32 ch;

	for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
		hsrstr_write(hpbdev, ch);
}
static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
	int i;

	switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
	case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
	default:
		i = XMIT_SZ_8BIT;
		break;
	case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
		i = XMIT_SZ_16BIT;
		break;
	case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
		i = XMIT_SZ_32BIT;
		break;
	}
	return pdata->ts_shift[i];
}
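/*
 * Example: with both SPDS and DPDS configured for 32-bit transfers, a
 * ts_shift[] entry of 2 (log2 of the 4-byte unit) means a 64-byte
 * request is programmed into DTCRn as 64 >> 2 = 16 transfer units by
 * hpb_dmae_set_reg() below.  The exact shift values come from the
 * platform's ts_shift[] table.
 */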
static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
			     struct hpb_dmae_regs *hw, unsigned plane)
{
	ch_reg_write(hpb_chan, hw->sar,
		     plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
	ch_reg_write(hpb_chan, hw->dar,
		     plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
	ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
		     plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
}
static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
{
	ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
		     HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
}
static void hpb_dmae_halt(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
}
static const struct hpb_dmae_slave_config *
hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int i;

	if (slave_id >= HPB_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->num_slaves; i++)
		if (pdata->slaves[i].id == slave_id)
			return pdata->slaves + i;

	return NULL;
}
static void hpb_dmae_start_xfer(struct shdma_chan *schan,
				struct shdma_desc *sdesc)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	struct hpb_desc *desc = to_desc(sdesc);

	if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
		hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);

	desc->plane_idx = chan->plane_idx;
	hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
	hpb_dmae_start(chan, !chan->first_desc);

	if (chan->xfer_mode == XFER_DOUBLE) {
		chan->plane_idx ^= 1;
		chan->first_desc = false;
	}
}
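/*
 * In XFER_DOUBLE mode the channel has two DMA information sets
 * (plane 0 and plane 1).  Flipping plane_idx after each submission
 * makes the next descriptor program the other set, and clearing
 * first_desc makes hpb_dmae_start() queue it with DNXT as the "next"
 * transfer rather than starting a fresh one.
 */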
static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
				    struct shdma_desc *sdesc)
{
	/*
	 * This is correct since we always have at most single
	 * outstanding DMA transfer per channel, and by the time
	 * we get completion interrupt the transfer is completed.
	 * This will change if we ever use alternating DMA
	 * information sets and submit two descriptors at once.
	 */
	return true;
}
static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	int ch = chan->cfg->dma_ch;

	/* Check Complete DMA Transfer */
	if (dintsr_read(hpbdev, ch)) {
		/* Clear Interrupt status */
		dintcr_write(hpbdev, ch);
		return true;
	}
	return false;
}
static int hpb_dmae_desc_setup(struct shdma_chan *schan,
			       struct shdma_desc *sdesc,
			       dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct hpb_desc *desc = to_desc(sdesc);

	if (*len > (size_t)HPB_DMA_TCR_MAX)
		*len = (size_t)HPB_DMA_TCR_MAX;

	desc->hw.sar = src;
	desc->hw.dar = dst;
	desc->hw.tcr = *len;

	return 0;
}
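/*
 * For example, a 20 MiB scatterlist entry is clamped here to 16 MiB;
 * the shdma-base core then calls desc_setup() again for the remaining
 * 4 MiB, producing a chain of descriptors that together cover the whole
 * request.
 */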
static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct hpb_desc *desc = to_desc(sdesc);
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 tcr = ch_reg_read(chan, desc->plane_idx ?
			      HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);

	return (desc->hw.tcr - tcr) << chan->xmit_shift;
}
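/*
 * Assuming DTCRn counts down as the transfer proceeds, the difference
 * between the originally programmed count in desc->hw.tcr and the
 * current register value, scaled back to bytes by xmit_shift,
 * approximates the amount already transferred when a transfer is
 * terminated early.
 */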
static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);

	return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
}
static int
hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
			      const struct hpb_dmae_slave_config *cfg)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	const struct hpb_dmae_channel *channel = pdata->channels;
	int slave_id = cfg->id;
	int i, err;

	for (i = 0; i < pdata->num_channels; i++, channel++) {
		if (channel->s_id == slave_id) {
			struct device *dev = hpb_chan->shdma_chan.dev;

			hpb_chan->base = hpbdev->chan_reg +
				HPB_DMAE_CHAN(cfg->dma_ch);

			dev_dbg(dev, "Detected Slave device\n");
			dev_dbg(dev, " -- slave_id       : 0x%x\n", slave_id);
			dev_dbg(dev, " -- cfg->dma_ch    : %d\n", cfg->dma_ch);
			dev_dbg(dev, " -- channel->ch_irq: %d\n",
				channel->ch_irq);
			break;
		}
	}

	err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
				IRQF_SHARED, hpb_chan->dev_id);
	if (err) {
		dev_err(hpb_chan->shdma_chan.dev,
			"DMA channel request_irq %d failed with error %d\n",
			channel->ch_irq, err);
		return err;
	}

	hpb_chan->plane_idx = 0;
	hpb_chan->first_desc = true;

	if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
		hpb_chan->xfer_mode = XFER_SINGLE;
	} else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
		   (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
		hpb_chan->xfer_mode = XFER_DOUBLE;
	} else {
		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
		return -EINVAL;
	}

	if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
		hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
	ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
	ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
	hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
	hpb_dmae_enable_int(hpbdev, cfg->dma_ch);

	return 0;
}
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
			      dma_addr_t slave_addr, bool try)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	const struct hpb_dmae_slave_config *sc =
		hpb_dmae_find_slave(chan, slave_id);

	if (!sc)
		return -ENODEV;
	if (try)
		return 0;
	chan->cfg = sc;
	chan->slave_addr = slave_addr ? : sc->addr;
	return hpb_dmae_alloc_chan_resources(chan, sc);
}
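/*
 * For reference, a client driver would typically reach set_slave()
 * through the generic dmaengine API, along these lines (illustrative
 * sketch only; "slave_id" is whatever ID the platform data defines for
 * the peripheral):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *dchan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	dchan = dma_request_channel(mask, shdma_chan_filter,
 *				    (void *)(uintptr_t)slave_id);
 */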
static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
}
static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	return chan->slave_addr;
}
static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
{
	return &((struct hpb_desc *)buf)[i].shdma_desc;
}
static const struct shdma_ops hpb_dmae_ops = {
	.desc_completed	= hpb_dmae_desc_completed,
	.halt_channel	= hpb_dmae_halt,
	.channel_busy	= hpb_dmae_channel_busy,
	.slave_addr	= hpb_dmae_slave_addr,
	.desc_setup	= hpb_dmae_desc_setup,
	.set_slave	= hpb_dmae_set_slave,
	.setup_xfer	= hpb_dmae_setup_xfer,
	.start_xfer	= hpb_dmae_start_xfer,
	.embedded_desc	= hpb_dmae_embedded_desc,
	.chan_irq	= hpb_dmae_chan_irq,
	.get_partial	= hpb_dmae_get_partial,
};
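/*
 * The callbacks above are invoked by the shdma-base library, which
 * implements the generic dmaengine operations (descriptor allocation,
 * queue management, completion handling) on top of them; see
 * shdma_init() in hpb_dmae_probe() below.
 */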
static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
{
	struct shdma_dev *sdev = &hpbdev->shdma_dev;
	struct platform_device *pdev =
		to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
	struct hpb_dmae_chan *new_hpb_chan;
	struct shdma_chan *schan;

	/* Alloc channel */
	new_hpb_chan = devm_kzalloc(&pdev->dev,
				    sizeof(struct hpb_dmae_chan), GFP_KERNEL);
	if (!new_hpb_chan) {
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"No free memory for allocating DMA channels!\n");
		return -ENOMEM;
	}

	schan = &new_hpb_chan->shdma_chan;
	shdma_chan_probe(sdev, schan, id);

	if (pdev->id >= 0)
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dmae%d.%d", pdev->id, id);
	else
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dma.%d", id);

	return 0;
}
static int hpb_dmae_probe(struct platform_device *pdev)
{
	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
	struct hpb_dmae_device *hpbdev;
	struct dma_device *dma_dev;
	struct resource *chan, *comm, *rest, *mode, *irq_res;
	int err, i;

	/* Get platform data */
	if (!pdata || !pdata->num_channels)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
			      GFP_KERNEL);
	if (!hpbdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(hpbdev->chan_reg))
		return PTR_ERR(hpbdev->chan_reg);

	hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
	if (IS_ERR(hpbdev->comm_reg))
		return PTR_ERR(hpbdev->comm_reg);

	hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
	if (IS_ERR(hpbdev->reset_reg))
		return PTR_ERR(hpbdev->reset_reg);

	hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
	if (IS_ERR(hpbdev->mode_reg))
		return PTR_ERR(hpbdev->mode_reg);

	dma_dev = &hpbdev->shdma_dev.dma_dev;

	spin_lock_init(&hpbdev->reg_lock);

	/* Platform data */
	hpbdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	/* Reset DMA controller */
	hpb_dmae_reset(hpbdev);

	pm_runtime_put(&pdev->dev);

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
	err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
	if (err < 0)
		goto error;

	/* Create DMA channels */
	for (i = 0; i < pdata->num_channels; i++)
		hpb_dmae_chan_probe(hpbdev, i);

	platform_set_drvdata(pdev, hpbdev);
	err = dma_async_device_register(dma_dev);
	if (!err)
		return 0;

	shdma_cleanup(&hpbdev->shdma_dev);
error:
	pm_runtime_disable(&pdev->dev);
	return err;
}
static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
{
	struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}
static int hpb_dmae_remove(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);

	pm_runtime_disable(&pdev->dev);

	hpb_dmae_chan_remove(hpbdev);

	return 0;
}
static void hpb_dmae_shutdown(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
	hpb_dmae_ctl_stop(hpbdev);
}
static struct platform_driver hpb_dmae_driver = {
	.probe		= hpb_dmae_probe,
	.remove		= hpb_dmae_remove,
	.shutdown	= hpb_dmae_shutdown,
	.driver	= {
		.owner	= THIS_MODULE,
		.name	= "hpb-dma-engine",
	},
};
module_platform_driver(hpb_dmae_driver);
MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
MODULE_LICENSE("GPL");