/*
 * Copyright (C) 2011-2013 Renesas Electronics Corporation
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This file is based on the drivers/dma/sh/shdma.c
 *
 * Renesas SuperH DMA Engine support
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - max DMA size is 16MB.
 */
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_data/dma-rcar-hpbdma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shdma-base.h>
#include <linux/slab.h>
/* DMA channel registers */
#define HPB_DMAE_DSAR0		0x00
#define HPB_DMAE_DDAR0		0x04
#define HPB_DMAE_DTCR0		0x08
#define HPB_DMAE_DSAR1		0x0C
#define HPB_DMAE_DDAR1		0x10
#define HPB_DMAE_DTCR1		0x14
#define HPB_DMAE_DSASR		0x18
#define HPB_DMAE_DDASR		0x1C
#define HPB_DMAE_DTCSR		0x20
#define HPB_DMAE_DPTR		0x24
#define HPB_DMAE_DCR		0x28
#define HPB_DMAE_DCMDR		0x2C
#define HPB_DMAE_DSTPR		0x30
#define HPB_DMAE_DSTSR		0x34
#define HPB_DMAE_DDBGR		0x38
#define HPB_DMAE_DDBGR2		0x3C
#define HPB_DMAE_CHAN(n)	(0x40 * (n))
/* DMA command register (DCMDR) bits */
#define HPB_DMAE_DCMDR_BDOUT	BIT(7)
#define HPB_DMAE_DCMDR_DQSPD	BIT(6)
#define HPB_DMAE_DCMDR_DQSPC	BIT(5)
#define HPB_DMAE_DCMDR_DMSPD	BIT(4)
#define HPB_DMAE_DCMDR_DMSPC	BIT(3)
#define HPB_DMAE_DCMDR_DQEND	BIT(2)
#define HPB_DMAE_DCMDR_DNXT	BIT(1)
#define HPB_DMAE_DCMDR_DMEN	BIT(0)

/* DMA forced stop register (DSTPR) bits */
#define HPB_DMAE_DSTPR_DMSTP	BIT(0)

/* DMA status register (DSTSR) bits */
#define HPB_DMAE_DSTSR_DQSTS	BIT(2)
#define HPB_DMAE_DSTSR_DMSTS	BIT(0)
/* DMA common registers */
#define HPB_DMAE_DTIMR		0x00
#define HPB_DMAE_DINTSR0	0x0C
#define HPB_DMAE_DINTSR1	0x10
#define HPB_DMAE_DINTCR0	0x14
#define HPB_DMAE_DINTCR1	0x18
#define HPB_DMAE_DINTMR0	0x1C
#define HPB_DMAE_DINTMR1	0x20
#define HPB_DMAE_DACTSR0	0x24
#define HPB_DMAE_DACTSR1	0x28
#define HPB_DMAE_HSRSTR(n)	(0x40 + (n) * 4)
#define HPB_DMAE_HPB_DMASPR(n)	(0x140 + (n) * 4)
#define HPB_DMAE_HPB_DMLVLR0	0x160
#define HPB_DMAE_HPB_DMLVLR1	0x164
#define HPB_DMAE_HPB_DMSHPT0	0x168
#define HPB_DMAE_HPB_DMSHPT1	0x16C

#define HPB_DMA_SLAVE_NUMBER	256
#define HPB_DMA_TCR_MAX		0x01000000	/* 16 MiB */
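
/*
 * Per-channel driver state. The generic shdma_chan is embedded first so
 * that the shdma-base helpers can convert between the two views with
 * container_of() (see to_chan() below).
 */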
struct hpb_dmae_chan {
	struct shdma_chan shdma_chan;
	int xfer_mode;			/* DMA transfer mode */
#define XFER_SINGLE	1
#define XFER_DOUBLE	2
	unsigned plane_idx;		/* current DMA information set */
	bool first_desc;		/* first/next transfer */
	int xmit_shift;			/* log_2(bytes_per_xfer) */
	void __iomem *base;
	const struct hpb_dmae_slave_config *cfg;
	char dev_id[16];		/* unique name per DMAC of channel */
	dma_addr_t slave_addr;
};
struct hpb_dmae_device {
	struct shdma_dev shdma_dev;
	spinlock_t reg_lock;		/* comm_reg operation lock */
	struct hpb_dmae_pdata *pdata;
	void __iomem *chan_reg;
	void __iomem *comm_reg;
	void __iomem *reset_reg;
	void __iomem *mode_reg;
};
struct hpb_dmae_regs {
	u32 sar; /* SAR / source address */
	u32 dar; /* DAR / destination address */
	u32 tcr; /* TCR / transfer count */
};

struct hpb_desc {
	struct shdma_desc shdma_desc;
	struct hpb_dmae_regs hw;
	unsigned plane_idx;
};
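
/*
 * Convert between the shdma-base objects embedded above and the
 * HPB-specific containers wrapping them.
 */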
#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
				struct hpb_dmae_device, shdma_dev.dma_dev)

static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
{
	iowrite32(data, hpb_dc->base + reg);
}

static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
{
	return ioread32(hpb_dc->base + reg);
}

static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
}

static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
}
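
/*
 * Channels 0..31 are reported in the DINTSR0/DINTCR0/DINTMR0 registers;
 * channels 32 and up live in the corresponding *1 registers, indexed by
 * (ch - 32).
 */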
static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 v;

	if (ch < 32)
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
	else
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
	return v & 0x1;
}

static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	if (ch < 32)
		iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
	else
		iowrite32((0x1 << (ch - 32)),
			  hpbdev->comm_reg + HPB_DMAE_DINTCR1);
}

static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->mode_reg);
}

static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
{
	return ioread32(hpbdev->mode_reg);
}

static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 intreg;

	spin_lock_irq(&hpbdev->reg_lock);
	if (ch < 32) {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
		iowrite32(BIT(ch) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR0);
	} else {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
		iowrite32(BIT(ch - 32) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR1);
	}
	spin_unlock_irq(&hpbdev->reg_lock);
}
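
/*
 * Assert the requested reset bits, poll until the hardware latches them
 * (up to ~100 ms), then deassert them again.
 */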
static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
{
	u32 rstr;
	int timeout = 10000;	/* 100 ms */

	spin_lock(&hpbdev->reg_lock);
	rstr = ioread32(hpbdev->reset_reg);
	rstr |= data;
	iowrite32(rstr, hpbdev->reset_reg);
	do {
		rstr = ioread32(hpbdev->reset_reg);
		if ((rstr & data) == data)
			break;
		udelay(10);
	} while (timeout--);

	if (timeout < 0)
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"%s timeout\n", __func__);

	rstr &= ~data;
	iowrite32(rstr, hpbdev->reset_reg);
	spin_unlock(&hpbdev->reg_lock);
}

static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
				    u32 mask, u32 data)
{
	u32 mode;

	spin_lock_irq(&hpbdev->reg_lock);
	mode = asyncmdr_read(hpbdev);
	mode &= ~mask;
	mode |= data;
	asyncmdr_write(hpbdev, mode);
	spin_unlock_irq(&hpbdev->reg_lock);
}

static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
{
	dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
}

static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
{
	u32 ch;

	for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
		hsrstr_write(hpbdev, ch);
}
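
/*
 * Derive the transfer-unit shift (log2 of bytes per transfer) from the
 * source/destination port data size fields of the channel's DCR, via the
 * platform-provided ts_shift table. hpb_dmae_set_reg() uses this shift to
 * convert a byte count into transfer units before writing the TCR.
 */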
static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
	int i;

	switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
	case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
	default:
		i = XMIT_SZ_8BIT;
		break;
	case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
		i = XMIT_SZ_16BIT;
		break;
	case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
		i = XMIT_SZ_32BIT;
		break;
	}
	return pdata->ts_shift[i];
}

static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
			     struct hpb_dmae_regs *hw, unsigned plane)
{
	ch_reg_write(hpb_chan, hw->sar,
		     plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
	ch_reg_write(hpb_chan, hw->dar,
		     plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
	ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
		     plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
}

static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
{
	ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
		     HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
}

static void hpb_dmae_halt(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);

	chan->plane_idx = 0;
	chan->first_desc = true;
}

static const struct hpb_dmae_slave_config *
hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int i;

	if (slave_id >= HPB_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->num_slaves; i++)
		if (pdata->slaves[i].id == slave_id)
			return pdata->slaves + i;

	return NULL;
}
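
/*
 * In double (XFER_DOUBLE) mode the controller has two DMA information
 * sets (planes) per channel; each new descriptor is programmed into the
 * plane not currently in use, and DNXT chains it to the running transfer.
 */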
static void hpb_dmae_start_xfer(struct shdma_chan *schan,
				struct shdma_desc *sdesc)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	struct hpb_desc *desc = to_desc(sdesc);

	if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
		hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);

	desc->plane_idx = chan->plane_idx;
	hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
	hpb_dmae_start(chan, !chan->first_desc);

	if (chan->xfer_mode == XFER_DOUBLE) {
		chan->plane_idx ^= 1;
		chan->first_desc = false;
	}
}

static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
				    struct shdma_desc *sdesc)
{
	/*
	 * This is correct since we always have at most single
	 * outstanding DMA transfer per channel, and by the time
	 * we get completion interrupt the transfer is completed.
	 * This will change if we ever use alternating DMA
	 * information sets and submit two descriptors at once.
	 */
	return true;
}

static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	int ch = chan->cfg->dma_ch;

	/* Check Complete DMA Transfer */
	if (dintsr_read(hpbdev, ch)) {
		/* Clear Interrupt status */
		dintcr_write(hpbdev, ch);
		return true;
	}
	return false;
}

static int hpb_dmae_desc_setup(struct shdma_chan *schan,
			       struct shdma_desc *sdesc,
			       dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct hpb_desc *desc = to_desc(sdesc);

	if (*len > (size_t)HPB_DMA_TCR_MAX)
		*len = (size_t)HPB_DMA_TCR_MAX;

	desc->hw.sar = src;
	desc->hw.dar = dst;
	desc->hw.tcr = *len;

	return 0;
}
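
/*
 * Report how much of a partially completed descriptor has been
 * transferred, based on the transfer count remaining in the hardware
 * TCR of the plane the descriptor was submitted on.
 */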
static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct hpb_desc *desc = to_desc(sdesc);
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 tcr = ch_reg_read(chan, desc->plane_idx ?
				HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);

	return (desc->hw.tcr - tcr) << chan->xmit_shift;
}
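
/*
 * In double mode the queue status bit (DQSTS) tracks the chained
 * transfers; in single mode the plain DMA status bit (DMSTS) is enough.
 */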
static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);

	if (chan->xfer_mode == XFER_DOUBLE)
		return dstsr & HPB_DMAE_DSTSR_DQSTS;

	return dstsr & HPB_DMAE_DSTSR_DMSTS;
}

static int
hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
			      const struct hpb_dmae_slave_config *cfg)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	const struct hpb_dmae_channel *channel = pdata->channels;
	int slave_id = cfg->id;
	int i, err;

	for (i = 0; i < pdata->num_channels; i++, channel++) {
		if (channel->s_id == slave_id) {
			struct device *dev = hpb_chan->shdma_chan.dev;

			hpb_chan->base = hpbdev->chan_reg +
				HPB_DMAE_CHAN(cfg->dma_ch);

			dev_dbg(dev, "Detected Slave device\n");
			dev_dbg(dev, " -- slave_id       : 0x%x\n", slave_id);
			dev_dbg(dev, " -- cfg->dma_ch    : %d\n", cfg->dma_ch);
			dev_dbg(dev, " -- channel->ch_irq: %d\n",
				channel->ch_irq);
			break;
		}
	}

	err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
				IRQF_SHARED, hpb_chan->dev_id);
	if (err) {
		dev_err(hpb_chan->shdma_chan.dev,
			"DMA channel request_irq %d failed with error %d\n",
			channel->ch_irq, err);
		return err;
	}

	hpb_chan->plane_idx = 0;
	hpb_chan->first_desc = true;

	if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
		hpb_chan->xfer_mode = XFER_SINGLE;
	} else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
		   (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
		hpb_chan->xfer_mode = XFER_DOUBLE;
	} else {
		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
		return -EINVAL;
	}

	if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
		hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
	ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
	ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
	hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
	hpb_dmae_enable_int(hpbdev, cfg->dma_ch);

	return 0;
}
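
/*
 * Bind a dmaengine client to this channel: look up the platform slave
 * configuration by ID, remember the slave address and program the
 * channel accordingly.
 */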
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
			      dma_addr_t slave_addr, bool try)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	const struct hpb_dmae_slave_config *sc =
		hpb_dmae_find_slave(chan, slave_id);

	if (!sc)
		return -ENODEV;
	if (try)
		return 0;
	chan->cfg = sc;
	chan->slave_addr = slave_addr ? : sc->addr;
	return hpb_dmae_alloc_chan_resources(chan, sc);
}

static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
}

static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	return chan->slave_addr;
}

static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
{
	return &((struct hpb_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops hpb_dmae_ops = {
	.desc_completed = hpb_dmae_desc_completed,
	.halt_channel = hpb_dmae_halt,
	.channel_busy = hpb_dmae_channel_busy,
	.slave_addr = hpb_dmae_slave_addr,
	.desc_setup = hpb_dmae_desc_setup,
	.set_slave = hpb_dmae_set_slave,
	.setup_xfer = hpb_dmae_setup_xfer,
	.start_xfer = hpb_dmae_start_xfer,
	.embedded_desc = hpb_dmae_embedded_desc,
	.chan_irq = hpb_dmae_chan_irq,
	.get_partial = hpb_dmae_get_partial,
};
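
/*
 * A minimal client-side sketch (not part of this driver) of how a slave
 * device would use a channel through the generic dmaengine API; the
 * "slave_id", "buf" and "len" names are illustrative only:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *dchan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	dchan = dma_request_channel(mask, shdma_chan_filter,
 *				    (void *)(uintptr_t)slave_id);
 *	if (dchan) {
 *		struct dma_async_tx_descriptor *tx =
 *			dmaengine_prep_slave_single(dchan, buf, len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *		if (tx) {
 *			dmaengine_submit(tx);
 *			dma_async_issue_pending(dchan);
 *		}
 *	}
 */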
static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
{
	struct shdma_dev *sdev = &hpbdev->shdma_dev;
	struct platform_device *pdev =
		to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
	struct hpb_dmae_chan *new_hpb_chan;
	struct shdma_chan *schan;

	/* Alloc channel */
	new_hpb_chan = devm_kzalloc(&pdev->dev,
				    sizeof(struct hpb_dmae_chan), GFP_KERNEL);
	if (!new_hpb_chan) {
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"No free memory for allocating DMA channels!\n");
		return -ENOMEM;
	}

	schan = &new_hpb_chan->shdma_chan;
	schan->max_xfer_len = HPB_DMA_TCR_MAX;

	shdma_chan_probe(sdev, schan, id);

	if (pdev->id >= 0)
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dmae%d.%d", pdev->id, id);
	else
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dma.%d", id);

	return 0;
}
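
/*
 * The platform device is expected to provide four MEM resources, in
 * order: channel registers, common registers, the reset register and
 * the asynchronous mode register, plus at least one IRQ.
 */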
static int hpb_dmae_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
	struct hpb_dmae_device *hpbdev;
	struct dma_device *dma_dev;
	struct resource *chan, *comm, *rest, *mode, *irq_res;
	int err, i;

	/* Get platform data */
	if (!pdata || !pdata->num_channels)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
			      GFP_KERNEL);
	if (!hpbdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(hpbdev->chan_reg))
		return PTR_ERR(hpbdev->chan_reg);

	hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
	if (IS_ERR(hpbdev->comm_reg))
		return PTR_ERR(hpbdev->comm_reg);

	hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
	if (IS_ERR(hpbdev->reset_reg))
		return PTR_ERR(hpbdev->reset_reg);

	hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
	if (IS_ERR(hpbdev->mode_reg))
		return PTR_ERR(hpbdev->mode_reg);

	dma_dev = &hpbdev->shdma_dev.dma_dev;

	spin_lock_init(&hpbdev->reg_lock);

	/* Platform data */
	hpbdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	/* Reset DMA controller */
	hpb_dmae_reset(hpbdev);

	pm_runtime_put(&pdev->dev);

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_dev->src_addr_widths = widths;
	dma_dev->dst_addr_widths = widths;
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
	err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
	if (err < 0)
		goto error;

	/* Create DMA channels */
	for (i = 0; i < pdata->num_channels; i++)
		hpb_dmae_chan_probe(hpbdev, i);

	platform_set_drvdata(pdev, hpbdev);
	err = dma_async_device_register(dma_dev);
	if (!err)
		return 0;

	shdma_cleanup(&hpbdev->shdma_dev);
error:
	pm_runtime_disable(&pdev->dev);
	return err;
}

static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
{
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
}

static int hpb_dmae_remove(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);

	pm_runtime_disable(&pdev->dev);

	hpb_dmae_chan_remove(hpbdev);

	return 0;
}

static void hpb_dmae_shutdown(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
	hpb_dmae_ctl_stop(hpbdev);
}

static struct platform_driver hpb_dmae_driver = {
	.probe		= hpb_dmae_probe,
	.remove		= hpb_dmae_remove,
	.shutdown	= hpb_dmae_shutdown,
	.driver		= {
		.name	= "hpb-dma-engine",
	},
};
module_platform_driver(hpb_dmae_driver);

MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
MODULE_LICENSE("GPL");