// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-mcf-edma.h>

#include "fsl-edma-common.h"

#define EDMA_CHANNELS		64
#define EDMA_MASK_CH(x)		((x) & GENMASK(5, 0))
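
/*
 * Transfer-complete interrupt: the 64 per-channel interrupt flags are
 * read as two 32-bit halves (INTH for channels 32-63, INTL for 0-31)
 * and combined into one 64-bit map. Each pending channel is
 * acknowledged by writing its number to CINT before its descriptor is
 * completed or, for cyclic transfers, its period callback is invoked.
 */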
static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int ch;
	struct fsl_edma_chan *mcf_chan;
	u64 intmap;

	intmap = ioread32(regs->inth);
	intmap <<= 32;
	intmap |= ioread32(regs->intl);
	if (!intmap)
		return IRQ_NONE;

	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
		if (intmap & BIT(ch)) {
			iowrite8(EDMA_MASK_CH(ch), regs->cint);

			mcf_chan = &mcf_edma->chans[ch];

			spin_lock(&mcf_chan->vchan.lock);

			if (!mcf_chan->edesc) {
				/* terminate_all called before */
				spin_unlock(&mcf_chan->vchan.lock);
				continue;
			}

			if (!mcf_chan->edesc->iscyclic) {
				list_del(&mcf_chan->edesc->vdesc.node);
				vchan_cookie_complete(&mcf_chan->edesc->vdesc);
				mcf_chan->edesc = NULL;
				mcf_chan->status = DMA_COMPLETE;
				mcf_chan->idle = true;
			} else {
				vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
			}

			if (!mcf_chan->edesc)
				fsl_edma_xfer_desc(mcf_chan);

			spin_unlock(&mcf_chan->vchan.lock);
		}
	}

	return IRQ_HANDLED;
}
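
/*
 * Error interrupt: error status is likewise split across two 32-bit
 * registers, ERRL for channels 0-31 and ERRH for 32-63. A faulted
 * channel has its hardware request disabled, its error flag cleared
 * via CERR, and is marked DMA_ERROR/idle so it can be reprogrammed.
 */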
static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int err, ch;

	err = ioread32(regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
		if (err & BIT(ch)) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	err = ioread32(regs->errh);
	if (!err)
		return IRQ_NONE;

	for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
		if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	return IRQ_HANDLED;
}
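
/*
 * On ColdFire the transfer-complete sources are described by the named
 * platform IRQ resources "edma-tx-00-15" and "edma-tx-16-55" (ranges
 * requested line by line), plus single "edma-tx-56-63" and "edma-err"
 * lines; every transfer line is wired to the same tx handler.
 */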
static int mcf_edma_irq_init(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int ret = 0, i;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
				IORESOURCE_IRQ, "edma-tx-00-15");
	if (!res)
		return -1;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (!res)
		return -1;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_tx_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	ret = platform_get_irq_byname(pdev, "edma-err");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_err_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	return 0;
}
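
/* Undo mcf_edma_irq_init(): release every line the same way it was requested. */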
static void mcf_edma_irq_free(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int irq;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-00-15");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);

	irq = platform_get_irq_byname(pdev, "edma-err");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);
}
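
/*
 * ColdFire-specific hooks handed to the shared fsl-edma core; only the
 * IRQ setup differs from the other eDMA integrations.
 */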
static struct fsl_edma_drvdata mcf_data = {
	.version = v2,
	.setup_irq = mcf_edma_irq_init,
};
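
/*
 * Probe: allocate the engine together with its per-channel state, map
 * the controller registers through the common fsl-edma helpers, hook
 * up the interrupt lines, then register with the dmaengine core.
 */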
static int mcf_edma_probe(struct platform_device *pdev)
{
	struct mcf_edma_platform_data *pdata;
	struct fsl_edma_engine *mcf_edma;
	struct fsl_edma_chan *mcf_chan;
	struct edma_regs *regs;
	struct resource *res;
	int ret, i, len, chans;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data supplied\n");
		return -EINVAL;
	}

	chans = pdata->dma_channels;
	len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
	mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!mcf_edma)
		return -ENOMEM;

	mcf_edma->n_chans = chans;

	/* Set up drvdata for ColdFire edma */
	mcf_edma->drvdata = &mcf_data;
	mcf_edma->big_endian = 1;

	if (!mcf_edma->n_chans) {
		dev_info(&pdev->dev, "setting default channel number to 64");
		mcf_edma->n_chans = 64;
	}

	mutex_init(&mcf_edma->fsl_edma_mutex);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mcf_edma->membase))
		return PTR_ERR(mcf_edma->membase);

	fsl_edma_setup_regs(mcf_edma);
	regs = &mcf_edma->regs;

	INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
	for (i = 0; i < mcf_edma->n_chans; i++) {
		struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];

		mcf_chan->edma = mcf_edma;
		mcf_chan->slave_id = i;
		mcf_chan->idle = true;
		mcf_chan->dma_dir = DMA_NONE;
		mcf_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
		iowrite32(0x0, &regs->tcd[i].csr);
	}

	iowrite32(~0, regs->inth);
	iowrite32(~0, regs->intl);

	ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);

	mcf_edma->dma_dev.dev = &pdev->dev;
	mcf_edma->dma_dev.device_alloc_chan_resources =
			fsl_edma_alloc_chan_resources;
	mcf_edma->dma_dev.device_free_chan_resources =
			fsl_edma_free_chan_resources;
	mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
	mcf_edma->dma_dev.device_prep_dma_cyclic =
			fsl_edma_prep_dma_cyclic;
	mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	mcf_edma->dma_dev.device_pause = fsl_edma_pause;
	mcf_edma->dma_dev.device_resume = fsl_edma_resume;
	mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.directions =
			BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
	mcf_edma->dma_dev.filter.map = pdata->slave_map;
	mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;

	platform_set_drvdata(pdev, mcf_edma);

	ret = dma_async_device_register(&mcf_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		return ret;
	}

	/* Enable round robin arbitration */
	iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}
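
/* Tear down in reverse order of probe: IRQs, virt-dma channels, dmaengine registration. */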
static int mcf_edma_remove(struct platform_device *pdev)
{
	struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);

	mcf_edma_irq_free(pdev, mcf_edma);
	fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
	dma_async_device_unregister(&mcf_edma->dma_dev);

	return 0;
}
static struct platform_driver mcf_edma_driver = {
	.driver		= {
		.name	= "mcf-edma",
	},
	.probe		= mcf_edma_probe,
	.remove		= mcf_edma_remove,
};
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &mcf_edma_driver.driver) {
		struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);

		return (mcf_chan->slave_id == (uintptr_t)param);
	}

	return false;
}
EXPORT_SYMBOL(mcf_edma_filter_fn);
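
/*
 * Example (a minimal sketch, not part of this driver): a peripheral
 * driver would typically request a channel by slave id through the
 * filter function above; the id 17 below is purely hypothetical.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mcf_edma_filter_fn, (void *)17);
 *	if (!chan)
 *		return -ENODEV;
 */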
static int __init mcf_edma_init(void)
{
	return platform_driver_register(&mcf_edma_driver);
}
subsys_initcall(mcf_edma_init);

static void __exit mcf_edma_exit(void)
{
	platform_driver_unregister(&mcf_edma_driver);
}
module_exit(mcf_edma_exit);

MODULE_ALIAS("platform:mcf-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
MODULE_LICENSE("GPL v2");