// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Linaro Ltd.
// Copyright (C) 2019 Socionext Inc.
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"
/* global register */
#define M10V_XDACS 0x00

/* channel local register */
#define M10V_XDTBC 0x10
#define M10V_XDSSA 0x14
#define M10V_XDDSA 0x18
#define M10V_XDSAC 0x1C
#define M10V_XDDAC 0x20
#define M10V_XDDCC 0x24
#define M10V_XDDES 0x28
#define M10V_XDDPC 0x2C
#define M10V_XDDSD 0x30

/* XDACS: global DMA activation bit */
#define M10V_XDACS_XE BIT(28)

/* default burst size / burst length programmed into XDSAC/XDDAC */
#define M10V_DEFBS	0x3
#define M10V_DEFBL	0xf

#define M10V_XDSAC_SBS	GENMASK(17, 16)
#define M10V_XDSAC_SBL	GENMASK(11, 8)

#define M10V_XDDAC_DBS	GENMASK(17, 16)
#define M10V_XDDAC_DBL	GENMASK(11, 8)

#define M10V_XDDES_CE	BIT(28)
#define M10V_XDDES_SE	BIT(24)
#define M10V_XDDES_SA	BIT(15)
#define M10V_XDDES_TF	GENMASK(23, 20)
#define M10V_XDDES_EI	BIT(1)
#define M10V_XDDES_TI	BIT(0)

#define M10V_XDDSD_IS_MASK	GENMASK(3, 0)
#define M10V_XDDSD_IS_NORMAL	0x8

#define MLB_XDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
61 struct milbeaut_xdmac_desc
{
62 struct virt_dma_desc vd
;
68 struct milbeaut_xdmac_chan
{
69 struct virt_dma_chan vc
;
70 struct milbeaut_xdmac_desc
*md
;
71 void __iomem
*reg_ch_base
;
74 struct milbeaut_xdmac_device
{
75 struct dma_device ddev
;
76 void __iomem
*reg_base
;
77 struct milbeaut_xdmac_chan channels
[0];
80 static struct milbeaut_xdmac_chan
*
81 to_milbeaut_xdmac_chan(struct virt_dma_chan
*vc
)
83 return container_of(vc
, struct milbeaut_xdmac_chan
, vc
);
86 static struct milbeaut_xdmac_desc
*
87 to_milbeaut_xdmac_desc(struct virt_dma_desc
*vd
)
89 return container_of(vd
, struct milbeaut_xdmac_desc
, vd
);
92 /* mc->vc.lock must be held by caller */
93 static struct milbeaut_xdmac_desc
*
94 milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan
*mc
)
96 struct virt_dma_desc
*vd
;
98 vd
= vchan_next_desc(&mc
->vc
);
106 mc
->md
= to_milbeaut_xdmac_desc(vd
);
111 /* mc->vc.lock must be held by caller */
112 static void milbeaut_chan_start(struct milbeaut_xdmac_chan
*mc
,
113 struct milbeaut_xdmac_desc
*md
)
117 /* Setup the channel */
119 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDTBC
);
122 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDSSA
);
125 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDDSA
);
127 val
= readl_relaxed(mc
->reg_ch_base
+ M10V_XDSAC
);
128 val
&= ~(M10V_XDSAC_SBS
| M10V_XDSAC_SBL
);
129 val
|= FIELD_PREP(M10V_XDSAC_SBS
, M10V_DEFBS
) |
130 FIELD_PREP(M10V_XDSAC_SBL
, M10V_DEFBL
);
131 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDSAC
);
133 val
= readl_relaxed(mc
->reg_ch_base
+ M10V_XDDAC
);
134 val
&= ~(M10V_XDDAC_DBS
| M10V_XDDAC_DBL
);
135 val
|= FIELD_PREP(M10V_XDDAC_DBS
, M10V_DEFBS
) |
136 FIELD_PREP(M10V_XDDAC_DBL
, M10V_DEFBL
);
137 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDDAC
);
139 /* Start the channel */
140 val
= readl_relaxed(mc
->reg_ch_base
+ M10V_XDDES
);
141 val
&= ~(M10V_XDDES_CE
| M10V_XDDES_SE
| M10V_XDDES_TF
|
142 M10V_XDDES_EI
| M10V_XDDES_TI
);
143 val
|= FIELD_PREP(M10V_XDDES_CE
, 1) | FIELD_PREP(M10V_XDDES_SE
, 1) |
144 FIELD_PREP(M10V_XDDES_TF
, 1) | FIELD_PREP(M10V_XDDES_EI
, 1) |
145 FIELD_PREP(M10V_XDDES_TI
, 1);
146 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDDES
);
/* mc->vc.lock must be held by caller */
static void milbeaut_xdmac_start(struct milbeaut_xdmac_chan *mc)
{
	struct milbeaut_xdmac_desc *md;

	md = milbeaut_xdmac_next_desc(mc);
	if (md)
		milbeaut_chan_start(mc, md);
}
159 static irqreturn_t
milbeaut_xdmac_interrupt(int irq
, void *dev_id
)
161 struct milbeaut_xdmac_chan
*mc
= dev_id
;
162 struct milbeaut_xdmac_desc
*md
;
166 spin_lock_irqsave(&mc
->vc
.lock
, flags
);
169 val
= FIELD_PREP(M10V_XDDSD_IS_MASK
, 0x0);
170 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDDSD
);
176 vchan_cookie_complete(&md
->vd
);
178 milbeaut_xdmac_start(mc
);
180 spin_unlock_irqrestore(&mc
->vc
.lock
, flags
);
/* Release all descriptors still held by the virt-dma layer. */
static void milbeaut_xdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}
189 static struct dma_async_tx_descriptor
*
190 milbeaut_xdmac_prep_memcpy(struct dma_chan
*chan
, dma_addr_t dst
,
191 dma_addr_t src
, size_t len
, unsigned long flags
)
193 struct virt_dma_chan
*vc
= to_virt_chan(chan
);
194 struct milbeaut_xdmac_desc
*md
;
196 md
= kzalloc(sizeof(*md
), GFP_NOWAIT
);
204 return vchan_tx_prep(vc
, &md
->vd
, flags
);
207 static int milbeaut_xdmac_terminate_all(struct dma_chan
*chan
)
209 struct virt_dma_chan
*vc
= to_virt_chan(chan
);
210 struct milbeaut_xdmac_chan
*mc
= to_milbeaut_xdmac_chan(vc
);
216 spin_lock_irqsave(&vc
->lock
, flags
);
218 /* Halt the channel */
219 val
= readl(mc
->reg_ch_base
+ M10V_XDDES
);
220 val
&= ~M10V_XDDES_CE
;
221 val
|= FIELD_PREP(M10V_XDDES_CE
, 0);
222 writel(val
, mc
->reg_ch_base
+ M10V_XDDES
);
225 vchan_terminate_vdesc(&mc
->md
->vd
);
229 vchan_get_all_descriptors(vc
, &head
);
231 spin_unlock_irqrestore(&vc
->lock
, flags
);
233 vchan_dma_desc_free_list(vc
, &head
);
/* Wait until terminated descriptors are fully reclaimed. */
static void milbeaut_xdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}
243 static void milbeaut_xdmac_issue_pending(struct dma_chan
*chan
)
245 struct virt_dma_chan
*vc
= to_virt_chan(chan
);
246 struct milbeaut_xdmac_chan
*mc
= to_milbeaut_xdmac_chan(vc
);
249 spin_lock_irqsave(&vc
->lock
, flags
);
251 if (vchan_issue_pending(vc
) && !mc
->md
)
252 milbeaut_xdmac_start(mc
);
254 spin_unlock_irqrestore(&vc
->lock
, flags
);
/* virt-dma desc_free callback: descriptors are plain kzalloc'ed. */
static void milbeaut_xdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_milbeaut_xdmac_desc(vd));
}
262 static int milbeaut_xdmac_chan_init(struct platform_device
*pdev
,
263 struct milbeaut_xdmac_device
*mdev
,
266 struct device
*dev
= &pdev
->dev
;
267 struct milbeaut_xdmac_chan
*mc
= &mdev
->channels
[chan_id
];
271 irq
= platform_get_irq(pdev
, chan_id
);
275 irq_name
= devm_kasprintf(dev
, GFP_KERNEL
, "milbeaut-xdmac-%d",
280 ret
= devm_request_irq(dev
, irq
, milbeaut_xdmac_interrupt
,
281 IRQF_SHARED
, irq_name
, mc
);
285 mc
->reg_ch_base
= mdev
->reg_base
+ chan_id
* 0x30;
287 mc
->vc
.desc_free
= milbeaut_xdmac_desc_free
;
288 vchan_init(&mc
->vc
, &mdev
->ddev
);
293 static void enable_xdmac(struct milbeaut_xdmac_device
*mdev
)
297 val
= readl(mdev
->reg_base
+ M10V_XDACS
);
298 val
|= M10V_XDACS_XE
;
299 writel(val
, mdev
->reg_base
+ M10V_XDACS
);
302 static void disable_xdmac(struct milbeaut_xdmac_device
*mdev
)
306 val
= readl(mdev
->reg_base
+ M10V_XDACS
);
307 val
&= ~M10V_XDACS_XE
;
308 writel(val
, mdev
->reg_base
+ M10V_XDACS
);
311 static int milbeaut_xdmac_probe(struct platform_device
*pdev
)
313 struct device
*dev
= &pdev
->dev
;
314 struct milbeaut_xdmac_device
*mdev
;
315 struct dma_device
*ddev
;
316 int nr_chans
, ret
, i
;
318 nr_chans
= platform_irq_count(pdev
);
322 mdev
= devm_kzalloc(dev
, struct_size(mdev
, channels
, nr_chans
),
327 mdev
->reg_base
= devm_platform_ioremap_resource(pdev
, 0);
328 if (IS_ERR(mdev
->reg_base
))
329 return PTR_ERR(mdev
->reg_base
);
333 dma_cap_set(DMA_MEMCPY
, ddev
->cap_mask
);
334 ddev
->src_addr_widths
= MLB_XDMAC_BUSWIDTHS
;
335 ddev
->dst_addr_widths
= MLB_XDMAC_BUSWIDTHS
;
336 ddev
->device_free_chan_resources
= milbeaut_xdmac_free_chan_resources
;
337 ddev
->device_prep_dma_memcpy
= milbeaut_xdmac_prep_memcpy
;
338 ddev
->device_terminate_all
= milbeaut_xdmac_terminate_all
;
339 ddev
->device_synchronize
= milbeaut_xdmac_synchronize
;
340 ddev
->device_tx_status
= dma_cookie_status
;
341 ddev
->device_issue_pending
= milbeaut_xdmac_issue_pending
;
342 INIT_LIST_HEAD(&ddev
->channels
);
344 for (i
= 0; i
< nr_chans
; i
++) {
345 ret
= milbeaut_xdmac_chan_init(pdev
, mdev
, i
);
352 ret
= dma_async_device_register(ddev
);
356 ret
= of_dma_controller_register(dev
->of_node
,
357 of_dma_simple_xlate
, mdev
);
359 goto unregister_dmac
;
361 platform_set_drvdata(pdev
, mdev
);
366 dma_async_device_unregister(ddev
);
370 static int milbeaut_xdmac_remove(struct platform_device
*pdev
)
372 struct milbeaut_xdmac_device
*mdev
= platform_get_drvdata(pdev
);
373 struct dma_chan
*chan
;
377 * Before reaching here, almost all descriptors have been freed by the
378 * ->device_free_chan_resources() hook. However, each channel might
379 * be still holding one descriptor that was on-flight at that moment.
380 * Terminate it to make sure this hardware is no longer running. Then,
381 * free the channel resources once again to avoid memory leak.
383 list_for_each_entry(chan
, &mdev
->ddev
.channels
, device_node
) {
384 ret
= dmaengine_terminate_sync(chan
);
387 milbeaut_xdmac_free_chan_resources(chan
);
390 of_dma_controller_free(pdev
->dev
.of_node
);
391 dma_async_device_unregister(&mdev
->ddev
);
398 static const struct of_device_id milbeaut_xdmac_match
[] = {
399 { .compatible
= "socionext,milbeaut-m10v-xdmac" },
402 MODULE_DEVICE_TABLE(of
, milbeaut_xdmac_match
);
404 static struct platform_driver milbeaut_xdmac_driver
= {
405 .probe
= milbeaut_xdmac_probe
,
406 .remove
= milbeaut_xdmac_remove
,
408 .name
= "milbeaut-m10v-xdmac",
409 .of_match_table
= milbeaut_xdmac_match
,
412 module_platform_driver(milbeaut_xdmac_driver
);
414 MODULE_DESCRIPTION("Milbeaut XDMAC DmaEngine driver");
415 MODULE_LICENSE("GPL v2");