// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Linaro Ltd.
// Copyright (C) 2019 Socionext Inc.
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"
/* global register */
#define M10V_XDACS 0x00

/* channel local registers (one bank per channel, 0x30 stride) */
#define M10V_XDTBC 0x10
#define M10V_XDSSA 0x14
#define M10V_XDDSA 0x18
#define M10V_XDSAC 0x1C
#define M10V_XDDAC 0x20
#define M10V_XDDCC 0x24
#define M10V_XDDES 0x28
#define M10V_XDDPC 0x2C
#define M10V_XDDSD 0x30

/* XDACS: global enable bit */
#define M10V_XDACS_XE BIT(28)

/* default burst size / burst length programmed into XDSAC/XDDAC */
#define M10V_DEFBS	0x3
#define M10V_DEFBL	0xf

#define M10V_XDSAC_SBS	GENMASK(17, 16)
#define M10V_XDSAC_SBL	GENMASK(11, 8)

#define M10V_XDDAC_DBS	GENMASK(17, 16)
#define M10V_XDDAC_DBL	GENMASK(11, 8)

#define M10V_XDDES_CE	BIT(28)
#define M10V_XDDES_SE	BIT(24)
#define M10V_XDDES_SA	BIT(15)
#define M10V_XDDES_TF	GENMASK(23, 20)
#define M10V_XDDES_EI	BIT(1)
#define M10V_XDDES_TI	BIT(0)

#define M10V_XDDSD_IS_MASK	GENMASK(3, 0)
#define M10V_XDDSD_IS_NORMAL	0x8

#define MLB_XDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
61 struct milbeaut_xdmac_desc
{
62 struct virt_dma_desc vd
;
68 struct milbeaut_xdmac_chan
{
69 struct virt_dma_chan vc
;
70 struct milbeaut_xdmac_desc
*md
;
71 void __iomem
*reg_ch_base
;
74 struct milbeaut_xdmac_device
{
75 struct dma_device ddev
;
76 void __iomem
*reg_base
;
77 struct milbeaut_xdmac_chan channels
[];
80 static struct milbeaut_xdmac_chan
*
81 to_milbeaut_xdmac_chan(struct virt_dma_chan
*vc
)
83 return container_of(vc
, struct milbeaut_xdmac_chan
, vc
);
86 static struct milbeaut_xdmac_desc
*
87 to_milbeaut_xdmac_desc(struct virt_dma_desc
*vd
)
89 return container_of(vd
, struct milbeaut_xdmac_desc
, vd
);
92 /* mc->vc.lock must be held by caller */
93 static struct milbeaut_xdmac_desc
*
94 milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan
*mc
)
96 struct virt_dma_desc
*vd
;
98 vd
= vchan_next_desc(&mc
->vc
);
106 mc
->md
= to_milbeaut_xdmac_desc(vd
);
111 /* mc->vc.lock must be held by caller */
112 static void milbeaut_chan_start(struct milbeaut_xdmac_chan
*mc
,
113 struct milbeaut_xdmac_desc
*md
)
117 /* Setup the channel */
119 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDTBC
);
122 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDSSA
);
125 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDDSA
);
127 val
= readl_relaxed(mc
->reg_ch_base
+ M10V_XDSAC
);
128 val
&= ~(M10V_XDSAC_SBS
| M10V_XDSAC_SBL
);
129 val
|= FIELD_PREP(M10V_XDSAC_SBS
, M10V_DEFBS
) |
130 FIELD_PREP(M10V_XDSAC_SBL
, M10V_DEFBL
);
131 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDSAC
);
133 val
= readl_relaxed(mc
->reg_ch_base
+ M10V_XDDAC
);
134 val
&= ~(M10V_XDDAC_DBS
| M10V_XDDAC_DBL
);
135 val
|= FIELD_PREP(M10V_XDDAC_DBS
, M10V_DEFBS
) |
136 FIELD_PREP(M10V_XDDAC_DBL
, M10V_DEFBL
);
137 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDDAC
);
139 /* Start the channel */
140 val
= readl_relaxed(mc
->reg_ch_base
+ M10V_XDDES
);
141 val
&= ~(M10V_XDDES_CE
| M10V_XDDES_SE
| M10V_XDDES_TF
|
142 M10V_XDDES_EI
| M10V_XDDES_TI
);
143 val
|= FIELD_PREP(M10V_XDDES_CE
, 1) | FIELD_PREP(M10V_XDDES_SE
, 1) |
144 FIELD_PREP(M10V_XDDES_TF
, 1) | FIELD_PREP(M10V_XDDES_EI
, 1) |
145 FIELD_PREP(M10V_XDDES_TI
, 1);
146 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDDES
);
/* mc->vc.lock must be held by caller */
static void milbeaut_xdmac_start(struct milbeaut_xdmac_chan *mc)
{
	struct milbeaut_xdmac_desc *md;

	md = milbeaut_xdmac_next_desc(mc);
	if (md)	/* guard was dropped in the mangled source; NULL means idle */
		milbeaut_chan_start(mc, md);
}
159 static irqreturn_t
milbeaut_xdmac_interrupt(int irq
, void *dev_id
)
161 struct milbeaut_xdmac_chan
*mc
= dev_id
;
162 struct milbeaut_xdmac_desc
*md
;
165 spin_lock(&mc
->vc
.lock
);
168 val
= FIELD_PREP(M10V_XDDSD_IS_MASK
, 0x0);
169 writel_relaxed(val
, mc
->reg_ch_base
+ M10V_XDDSD
);
175 vchan_cookie_complete(&md
->vd
);
177 milbeaut_xdmac_start(mc
);
179 spin_unlock(&mc
->vc
.lock
);
/* dmaengine ->device_free_chan_resources hook: drop all queued descriptors. */
static void milbeaut_xdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}
188 static struct dma_async_tx_descriptor
*
189 milbeaut_xdmac_prep_memcpy(struct dma_chan
*chan
, dma_addr_t dst
,
190 dma_addr_t src
, size_t len
, unsigned long flags
)
192 struct virt_dma_chan
*vc
= to_virt_chan(chan
);
193 struct milbeaut_xdmac_desc
*md
;
195 md
= kzalloc(sizeof(*md
), GFP_NOWAIT
);
203 return vchan_tx_prep(vc
, &md
->vd
, flags
);
206 static int milbeaut_xdmac_terminate_all(struct dma_chan
*chan
)
208 struct virt_dma_chan
*vc
= to_virt_chan(chan
);
209 struct milbeaut_xdmac_chan
*mc
= to_milbeaut_xdmac_chan(vc
);
215 spin_lock_irqsave(&vc
->lock
, flags
);
217 /* Halt the channel */
218 val
= readl(mc
->reg_ch_base
+ M10V_XDDES
);
219 val
&= ~M10V_XDDES_CE
;
220 val
|= FIELD_PREP(M10V_XDDES_CE
, 0);
221 writel(val
, mc
->reg_ch_base
+ M10V_XDDES
);
224 vchan_terminate_vdesc(&mc
->md
->vd
);
228 vchan_get_all_descriptors(vc
, &head
);
230 spin_unlock_irqrestore(&vc
->lock
, flags
);
232 vchan_dma_desc_free_list(vc
, &head
);
/* dmaengine ->device_synchronize hook: wait out any in-flight callbacks. */
static void milbeaut_xdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}
242 static void milbeaut_xdmac_issue_pending(struct dma_chan
*chan
)
244 struct virt_dma_chan
*vc
= to_virt_chan(chan
);
245 struct milbeaut_xdmac_chan
*mc
= to_milbeaut_xdmac_chan(vc
);
248 spin_lock_irqsave(&vc
->lock
, flags
);
250 if (vchan_issue_pending(vc
) && !mc
->md
)
251 milbeaut_xdmac_start(mc
);
253 spin_unlock_irqrestore(&vc
->lock
, flags
);
/* vchan desc_free callback: release a descriptor allocated in prep_memcpy. */
static void milbeaut_xdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_milbeaut_xdmac_desc(vd));
}
261 static int milbeaut_xdmac_chan_init(struct platform_device
*pdev
,
262 struct milbeaut_xdmac_device
*mdev
,
265 struct device
*dev
= &pdev
->dev
;
266 struct milbeaut_xdmac_chan
*mc
= &mdev
->channels
[chan_id
];
270 irq
= platform_get_irq(pdev
, chan_id
);
274 irq_name
= devm_kasprintf(dev
, GFP_KERNEL
, "milbeaut-xdmac-%d",
279 ret
= devm_request_irq(dev
, irq
, milbeaut_xdmac_interrupt
,
280 IRQF_SHARED
, irq_name
, mc
);
284 mc
->reg_ch_base
= mdev
->reg_base
+ chan_id
* 0x30;
286 mc
->vc
.desc_free
= milbeaut_xdmac_desc_free
;
287 vchan_init(&mc
->vc
, &mdev
->ddev
);
292 static void enable_xdmac(struct milbeaut_xdmac_device
*mdev
)
296 val
= readl(mdev
->reg_base
+ M10V_XDACS
);
297 val
|= M10V_XDACS_XE
;
298 writel(val
, mdev
->reg_base
+ M10V_XDACS
);
301 static void disable_xdmac(struct milbeaut_xdmac_device
*mdev
)
305 val
= readl(mdev
->reg_base
+ M10V_XDACS
);
306 val
&= ~M10V_XDACS_XE
;
307 writel(val
, mdev
->reg_base
+ M10V_XDACS
);
310 static int milbeaut_xdmac_probe(struct platform_device
*pdev
)
312 struct device
*dev
= &pdev
->dev
;
313 struct milbeaut_xdmac_device
*mdev
;
314 struct dma_device
*ddev
;
315 int nr_chans
, ret
, i
;
317 nr_chans
= platform_irq_count(pdev
);
321 mdev
= devm_kzalloc(dev
, struct_size(mdev
, channels
, nr_chans
),
326 mdev
->reg_base
= devm_platform_ioremap_resource(pdev
, 0);
327 if (IS_ERR(mdev
->reg_base
))
328 return PTR_ERR(mdev
->reg_base
);
332 dma_cap_set(DMA_MEMCPY
, ddev
->cap_mask
);
333 ddev
->src_addr_widths
= MLB_XDMAC_BUSWIDTHS
;
334 ddev
->dst_addr_widths
= MLB_XDMAC_BUSWIDTHS
;
335 ddev
->device_free_chan_resources
= milbeaut_xdmac_free_chan_resources
;
336 ddev
->device_prep_dma_memcpy
= milbeaut_xdmac_prep_memcpy
;
337 ddev
->device_terminate_all
= milbeaut_xdmac_terminate_all
;
338 ddev
->device_synchronize
= milbeaut_xdmac_synchronize
;
339 ddev
->device_tx_status
= dma_cookie_status
;
340 ddev
->device_issue_pending
= milbeaut_xdmac_issue_pending
;
341 INIT_LIST_HEAD(&ddev
->channels
);
343 for (i
= 0; i
< nr_chans
; i
++) {
344 ret
= milbeaut_xdmac_chan_init(pdev
, mdev
, i
);
351 ret
= dma_async_device_register(ddev
);
355 ret
= of_dma_controller_register(dev
->of_node
,
356 of_dma_simple_xlate
, mdev
);
358 goto unregister_dmac
;
360 platform_set_drvdata(pdev
, mdev
);
365 dma_async_device_unregister(ddev
);
371 static int milbeaut_xdmac_remove(struct platform_device
*pdev
)
373 struct milbeaut_xdmac_device
*mdev
= platform_get_drvdata(pdev
);
374 struct dma_chan
*chan
;
378 * Before reaching here, almost all descriptors have been freed by the
379 * ->device_free_chan_resources() hook. However, each channel might
380 * be still holding one descriptor that was on-flight at that moment.
381 * Terminate it to make sure this hardware is no longer running. Then,
382 * free the channel resources once again to avoid memory leak.
384 list_for_each_entry(chan
, &mdev
->ddev
.channels
, device_node
) {
385 ret
= dmaengine_terminate_sync(chan
);
388 milbeaut_xdmac_free_chan_resources(chan
);
391 of_dma_controller_free(pdev
->dev
.of_node
);
392 dma_async_device_unregister(&mdev
->ddev
);
399 static const struct of_device_id milbeaut_xdmac_match
[] = {
400 { .compatible
= "socionext,milbeaut-m10v-xdmac" },
403 MODULE_DEVICE_TABLE(of
, milbeaut_xdmac_match
);
405 static struct platform_driver milbeaut_xdmac_driver
= {
406 .probe
= milbeaut_xdmac_probe
,
407 .remove
= milbeaut_xdmac_remove
,
409 .name
= "milbeaut-m10v-xdmac",
410 .of_match_table
= milbeaut_xdmac_match
,
413 module_platform_driver(milbeaut_xdmac_driver
);
415 MODULE_DESCRIPTION("Milbeaut XDMAC DmaEngine driver");
416 MODULE_LICENSE("GPL v2");