// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Linaro Ltd.
// Copyright (C) 2019 Socionext Inc.

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/bitfield.h>

#include "virt-dma.h"

#define MLB_HDMAC_DMACR		0x0	/* global */
#define MLB_HDMAC_DE		BIT(31)
#define MLB_HDMAC_DS		BIT(30)
#define MLB_HDMAC_PR		BIT(28)
#define MLB_HDMAC_DH		GENMASK(27, 24)

#define MLB_HDMAC_CH_STRIDE	0x10

#define MLB_HDMAC_DMACA		0x0	/* channel */
#define MLB_HDMAC_EB		BIT(31)
#define MLB_HDMAC_PB		BIT(30)
#define MLB_HDMAC_ST		BIT(29)
#define MLB_HDMAC_IS		GENMASK(28, 24)
#define MLB_HDMAC_BT		GENMASK(23, 20)
#define MLB_HDMAC_BC		GENMASK(19, 16)
#define MLB_HDMAC_TC		GENMASK(15, 0)
#define MLB_HDMAC_DMACB		0x4
#define MLB_HDMAC_TT		GENMASK(31, 30)
#define MLB_HDMAC_MS		GENMASK(29, 28)
#define MLB_HDMAC_TW		GENMASK(27, 26)
#define MLB_HDMAC_FS		BIT(25)
#define MLB_HDMAC_FD		BIT(24)
#define MLB_HDMAC_RC		BIT(23)
#define MLB_HDMAC_RS		BIT(22)
#define MLB_HDMAC_RD		BIT(21)
#define MLB_HDMAC_EI		BIT(20)
#define MLB_HDMAC_CI		BIT(19)
#define HDMAC_PAUSE		0x7
#define MLB_HDMAC_SS		GENMASK(18, 16)
#define MLB_HDMAC_SP		GENMASK(15, 12)
#define MLB_HDMAC_DP		GENMASK(11, 8)
#define MLB_HDMAC_DMACSA	0x8
#define MLB_HDMAC_DMACDA	0xc
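
/*
 * The global control register DMACR lives at offset 0; each channel then
 * gets its own 16-byte block of DMACA/DMACB/DMACSA/DMACDA registers,
 * MLB_HDMAC_CH_STRIDE apart (see milbeaut_hdmac_chan_init() below).
 */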

#define MLB_HDMAC_BUSWIDTHS		(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
					 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
					 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
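
/*
 * A descriptor keeps a private copy of the scatterlist; sg_cur is the
 * index of the segment currently in flight and is advanced by the
 * completion interrupt until it reaches sg_len.
 */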

struct milbeaut_hdmac_desc {
	struct virt_dma_desc vd;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int sg_cur;
	enum dma_transfer_direction dir;
};

struct milbeaut_hdmac_chan {
	struct virt_dma_chan vc;
	struct milbeaut_hdmac_device *mdev;
	struct milbeaut_hdmac_desc *md;
	void __iomem *reg_ch_base;
	unsigned int slave_id;
	struct dma_slave_config cfg;
};

struct milbeaut_hdmac_device {
	struct dma_device ddev;
	struct clk *clk;
	void __iomem *reg_base;
	struct milbeaut_hdmac_chan channels[];
};
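
/*
 * One channel per interrupt line: probe() sizes the flexible channels[]
 * array from platform_irq_count() and requests one IRQ per channel.
 */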

static struct milbeaut_hdmac_chan *
to_milbeaut_hdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct milbeaut_hdmac_chan, vc);
}

static struct milbeaut_hdmac_desc *
to_milbeaut_hdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct milbeaut_hdmac_desc, vd);
}

/* mc->vc.lock must be held by caller */
static struct milbeaut_hdmac_desc *
milbeaut_hdmac_next_desc(struct milbeaut_hdmac_chan *mc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&mc->vc);
	if (!vd) {
		mc->md = NULL;
		return NULL;
	}

	list_del(&vd->node);

	mc->md = to_milbeaut_hdmac_desc(vd);

	return mc->md;
}

/* mc->vc.lock must be held by caller */
static void milbeaut_chan_start(struct milbeaut_hdmac_chan *mc,
				struct milbeaut_hdmac_desc *md)
{
	struct scatterlist *sg;
	u32 cb, ca, src_addr, dest_addr, len;
	int width, burst;

	sg = &md->sgl[md->sg_cur];
	len = sg_dma_len(sg);

	cb = MLB_HDMAC_CI | MLB_HDMAC_EI;
	if (md->dir == DMA_MEM_TO_DEV) {
		cb |= MLB_HDMAC_FD;
		width = mc->cfg.dst_addr_width;
		burst = mc->cfg.dst_maxburst;
		src_addr = sg_dma_address(sg);
		dest_addr = mc->cfg.dst_addr;
	} else {
		cb |= MLB_HDMAC_FS;
		width = mc->cfg.src_addr_width;
		burst = mc->cfg.src_maxburst;
		src_addr = mc->cfg.src_addr;
		dest_addr = sg_dma_address(sg);
	}
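
	/*
	 * TW encodes the bus width: the 1-, 2- and 4-byte
	 * DMA_SLAVE_BUSWIDTH_* values map to 0, 1 and 2, which is what
	 * "width >> 1" produces for those inputs.
	 */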
	cb |= FIELD_PREP(MLB_HDMAC_TW, (width >> 1));
	cb |= FIELD_PREP(MLB_HDMAC_MS, 2);

	writel_relaxed(MLB_HDMAC_DE, mc->mdev->reg_base + MLB_HDMAC_DMACR);
	writel_relaxed(src_addr, mc->reg_ch_base + MLB_HDMAC_DMACSA);
	writel_relaxed(dest_addr, mc->reg_ch_base + MLB_HDMAC_DMACDA);
	writel_relaxed(cb, mc->reg_ch_base + MLB_HDMAC_DMACB);

	ca = FIELD_PREP(MLB_HDMAC_IS, mc->slave_id);
	if (burst == 16)
		ca |= FIELD_PREP(MLB_HDMAC_BT, 0xf);
	else if (burst == 8)
		ca |= FIELD_PREP(MLB_HDMAC_BT, 0xd);
	else if (burst == 4)
		ca |= FIELD_PREP(MLB_HDMAC_BT, 0xb);
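
	/*
	 * Scale the burst length from beats to bytes so TC can be set to
	 * the number of bursts in this segment, minus one.
	 */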
	burst *= width;
	ca |= FIELD_PREP(MLB_HDMAC_TC, (len / burst - 1));
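
	/*
	 * DMACA is written twice: first with EB clear to latch the
	 * configuration, then with EB set to start the channel.
	 */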
	writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
	ca |= MLB_HDMAC_EB;
	writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
}

/* mc->vc.lock must be held by caller */
static void milbeaut_hdmac_start(struct milbeaut_hdmac_chan *mc)
{
	struct milbeaut_hdmac_desc *md;

	md = milbeaut_hdmac_next_desc(mc);
	if (md)
		milbeaut_chan_start(mc, md);
}

static irqreturn_t milbeaut_hdmac_interrupt(int irq, void *dev_id)
{
	struct milbeaut_hdmac_chan *mc = dev_id;
	struct milbeaut_hdmac_desc *md;
	u32 val;

	spin_lock(&mc->vc.lock);

	/* Ack and Disable irqs */
	val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACB);
	val &= ~(FIELD_PREP(MLB_HDMAC_SS, HDMAC_PAUSE));
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
	val &= ~MLB_HDMAC_EI;
	val &= ~MLB_HDMAC_CI;
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);

	md = mc->md;
	if (!md)
		goto out;

	md->sg_cur++;

	if (md->sg_cur >= md->sg_len) {
		vchan_cookie_complete(&md->vd);
		md = milbeaut_hdmac_next_desc(mc);
		if (!md)
			goto out;
	}

	milbeaut_chan_start(mc, md);

out:
	spin_unlock(&mc->vc.lock);

	return IRQ_HANDLED;
}

static void milbeaut_hdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static int
milbeaut_hdmac_chan_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);

	spin_lock(&mc->vc.lock);
	mc->cfg = *cfg;
	spin_unlock(&mc->vc.lock);

	return 0;
}

static int milbeaut_hdmac_chan_pause(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
	u32 val;

	spin_lock(&mc->vc.lock);
	val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
	val |= MLB_HDMAC_PB;
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
	spin_unlock(&mc->vc.lock);

	return 0;
}

static int milbeaut_hdmac_chan_resume(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
	u32 val;

	spin_lock(&mc->vc.lock);
	val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
	val &= ~MLB_HDMAC_PB;
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
	spin_unlock(&mc->vc.lock);

	return 0;
}

static struct dma_async_tx_descriptor *
milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_desc *md;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	md = kzalloc(sizeof(*md), GFP_NOWAIT);
	if (!md)
		return NULL;
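
	/*
	 * Keep a private copy of the scatterlist: the segments are walked
	 * one at a time from the completion interrupt, long after the
	 * caller's list may have gone out of scope.
	 */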
	md->sgl = kzalloc(sizeof(*sgl) * sg_len, GFP_NOWAIT);
	if (!md->sgl) {
		kfree(md);
		return NULL;
	}

	for (i = 0; i < sg_len; i++)
		md->sgl[i] = sgl[i];

	md->sg_len = sg_len;
	md->dir = direction;

	return vchan_tx_prep(vc, &md->vd, flags);
}

static int milbeaut_hdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
	unsigned long flags;
	u32 val;

	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
	val &= ~MLB_HDMAC_EB; /* disable the channel */
	writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);

	if (mc->md) {
		vchan_terminate_vdesc(&mc->md->vd);
		mc->md = NULL;
	}

	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return 0;
}

static void milbeaut_hdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}

static enum dma_status milbeaut_hdmac_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct virt_dma_chan *vc;
	struct virt_dma_desc *vd;
	struct milbeaut_hdmac_chan *mc;
	struct milbeaut_hdmac_desc *md = NULL;
	enum dma_status stat;
	unsigned long flags;
	int i;

	stat = dma_cookie_status(chan, cookie, txstate);
	/* Return immediately if we do not need to compute the residue. */
	if (stat == DMA_COMPLETE || !txstate)
		return stat;

	vc = to_virt_chan(chan);

	spin_lock_irqsave(&vc->lock, flags);

	mc = to_milbeaut_hdmac_chan(vc);

	/* residue from the on-flight chunk */
	if (mc->md && mc->md->vd.tx.cookie == cookie) {
		struct scatterlist *sg;
		u32 done;

		md = mc->md;
		sg = &md->sgl[md->sg_cur];

		if (md->dir == DMA_DEV_TO_MEM)
			done = readl_relaxed(mc->reg_ch_base
					     + MLB_HDMAC_DMACDA);
		else
			done = readl_relaxed(mc->reg_ch_base
					     + MLB_HDMAC_DMACSA);
		done -= sg_dma_address(sg);
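
		/*
		 * The loop at the end adds the full length of every
		 * segment from sg_cur onwards, including this in-flight
		 * one, so seeding the residue with -done leaves exactly
		 * the bytes still outstanding.
		 */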
		txstate->residue = -done;
	}

	if (!md) {
		vd = vchan_find_desc(vc, cookie);
		if (vd)
			md = to_milbeaut_hdmac_desc(vd);
	}

	if (md)
		/* residue from the queued chunks */
		for (i = md->sg_cur; i < md->sg_len; i++)
			txstate->residue += sg_dma_len(&md->sgl[i]);

	spin_unlock_irqrestore(&vc->lock, flags);

	return stat;
}

static void milbeaut_hdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !mc->md)
		milbeaut_hdmac_start(mc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void milbeaut_hdmac_desc_free(struct virt_dma_desc *vd)
{
	struct milbeaut_hdmac_desc *md = to_milbeaut_hdmac_desc(vd);

	kfree(md->sgl);
	kfree(md);
}

static struct dma_chan *
milbeaut_hdmac_xlate(struct of_phandle_args *dma_spec, struct of_dma *of_dma)
{
	struct milbeaut_hdmac_device *mdev = of_dma->of_dma_data;
	struct milbeaut_hdmac_chan *mc;
	struct virt_dma_chan *vc;
	struct dma_chan *chan;

	if (dma_spec->args_count != 1)
		return NULL;

	chan = dma_get_any_slave_channel(&mdev->ddev);
	if (!chan)
		return NULL;

	vc = to_virt_chan(chan);
	mc = to_milbeaut_hdmac_chan(vc);
	mc->slave_id = dma_spec->args[0];

	return chan;
}
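
/*
 * Consumer binding sketch (illustrative values): the controller takes a
 * single specifier cell, which becomes mc->slave_id above, e.g.
 *
 *	dmas = <&hdmac 22>;
 *	dma-names = "rx";
 */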

static int milbeaut_hdmac_chan_init(struct platform_device *pdev,
				    struct milbeaut_hdmac_device *mdev,
				    int chan_id)
{
	struct device *dev = &pdev->dev;
	struct milbeaut_hdmac_chan *mc = &mdev->channels[chan_id];
	char *irq_name;
	int irq, ret;

	irq = platform_get_irq(pdev, chan_id);
	if (irq < 0)
		return irq;

	irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-hdmac-%d",
				  chan_id);
	if (!irq_name)
		return -ENOMEM;

	ret = devm_request_irq(dev, irq, milbeaut_hdmac_interrupt,
			       IRQF_SHARED, irq_name, mc);
	if (ret)
		return ret;

	mc->mdev = mdev;
	mc->reg_ch_base = mdev->reg_base + MLB_HDMAC_CH_STRIDE * (chan_id + 1);
	mc->vc.desc_free = milbeaut_hdmac_desc_free;
	vchan_init(&mc->vc, &mdev->ddev);

	return 0;
}

static int milbeaut_hdmac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct milbeaut_hdmac_device *mdev;
	struct dma_device *ddev;
	int nr_chans, ret, i;

	nr_chans = platform_irq_count(pdev);
	if (nr_chans < 0)
		return nr_chans;

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdev->reg_base))
		return PTR_ERR(mdev->reg_base);

	mdev->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(mdev->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(mdev->clk);
	}

	ret = clk_prepare_enable(mdev->clk);
	if (ret)
		return ret;

	ddev = &mdev->ddev;
	ddev->dev = dev;
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	ddev->src_addr_widths = MLB_HDMAC_BUSWIDTHS;
	ddev->dst_addr_widths = MLB_HDMAC_BUSWIDTHS;
	ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	ddev->device_free_chan_resources = milbeaut_hdmac_free_chan_resources;
	ddev->device_config = milbeaut_hdmac_chan_config;
	ddev->device_pause = milbeaut_hdmac_chan_pause;
	ddev->device_resume = milbeaut_hdmac_chan_resume;
	ddev->device_prep_slave_sg = milbeaut_hdmac_prep_slave_sg;
	ddev->device_terminate_all = milbeaut_hdmac_terminate_all;
	ddev->device_synchronize = milbeaut_hdmac_synchronize;
	ddev->device_tx_status = milbeaut_hdmac_tx_status;
	ddev->device_issue_pending = milbeaut_hdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++) {
		ret = milbeaut_hdmac_chan_init(pdev, mdev, i);
		if (ret)
			goto disable_clk;
	}

	ret = dma_async_device_register(ddev);
	if (ret)
		goto disable_clk;

	ret = of_dma_controller_register(dev->of_node,
					 milbeaut_hdmac_xlate, mdev);
	if (ret)
		goto unregister_dmac;

	platform_set_drvdata(pdev, mdev);

	return 0;

unregister_dmac:
	dma_async_device_unregister(ddev);
disable_clk:
	clk_disable_unprepare(mdev->clk);

	return ret;
}

static int milbeaut_hdmac_remove(struct platform_device *pdev)
{
	struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * still be holding one descriptor that was on-flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid memory leak.
	 */
	list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		milbeaut_hdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->ddev);
	clk_disable_unprepare(mdev->clk);

	return 0;
}

static const struct of_device_id milbeaut_hdmac_match[] = {
	{ .compatible = "socionext,milbeaut-m10v-hdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);

static struct platform_driver milbeaut_hdmac_driver = {
	.probe = milbeaut_hdmac_probe,
	.remove = milbeaut_hdmac_remove,
	.driver = {
		.name = "milbeaut-m10v-hdmac",
		.of_match_table = milbeaut_hdmac_match,
	},
};
module_platform_driver(milbeaut_hdmac_driver);

MODULE_DESCRIPTION("Milbeaut HDMAC DmaEngine driver");
MODULE_LICENSE("GPL v2");