// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <alex@alex-smith.me.uk>
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Global registers. */
#define JZ_DMA_REG_DMAC		0x00
#define JZ_DMA_REG_DIRQP	0x04
#define JZ_DMA_REG_DDR		0x08
#define JZ_DMA_REG_DDRS		0x0c
#define JZ_DMA_REG_DCKE		0x10
#define JZ_DMA_REG_DCKES	0x14
#define JZ_DMA_REG_DCKEC	0x18
#define JZ_DMA_REG_DMACP	0x1c
#define JZ_DMA_REG_DSIRQP	0x20
#define JZ_DMA_REG_DSIRQM	0x24
#define JZ_DMA_REG_DCIRQP	0x28
#define JZ_DMA_REG_DCIRQM	0x2c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n)	(n * 0x20)
#define JZ_DMA_REG_DSA		0x00
#define JZ_DMA_REG_DTA		0x04
#define JZ_DMA_REG_DTC		0x08
#define JZ_DMA_REG_DRT		0x0c
#define JZ_DMA_REG_DCS		0x10
#define JZ_DMA_REG_DCM		0x14
#define JZ_DMA_REG_DDA		0x18
#define JZ_DMA_REG_DSD		0x1c

#define JZ_DMA_DMAC_DMAE	BIT(0)
#define JZ_DMA_DMAC_AR		BIT(2)
#define JZ_DMA_DMAC_HLT		BIT(3)
#define JZ_DMA_DMAC_FAIC	BIT(27)
#define JZ_DMA_DMAC_FMSC	BIT(31)

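/*
 * Request type written to the DRT register for memory-to-memory transfers
 * (the "auto-request" source; see jz4780_dma_prep_dma_memcpy() below).
 */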
#define JZ_DMA_DRT_AUTO		0x8

#define JZ_DMA_DCS_CTE		BIT(0)
#define JZ_DMA_DCS_HLT		BIT(2)
#define JZ_DMA_DCS_TT		BIT(3)
#define JZ_DMA_DCS_AR		BIT(4)
#define JZ_DMA_DCS_DES8		BIT(30)

#define JZ_DMA_DCM_LINK		BIT(0)
#define JZ_DMA_DCM_TIE		BIT(1)
#define JZ_DMA_DCM_STDE		BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT	8
#define JZ_DMA_DCM_TSZ_MASK	(0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT	12
#define JZ_DMA_DCM_SP_SHIFT	14
#define JZ_DMA_DCM_DAI		BIT(22)
#define JZ_DMA_DCM_SAI		BIT(23)

#define JZ_DMA_SIZE_4_BYTE	0x0
#define JZ_DMA_SIZE_1_BYTE	0x1
#define JZ_DMA_SIZE_2_BYTE	0x2
#define JZ_DMA_SIZE_16_BYTE	0x3
#define JZ_DMA_SIZE_32_BYTE	0x4
#define JZ_DMA_SIZE_64_BYTE	0x5
#define JZ_DMA_SIZE_128_BYTE	0x6

#define JZ_DMA_WIDTH_32_BIT	0x0
#define JZ_DMA_WIDTH_8_BIT	0x1
#define JZ_DMA_WIDTH_16_BIT	0x2

#define JZ_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)	 | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define JZ4780_DMA_CTRL_OFFSET	0x1000

/* macros for use with jz4780_dma_soc_data.flags */
#define JZ_SOC_DATA_ALLOW_LEGACY_DT	BIT(0)
#define JZ_SOC_DATA_PROGRAMMABLE_DMA	BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM		BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC	BIT(3)
#define JZ_SOC_DATA_BREAK_LINKS		BIT(4)

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 */
struct jz4780_dma_hwdesc {
	u32 dcm;
	u32 dsa;
	u32 dta;
	u32 dtc;
};

/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE	PAGE_SIZE
#define JZ_DMA_MAX_DESC		\
	(JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))

struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	u32 transfer_type;
	u32 status;
};

struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	u32 transfer_type_tx, transfer_type_rx;
	u32 transfer_shift;
	struct dma_slave_config config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};

struct jz4780_dma_soc_data {
	unsigned int nb_channels;
	unsigned int transfer_ord_max;
	unsigned long flags;
};

struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *chn_base;
	void __iomem *ctrl_base;
	struct clk *clk;
	unsigned int irq;
	const struct jz4780_dma_soc_data *soc_data;
	u32 chan_reserved;

	struct jz4780_dma_chan chan[];
};

struct jz4780_dma_filter_data {
	u32 transfer_type_tx, transfer_type_rx;
	int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}

static inline u32 jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg)
{
	return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg, u32 val)
{
	writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline u32 jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, u32 val)
{
	writel(val, jzdma->ctrl_base + reg);
}

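/*
 * Per-channel clock gating: SoCs flagged JZ_SOC_DATA_PER_CHAN_PM gate each
 * channel's clock individually. Most expose set/clear registers (DCKES and
 * DCKEC); those flagged JZ_SOC_DATA_NO_DCKES_DCKEC only have the plain DCKE
 * register, so for them the disable path below leaves the clock enabled.
 */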
static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
		unsigned int reg;

		if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
			reg = JZ_DMA_REG_DCKE;
		else
			reg = JZ_DMA_REG_DCKES;

		jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
	}
}

static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
	    !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}

static struct jz4780_dma_desc *
jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count,
		      enum dma_transaction_type type,
		      enum dma_transfer_direction direction)
{
	struct jz4780_dma_desc *desc;

	if (count > JZ_DMA_MAX_DESC)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
				    &desc->desc_phys);
	if (!desc->desc) {
		kfree(desc);
		return NULL;
	}

	desc->count = count;
	desc->type = type;

	if (direction == DMA_DEV_TO_MEM)
		desc->transfer_type = jzchan->transfer_type_rx;
	else
		desc->transfer_type = jzchan->transfer_type_tx;

	return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}

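/*
 * Pick the largest supported transfer size compatible with @val, which at
 * the call sites is the bitwise OR of the address, the length and the
 * width * maxburst product, and return the matching JZ_DMA_SIZE_* encoding.
 * The log2 of the chosen size is reported through @shift so callers can
 * convert byte counts into block counts.
 */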
static u32 jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
	unsigned long val, u32 *shift)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	int ord = ffs(val) - 1;

	/*
	 * 8 byte transfer sizes unsupported so fall back on 4. If it's larger
	 * than the maximum, just limit it. It is perfectly safe to fall back
	 * in this way since we won't exceed the maximum burst size supported
	 * by the device, the only effect is reduced efficiency. This is better
	 * than refusing to perform the request at all.
	 */
	if (ord == 3)
		ord = 2;
	else if (ord > jzdma->soc_data->transfer_ord_max)
		ord = jzdma->soc_data->transfer_ord_max;

	*shift = ord;

	switch (ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	default:
		return JZ_DMA_SIZE_128_BYTE;
	}
}

static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	u32 width, maxburst, tsz;

	if (direction == DMA_MEM_TO_DEV) {
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * This calculates the maximum transfer size that can be used with the
	 * given address, length, width and maximum burst size. The address
	 * must be aligned to the transfer size, the total length must be
	 * divisible by the transfer size, and we must not use more than the
	 * maximum burst specified by the user.
	 */
	tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
				       &jzchan->transfer_shift);

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	desc->dtc = len >> jzchan->transfer_shift;
	return 0;
}

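/*
 * Build one hardware descriptor per scatterlist entry. Entries are chained
 * with JZ_DMA_DCM_LINK so the controller walks the whole list on its own,
 * except on SoCs flagged JZ_SOC_DATA_BREAK_LINKS, where each descriptor is
 * instead started in turn from the interrupt handler.
 */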
static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_desc *desc;
	unsigned int i;
	int err;

	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction);
	if (!desc)
		return NULL;

	for (i = 0; i < sg_len; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
					      sg_dma_address(&sgl[i]),
					      sg_dma_len(&sgl[i]),
					      direction);
		if (err < 0) {
			jz4780_dma_desc_free(&jzchan->desc->vdesc);
			return NULL;
		}

		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

		if (i != (sg_len - 1) &&
		    !(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
			/* Automatically proceed to the next descriptor. */
			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

			/*
			 * The upper 8 bits of the DTC field in the descriptor
			 * must be set to (offset from descriptor base of next
			 * descriptor >> 4).
			 */
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

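/*
 * Cyclic transfers map each period onto one hardware descriptor; the last
 * descriptor keeps a next-descriptor offset of 0 so the ring links back to
 * the first.
 */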
static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int periods, i;
	int err;

	if (buf_len % period_len)
		return NULL;

	periods = buf_len / period_len;

	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction);
	if (!desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
					      period_len, direction);
		if (err < 0) {
			jz4780_dma_desc_free(&jzchan->desc->vdesc);
			return NULL;
		}

		buf_addr += period_len;

		/*
		 * Set the link bit to indicate that the controller should
		 * automatically proceed to the next descriptor. In
		 * jz4780_dma_begin(), this will be cleared if we need to issue
		 * an interrupt after each period.
		 */
		desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

		/*
		 * The upper 8 bits of the DTC field in the descriptor must be
		 * set to (offset from descriptor base of next descriptor >> 4).
		 * If this is the last descriptor, link it back to the first,
		 * i.e. leave offset set to 0, otherwise point to the next one.
		 */
		if (i != (periods - 1)) {
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	u32 tsz;

	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0);
	if (!desc)
		return NULL;

	tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
				       &jzchan->transfer_shift);

	desc->transfer_type = JZ_DMA_DRT_AUTO;

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> jzchan->transfer_shift;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

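/*
 * Start (or restart) the hardware on @jzchan. Called with the channel's
 * vchan lock held, either from issue_pending or from the interrupt handler
 * when advancing through unlinked descriptors.
 */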
static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The DMA controller doesn't support triggering an
			 * interrupt after processing each descriptor, only
			 * after processing an entire terminated list of
			 * descriptors. For a cyclic DMA setup the list of
			 * descriptors is not terminated so we can never get an
			 * interrupt.
			 *
			 * If the user requested a callback for a cyclic DMA
			 * setup then we workaround this hardware limitation
			 * here by degrading to a set of unlinked descriptors
			 * which we will submit in sequence in response to the
			 * completion of processing the previous descriptor.
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, therefore this must be one
		 * for which we unlinked the descriptors above. Advance to the
		 * next one in the list.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Enable the channel's clock. */
	jz4780_dma_chan_enable(jzdma, jzchan->id);

	/* Use 4-word descriptors. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	/* Set transfer type. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
			      jzchan->desc->transfer_type);

	/*
	 * Set the transfer count. This is redundant for a descriptor-driven
	 * transfer. However, there can be a delay between the transfer start
	 * time and when DTCn reg contains the new transfer count. Setting
	 * it explicitly ensures residue is computed correctly at all times.
	 */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
			      jzchan->desc->desc[jzchan->curr_hwdesc].dtc);

	/* Write descriptor address and initiate descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
			      JZ_DMA_DCS_CTE);
}

static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
		jz4780_dma_begin(jzchan);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

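/*
 * Stop the channel immediately, hand any in-flight descriptor back to the
 * virt-dma layer, and collect the remaining queued descriptors on a local
 * list so they can be freed once the lock is dropped.
 */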
static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
	if (jzchan->desc) {
		vchan_terminate_vdesc(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	jz4780_dma_chan_disable(jzdma, jzchan->id);

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);

	return 0;
}

static void jz4780_dma_synchronize(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);

	vchan_synchronize(&jzchan->vchan);
	jz4780_dma_chan_disable(jzdma, jzchan->id);
}

static int jz4780_dma_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
		return -EINVAL;

	/* Copy the rest of the slave configuration, it is used later. */
	memcpy(&jzchan->config, config, sizeof(jzchan->config));

	return 0;
}

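/*
 * Residue is the sum of the 24-bit block counts of the hardware descriptors
 * that have not started yet, plus the controller's live DTC count for the
 * descriptor in flight, converted back to bytes via transfer_shift.
 */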
static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int count = 0;
	unsigned int i;

	for (i = next_sg; i < desc->count; i++)
		count += desc->desc[i].dtc & GENMASK(23, 0);

	if (next_sg != 0)
		count += jz4780_dma_chn_readl(jzdma, jzchan->id,
					      JZ_DMA_REG_DTC);

	return count << jzchan->transfer_shift;
}

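/*
 * Report transfer status. A descriptor still sitting on the issued list has
 * its full size as residue; the active descriptor is queried from the
 * hardware. A descriptor whose DCS snapshot recorded an address error or
 * halt is reported as DMA_ERROR.
 */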
static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	unsigned long residue = 0;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);
	if ((status == DMA_COMPLETE) || (txstate == NULL))
		goto out_unlock_irqrestore;

	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
	if (vdesc) {
		/* On the issued list, so hasn't been processed yet */
		residue = jz4780_dma_desc_residue(jzchan,
					to_jz4780_dma_desc(vdesc), 0);
	} else if (cookie == jzchan->desc->vdesc.tx.cookie) {
		residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
					jzchan->curr_hwdesc + 1);
	}
	dma_set_residue(txstate, residue);

	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
		status = DMA_ERROR;

out_unlock_irqrestore:
	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
	return status;
}

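/*
 * Handle a single channel's interrupt. Returns true if the interrupt was
 * genuine; the caller then drops the channel's bit from the pending mask it
 * writes back to DIRQP. An interrupt with none of TT, AR or HLT set is
 * treated as a false positive and the channel is simply re-enabled.
 */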
static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
	struct jz4780_dma_chan *jzchan)
{
	const unsigned int soc_flags = jzdma->soc_data->flags;
	struct jz4780_dma_desc *desc = jzchan->desc;
	u32 dcs;
	bool ack = true;

	spin_lock(&jzchan->vchan.lock);

	dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	if (dcs & JZ_DMA_DCS_AR) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "address error (DCS=0x%x)\n", dcs);
	}

	if (dcs & JZ_DMA_DCS_HLT) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "channel halt (DCS=0x%x)\n", dcs);
	}

	if (jzchan->desc) {
		jzchan->desc->status = dcs;

		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
			if (jzchan->desc->type == DMA_CYCLIC) {
				vchan_cyclic_callback(&jzchan->desc->vdesc);

				jz4780_dma_begin(jzchan);
			} else if (dcs & JZ_DMA_DCS_TT) {
				if (!(soc_flags & JZ_SOC_DATA_BREAK_LINKS) ||
				    (jzchan->curr_hwdesc + 1 == desc->count)) {
					vchan_cookie_complete(&desc->vdesc);
					jzchan->desc = NULL;
				}

				jz4780_dma_begin(jzchan);
			} else {
				/* False positive - continue the transfer */
				ack = false;
				jz4780_dma_chn_writel(jzdma, jzchan->id,
						      JZ_DMA_REG_DCS,
						      JZ_DMA_DCS_CTE);
			}
		}
	} else {
		dev_err(&jzchan->vchan.chan.dev->device,
			"channel IRQ with no active transfer\n");
	}

	spin_unlock(&jzchan->vchan.lock);

	return ack;
}

static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
	struct jz4780_dma_dev *jzdma = data;
	unsigned int nb_channels = jzdma->soc_data->nb_channels;
	unsigned long pending;
	u32 dmac;
	int i;

	pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);

	for_each_set_bit(i, &pending, nb_channels) {
		if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
			pending &= ~BIT(i);
	}

	/* Clear halt and address error status of all channels. */
	dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

	/* Clear interrupt pending status. */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);

	return IRQ_HANDLED;
}

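/*
 * Hardware descriptors must be reachable by the controller, so carve them
 * out of a per-channel dma_pool of page-sized, DMA-coherent blocks.
 */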
static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
					    chan->device->dev,
					    JZ_DMA_DESC_BLOCK_SIZE,
					    PAGE_SIZE, 0);
	if (!jzchan->desc_pool) {
		dev_err(&chan->dev->device,
			"failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	vchan_free_chan_resources(&jzchan->vchan);
	dma_pool_destroy(jzchan->desc_pool);
	jzchan->desc_pool = NULL;
}

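/*
 * Channel filter, used both by jz4780_of_dma_xlate() below and by legacy
 * dma_request_channel() users: match the specific channel when one was
 * requested, otherwise accept any channel that is not marked reserved.
 */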
static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_filter_data *data = param;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
			return false;
	} else if (jzdma->chan_reserved & BIT(jzchan->id)) {
		return false;
	}

	jzchan->transfer_type_tx = data->transfer_type_tx;
	jzchan->transfer_type_rx = data->transfer_type_rx;

	return true;
}

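/*
 * Translate a devicetree DMA specifier. Two-cell specifiers carry a single
 * request type and a channel; three-cell specifiers carry separate TX and
 * RX request types followed by the channel. A channel cell of -1 lets the
 * filter pick any unreserved channel.
 */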
static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_filter_data data;

	if (dma_spec->args_count == 2) {
		data.transfer_type_tx = dma_spec->args[0];
		data.transfer_type_rx = dma_spec->args[0];
		data.channel = dma_spec->args[1];
	} else if (dma_spec->args_count == 3) {
		data.transfer_type_tx = dma_spec->args[0];
		data.transfer_type_rx = dma_spec->args[1];
		data.channel = dma_spec->args[2];
	} else {
		return NULL;
	}

	if (data.channel > -1) {
		if (data.channel >= jzdma->soc_data->nb_channels) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %u\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %u\n",
				data.channel);
			return NULL;
		}

		jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx;
		jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx;

		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	}

	return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
				     ofdma->of_node);
}

static int jz4780_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct jz4780_dma_soc_data *soc_data;
	struct jz4780_dma_dev *jzdma;
	struct jz4780_dma_chan *jzchan;
	struct dma_device *dd;
	struct resource *res;
	int i, ret;

	if (!dev->of_node) {
		dev_err(dev, "This driver must be probed from devicetree\n");
		return -EINVAL;
	}

	soc_data = device_get_match_data(dev);
	if (!soc_data)
		return -EINVAL;

	jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
			     soc_data->nb_channels), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;

	jzdma->soc_data = soc_data;
	platform_set_drvdata(pdev, jzdma);

	jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(jzdma->chn_base))
		return PTR_ERR(jzdma->chn_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		jzdma->ctrl_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(jzdma->ctrl_base))
			return PTR_ERR(jzdma->ctrl_base);
	} else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
		/*
		 * On JZ4780, if the second memory resource was not supplied,
		 * assume we're using an old devicetree, and calculate the
		 * offset to the control registers.
		 */
		jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
	} else {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	jzdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(jzdma->clk)) {
		dev_err(dev, "failed to get clock\n");
		ret = PTR_ERR(jzdma->clk);
		return ret;
	}

	clk_prepare_enable(jzdma->clk);

	/* Property is optional, if it doesn't exist the value will remain 0. */
	of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
				   0, &jzdma->chan_reserved);

	dd = &jzdma->dma_device;

	/*
	 * The real segment size limit is dependent on the size unit selected
	 * for the transfer. Because the size unit is selected automatically
	 * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to
	 * ensure the 24-bit transfer count in the descriptor cannot overflow.
	 */
	dma_set_max_seg_size(dev, 0xffffff);

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	dd->dev = dev;
	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
	dd->device_config = jz4780_dma_config;
	dd->device_terminate_all = jz4780_dma_terminate_all;
	dd->device_synchronize = jz4780_dma_synchronize;
	dd->device_tx_status = jz4780_dma_tx_status;
	dd->device_issue_pending = jz4780_dma_issue_pending;
	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_sg_burst = JZ_DMA_MAX_DESC;

	/*
	 * Enable DMA controller, mark all channels as not programmable.
	 * Also set the FMSC bit - it increases MSC performance, so it makes
	 * little sense not to enable it.
	 */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
			       JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);

	if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);

	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < soc_data->nb_channels; i++) {
		jzchan = &jzdma->chan[i];
		jzchan->id = i;

		vchan_init(&jzchan->vchan, dd);
		jzchan->vchan.desc_free = jz4780_dma_desc_free;
	}

	/*
	 * On JZ4760, chan0 won't enable properly the first time.
	 * Enabling then disabling chan1 will magically make chan0 work
	 * properly.
	 */
	jz4780_dma_chan_enable(jzdma, 1);
	jz4780_dma_chan_disable(jzdma, 1);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_disable_clk;

	jzdma->irq = ret;

	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
			  jzdma);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
		goto err_disable_clk;
	}

	ret = dmaenginem_async_device_register(dd);
	if (ret) {
		dev_err(dev, "failed to register device\n");
		goto err_free_irq;
	}

	/* Register with OF DMA helpers. */
	ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
					 jzdma);
	if (ret) {
		dev_err(dev, "failed to register OF DMA controller\n");
		goto err_free_irq;
	}

	dev_info(dev, "JZ4780 DMA controller initialised\n");
	return 0;

err_free_irq:
	free_irq(jzdma->irq, jzdma);

err_disable_clk:
	clk_disable_unprepare(jzdma->clk);
	return ret;
}

static void jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(jzdma->clk);
	free_irq(jzdma->irq, jzdma);

	for (i = 0; i < jzdma->soc_data->nb_channels; i++)
		tasklet_kill(&jzdma->chan[i].vchan.task);
}

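/*
 * Per-SoC channel counts, maximum transfer-size order (log2 of the largest
 * burst in bytes) and quirk flags. The .nb_channels values below were lost
 * from this copy and are filled in from the mainline driver; double-check
 * them against the relevant programmer's manual before relying on them.
 */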
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
		 JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4755_dma_soc_data = {
	.nb_channels = 4,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
		 JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760_mdma_soc_data = {
	.nb_channels = 2,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760_bdma_soc_data = {
	.nb_channels = 3,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4760b_mdma_soc_data = {
	.nb_channels = 2,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4760b_bdma_soc_data = {
	.nb_channels = 3,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
	.nb_channels = 8,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
	{ .compatible = "ingenic,jz4755-dma", .data = &jz4755_dma_soc_data },
	{ .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
	{ .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data },
	{ .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data },
	{ .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
	{ .compatible = "ingenic,jz4760b-mdma", .data = &jz4760b_mdma_soc_data },
	{ .compatible = "ingenic,jz4760b-bdma", .data = &jz4760b_bdma_soc_data },
	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
	{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
	{ .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
	.probe			= jz4780_dma_probe,
	.remove			= jz4780_dma_remove,
	.driver			= {
		.name		= "jz4780-dma",
		.of_match_table	= jz4780_dma_dt_match,
	},
};

static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");