// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP DMAengine support
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "../virt-dma.h"

#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32

struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        void __iomem *base;
        const struct omap_dma_reg *reg_map;
        struct omap_system_dma_plat_info *plat;
        bool legacy;
        bool ll123_supported;
        struct dma_pool *desc_pool;
        unsigned dma_requests;
        spinlock_t irq_lock;
        uint32_t irq_enable_mask;
        struct omap_chan **lch_map;
};

struct omap_chan {
        struct virt_dma_chan vc;
        void __iomem *channel_base;
        const struct omap_dma_reg *reg_map;
        uint32_t ccr;

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
        bool paused;
        bool running;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};

#define DESC_NXT_SV_REFRESH	(0x1 << 24)
#define DESC_NXT_SV_REUSE	(0x2 << 24)
#define DESC_NXT_DV_REFRESH	(0x1 << 26)
#define DESC_NXT_DV_REUSE	(0x2 << 26)
#define DESC_NTYPE_TYPE2	(0x2 << 29)

/* Type 2 descriptor with Source or Destination address update */
struct omap_type2_desc {
        uint32_t next_desc;     /* next descriptor address pointer */
        uint32_t en;            /* number of elements, plus DESC_NXT_*/
                                /* and DESC_NTYPE_* flags above */
        uint32_t addr;          /* src or dst */
        uint16_t fn;            /* frame count */
        uint16_t cicr;          /* CICR value */
        int16_t cdei;           /* CDEI value */
        int16_t csei;           /* CSEI value */
        int32_t cdfi;           /* CDFI value */
        int32_t csfi;           /* CSFI value */
} __packed;

struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
        int32_t fi;             /* for double indexing */
        int16_t ei;             /* for double indexing */

        /* Linked list */
        struct omap_type2_desc *t2_desc;
        dma_addr_t t2_desc_paddr;
};

struct omap_desc {
        struct virt_dma_desc vd;
        bool using_ll;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;
        bool polled;

        int32_t fi;             /* for OMAP_DMA_SYNC_PACKET / double indexing */
        int16_t ei;             /* for double indexing */
        uint8_t es;             /* CSDP_DATA_TYPE_xxx */
        uint32_t ccr;           /* CCR value */
        uint16_t clnk_ctrl;     /* CLNK_CTRL value */
        uint16_t cicr;          /* CICR value */
        uint32_t csdp;          /* CSDP value */

        unsigned sglen;
        struct omap_sg sg[0];
};

enum {
        CAPS_0_SUPPORT_LL123	= BIT(20),	/* Linked List type1/2/3 */
        CAPS_0_SUPPORT_LL4	= BIT(21),	/* Linked List type4 */

        CCR_FS			= BIT(5),
        CCR_READ_PRIORITY	= BIT(6),
        CCR_ENABLE		= BIT(7),
        CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
        CCR_REPEAT		= BIT(9),	/* OMAP1 only */
        CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
        CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
        CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
        CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
        CCR_SRC_AMODE_CONSTANT	= 0 << 12,
        CCR_SRC_AMODE_POSTINC	= 1 << 12,
        CCR_SRC_AMODE_SGLIDX	= 2 << 12,
        CCR_SRC_AMODE_DBLIDX	= 3 << 12,
        CCR_DST_AMODE_CONSTANT	= 0 << 14,
        CCR_DST_AMODE_POSTINC	= 1 << 14,
        CCR_DST_AMODE_SGLIDX	= 2 << 14,
        CCR_DST_AMODE_DBLIDX	= 3 << 14,
        CCR_CONSTANT_FILL	= BIT(16),
        CCR_TRANSPARENT_COPY	= BIT(17),
        CCR_BS			= BIT(18),
        CCR_SUPERVISOR		= BIT(22),
        CCR_PREFETCH		= BIT(23),
        CCR_TRIGGER_SRC		= BIT(24),
        CCR_BUFFERING_DISABLE	= BIT(25),
        CCR_WRITE_PRIORITY	= BIT(26),
        CCR_SYNC_ELEMENT	= 0,
        CCR_SYNC_FRAME		= CCR_FS,
        CCR_SYNC_BLOCK		= CCR_BS,
        CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

        CSDP_DATA_TYPE_8	= 0,
        CSDP_DATA_TYPE_16	= 1,
        CSDP_DATA_TYPE_32	= 2,
        CSDP_SRC_PORT_EMIFF	= 0 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_EMIFS	= 1 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_OCP_T1	= 2 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_TIPB	= 3 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_OCP_T2	= 4 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_MPUI	= 5 << 2, /* OMAP1 only */
        CSDP_SRC_PACKED		= BIT(6),
        CSDP_SRC_BURST_1	= 0 << 7,
        CSDP_SRC_BURST_16	= 1 << 7,
        CSDP_SRC_BURST_32	= 2 << 7,
        CSDP_SRC_BURST_64	= 3 << 7,
        CSDP_DST_PORT_EMIFF	= 0 << 9, /* OMAP1 only */
        CSDP_DST_PORT_EMIFS	= 1 << 9, /* OMAP1 only */
        CSDP_DST_PORT_OCP_T1	= 2 << 9, /* OMAP1 only */
        CSDP_DST_PORT_TIPB	= 3 << 9, /* OMAP1 only */
        CSDP_DST_PORT_OCP_T2	= 4 << 9, /* OMAP1 only */
        CSDP_DST_PORT_MPUI	= 5 << 9, /* OMAP1 only */
        CSDP_DST_PACKED		= BIT(13),
        CSDP_DST_BURST_1	= 0 << 14,
        CSDP_DST_BURST_16	= 1 << 14,
        CSDP_DST_BURST_32	= 2 << 14,
        CSDP_DST_BURST_64	= 3 << 14,
        CSDP_WRITE_NON_POSTED		= 0 << 16,
        CSDP_WRITE_POSTED		= 1 << 16,
        CSDP_WRITE_LAST_NON_POSTED	= 2 << 16,

        CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
        CICR_DROP_IE		= BIT(1),
        CICR_HALF_IE		= BIT(2),
        CICR_FRAME_IE		= BIT(3),
        CICR_LAST_IE		= BIT(4),
        CICR_BLOCK_IE		= BIT(5),
        CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
        CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
        CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
        CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
        CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
        CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

        CLNK_CTRL_ENABLE_LNK	= BIT(15),

        CDP_DST_VALID_INC	= 0 << 0,
        CDP_DST_VALID_RELOAD	= 1 << 0,
        CDP_DST_VALID_REUSE	= 2 << 0,
        CDP_SRC_VALID_INC	= 0 << 2,
        CDP_SRC_VALID_RELOAD	= 1 << 2,
        CDP_SRC_VALID_REUSE	= 2 << 2,
        CDP_NTYPE_TYPE1		= 1 << 4,
        CDP_NTYPE_TYPE2		= 2 << 4,
        CDP_NTYPE_TYPE3		= 3 << 4,
        CDP_TMODE_NORMAL	= 0 << 8,
        CDP_TMODE_LLIST		= 1 << 8,
        CDP_FAST		= BIT(10),
};

static const unsigned es_bytes[] = {
        [CSDP_DATA_TYPE_8] = 1,
        [CSDP_DATA_TYPE_16] = 2,
        [CSDP_DATA_TYPE_32] = 4,
};

static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info omap_dma_info = {
        .filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}
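
/*
 * Free a completed descriptor. If it was set up with type 2 linked-list
 * descriptors, return each of them to the dma_pool before freeing the
 * software descriptor itself.
 */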
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        struct omap_desc *d = to_omap_dma_desc(&vd->tx);
        struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
        unsigned i;

        for (i = 0; i < d->sglen; i++) {
                if (d->sg[i].t2_desc)
                        dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
                                      d->sg[i].t2_desc_paddr);
        }

        kfree(d);
}
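
/*
 * Fill the hardware type 2 descriptor for sg entry 'idx' and chain it to the
 * previous entry. Depending on the transfer direction, either the source or
 * the destination side is refreshed from the descriptor on each reload while
 * the other side reuses the already programmed address.
 */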
static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
                                     enum dma_transfer_direction dir, bool last)
{
        struct omap_sg *sg = &d->sg[idx];
        struct omap_type2_desc *t2_desc = sg->t2_desc;

        if (idx)
                d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
        if (last)
                t2_desc->next_desc = 0xfffffffc;

        t2_desc->en = sg->en;
        t2_desc->addr = sg->addr;
        t2_desc->fn = sg->fn & 0xffff;
        t2_desc->cicr = d->cicr;
        if (!last)
                t2_desc->cicr &= ~CICR_BLOCK_IE;

        switch (dir) {
        case DMA_DEV_TO_MEM:
                t2_desc->cdei = sg->ei;
                t2_desc->csei = d->ei;
                t2_desc->cdfi = sg->fi;
                t2_desc->csfi = d->fi;

                t2_desc->en |= DESC_NXT_DV_REFRESH;
                t2_desc->en |= DESC_NXT_SV_REUSE;
                break;
        case DMA_MEM_TO_DEV:
                t2_desc->cdei = d->ei;
                t2_desc->csei = sg->ei;
                t2_desc->cdfi = d->fi;
                t2_desc->csfi = sg->fi;

                t2_desc->en |= DESC_NXT_SV_REFRESH;
                t2_desc->en |= DESC_NXT_DV_REUSE;
                break;
        default:
                break;
        }

        t2_desc->en |= DESC_NTYPE_TYPE2;
}
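
/*
 * Low-level register accessors. Depending on the SoC, a logical register is
 * 16 bits wide, split across two consecutive 16-bit locations, or a plain
 * 32-bit register; the reg_map entry records which layout applies.
 */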
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
        switch (type) {
        case OMAP_DMA_REG_16BIT:
                writew_relaxed(val, addr);
                break;
        case OMAP_DMA_REG_2X16BIT:
                writew_relaxed(val, addr);
                writew_relaxed(val >> 16, addr + 2);
                break;
        case OMAP_DMA_REG_32BIT:
                writel_relaxed(val, addr);
                break;
        default:
                WARN_ON(1);
        }
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
        unsigned val;

        switch (type) {
        case OMAP_DMA_REG_16BIT:
                val = readw_relaxed(addr);
                break;
        case OMAP_DMA_REG_2X16BIT:
                val = readw_relaxed(addr);
                val |= readw_relaxed(addr + 2) << 16;
                break;
        case OMAP_DMA_REG_32BIT:
                val = readl_relaxed(addr);
                break;
        default:
                WARN_ON(1);
                val = 0;
        }

        return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
        const struct omap_dma_reg *r = od->reg_map + reg;

        omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
        const struct omap_dma_reg *r = od->reg_map + reg;

        return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
        const struct omap_dma_reg *r = c->reg_map + reg;

        omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
        const struct omap_dma_reg *r = c->reg_map + reg;

        return omap_dma_read(r->type, c->channel_base + r->offset);
}
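
/*
 * The channel status register (CSR) is cleared by reading it on OMAP1 and by
 * writing the set bits back on OMAP2+, hence the two helpers below.
 */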
static void omap_dma_clear_csr(struct omap_chan *c)
{
        if (dma_omap1())
                omap_dma_chan_read(c, CSR);
        else
                omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
        unsigned val = omap_dma_chan_read(c, CSR);

        if (dma_omap1())
                omap_dma_chan_write(c, CSR, val);

        return val;
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
                            unsigned lch)
{
        c->channel_base = od->base + od->plat->channel_stride * lch;

        od->lch_map[lch] = c;
}
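
/*
 * Program and enable the channel for the given descriptor. For linked-list
 * (type 2) transfers the descriptor pointer registers are set up and the
 * element/frame counters primed; otherwise CDP is cleared on controllers
 * that support linked lists. Finally interrupts are enabled and CCR_ENABLE
 * starts the transfer.
 */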
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint16_t cicr = d->cicr;

        if (__dma_omap15xx(od->plat->dma_attr))
                omap_dma_chan_write(c, CPC, 0);
        else
                omap_dma_chan_write(c, CDAC, 0);

        omap_dma_clear_csr(c);

        if (d->using_ll) {
                uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;

                if (d->dir == DMA_DEV_TO_MEM)
                        cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
                else
                        cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
                omap_dma_chan_write(c, CDP, cdp);

                omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
                omap_dma_chan_write(c, CCDN, 0);
                omap_dma_chan_write(c, CCFN, 0xffff);
                omap_dma_chan_write(c, CCEN, 0xffffff);

                cicr &= ~CICR_BLOCK_IE;
        } else if (od->ll123_supported) {
                omap_dma_chan_write(c, CDP, 0);
        }

        /* Enable interrupts */
        omap_dma_chan_write(c, CICR, cicr);

        /* Enable channel */
        omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

        c->running = true;
}

static void omap_dma_drain_chan(struct omap_chan *c)
{
        int i;
        u32 val;

        /* Wait for sDMA FIFO to drain */
        for (i = 0; ; i++) {
                val = omap_dma_chan_read(c, CCR);
                if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
                        break;

                if (i > 100)
                        break;

                udelay(5);
        }

        if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
                dev_err(c->vc.chan.device->dev,
                        "DMA drain did not complete on lch %d\n",
                        c->dma_ch);
}
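
/*
 * Disable the channel: mask its interrupts, clear CCR_ENABLE and, when
 * erratum i541 applies to a source-triggered channel, temporarily force the
 * controller to no-idle while the FIFO is drained.
 */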
static int omap_dma_stop(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint32_t val;

        /* disable irq */
        omap_dma_chan_write(c, CICR, 0);

        omap_dma_clear_csr(c);

        val = omap_dma_chan_read(c, CCR);
        if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
                uint32_t sysconfig;

                sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
                val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
                val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
                omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

                val = omap_dma_chan_read(c, CCR);
                val &= ~CCR_ENABLE;
                omap_dma_chan_write(c, CCR, val);

                if (!(c->ccr & CCR_BUFFERING_DISABLE))
                        omap_dma_drain_chan(c);

                omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
        } else {
                if (!(val & CCR_ENABLE))
                        return -EINVAL;

                val &= ~CCR_ENABLE;
                omap_dma_chan_write(c, CCR, val);

                if (!(c->ccr & CCR_BUFFERING_DISABLE))
                        omap_dma_drain_chan(c);
        }

        if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
                val = omap_dma_chan_read(c, CLNK_CTRL);

                if (dma_omap1())
                        val |= 1 << 14; /* set the STOP_LNK bit */
                else
                        val &= ~CLNK_CTRL_ENABLE_LNK;

                omap_dma_chan_write(c, CLNK_CTRL, val);
        }
        c->running = false;

        return 0;
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
        struct omap_sg *sg = d->sg + c->sgidx;
        unsigned cxsa, cxei, cxfi;

        if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
                cxsa = CDSA;
                cxei = CDEI;
                cxfi = CDFI;
        } else {
                cxsa = CSSA;
                cxei = CSEI;
                cxfi = CSFI;
        }

        omap_dma_chan_write(c, cxsa, sg->addr);
        omap_dma_chan_write(c, cxei, sg->ei);
        omap_dma_chan_write(c, cxfi, sg->fi);
        omap_dma_chan_write(c, CEN, sg->en);
        omap_dma_chan_write(c, CFN, sg->fn);

        omap_dma_start(c, d);

        c->sgidx++;
}

static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;
        unsigned cxsa, cxei, cxfi;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        /*
         * This provides the necessary barrier to ensure data held in
         * DMA coherent memory is visible to the DMA engine prior to
         * the transfer starting.
         */
        mb();

        omap_dma_chan_write(c, CCR, d->ccr);
        if (dma_omap1())
                omap_dma_chan_write(c, CCR2, d->ccr >> 16);

        if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
                cxsa = CSSA;
                cxei = CSEI;
                cxfi = CSFI;
        } else {
                cxsa = CDSA;
                cxei = CDEI;
                cxfi = CDFI;
        }

        omap_dma_chan_write(c, cxsa, d->dev_addr);
        omap_dma_chan_write(c, cxei, d->ei);
        omap_dma_chan_write(c, cxfi, d->fi);
        omap_dma_chan_write(c, CSDP, d->csdp);
        omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

        omap_dma_start_sg(c, d);
}
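
/*
 * Per-channel completion callback, invoked from the interrupt path with the
 * channel's CSR value: bump the cyclic cookie, complete the descriptor and
 * start the next one, or kick off the next sg segment.
 */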
static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (c->cyclic) {
                        vchan_cyclic_callback(&d->vd);
                } else if (d->using_ll || c->sgidx == d->sglen) {
                        omap_dma_start_desc(c);
                        vchan_cookie_complete(&d->vd);
                } else {
                        omap_dma_start_sg(c, d);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}
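
/*
 * L1 interrupt handler: read and mask the pending channel bits, then for each
 * set bit clear the per-channel status and hand it to omap_dma_callback().
 */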
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
        struct omap_dmadev *od = devid;
        unsigned status, channel;

        spin_lock(&od->irq_lock);

        status = omap_dma_glbl_read(od, IRQSTATUS_L1);
        status &= od->irq_enable_mask;
        if (status == 0) {
                spin_unlock(&od->irq_lock);
                return IRQ_NONE;
        }

        while ((channel = ffs(status)) != 0) {
                unsigned mask, csr;
                struct omap_chan *c;

                channel -= 1;
                mask = BIT(channel);
                status &= ~mask;

                c = od->lch_map[channel];
                if (c == NULL) {
                        /* This should never happen */
                        dev_err(od->ddev.dev, "invalid channel %u\n", channel);
                        continue;
                }

                csr = omap_dma_get_csr(c);
                omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

                omap_dma_callback(channel, csr, c);
        }

        spin_unlock(&od->irq_lock);

        return IRQ_HANDLED;
}
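
/*
 * Allocate a logical channel for this virtual channel: request an sDMA
 * channel, map its register window, route its interrupt to the L1 line used
 * by this driver, and precompute the synchronisation bits of CCR.
 */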
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct device *dev = od->ddev.dev;
        int ret;

        if (od->legacy) {
                ret = omap_request_dma(c->dma_sig, "DMA engine",
                                       omap_dma_callback, c, &c->dma_ch);
        } else {
                ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
                                       &c->dma_ch);
        }

        dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);

        if (ret >= 0) {
                omap_dma_assign(od, c, c->dma_ch);

                if (!od->legacy) {
                        unsigned val;

                        spin_lock_irq(&od->irq_lock);
                        val = BIT(c->dma_ch);
                        omap_dma_glbl_write(od, IRQSTATUS_L1, val);
                        od->irq_enable_mask |= val;
                        omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

                        val = omap_dma_glbl_read(od, IRQENABLE_L0);
                        val &= ~BIT(c->dma_ch);
                        omap_dma_glbl_write(od, IRQENABLE_L0, val);
                        spin_unlock_irq(&od->irq_lock);
                }
        }

        if (dma_omap1()) {
                if (__dma_omap16xx(od->plat->dma_attr)) {
                        c->ccr = CCR_OMAP31_DISABLE;
                        /* Duplicate what plat-omap/dma.c does */
                        c->ccr |= c->dma_ch + 1;
                } else {
                        c->ccr = c->dma_sig & 0x1f;
                }
        } else {
                c->ccr = c->dma_sig & 0x1f;
                c->ccr |= (c->dma_sig & ~0x1f) << 14;
        }
        if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
                c->ccr |= CCR_BUFFERING_DISABLE;

        return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);

        if (!od->legacy) {
                spin_lock_irq(&od->irq_lock);
                od->irq_enable_mask &= ~BIT(c->dma_ch);
                omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
                spin_unlock_irq(&od->irq_lock);
        }

        c->channel_base = NULL;
        od->lch_map[c->dma_ch] = NULL;
        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
                c->dma_sig);
        c->dma_sig = 0;
}
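
/*
 * Residue helpers: a segment transfers EN elements per frame and FN frames,
 * so its size in bytes is EN * FN * element size. omap_dma_desc_size_pos()
 * counts the bytes still to be transferred from the given hardware position.
 */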
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }
        return size;
}

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint32_t val;

        val = omap_dma_chan_read(c, reg);
        if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
                val = omap_dma_chan_read(c, reg);

        return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        dma_addr_t addr, cdac;

        if (__dma_omap15xx(od->plat->dma_attr)) {
                addr = omap_dma_chan_read(c, CPC);
        } else {
                addr = omap_dma_chan_read_3_3(c, CSAC);
                cdac = omap_dma_chan_read_3_3(c, CDAC);

                /*
                 * CDAC == 0 indicates that the DMA transfer on the channel has
                 * not been started (no data has been transferred so far).
                 * Return the programmed source start address in this case.
                 */
                if (cdac == 0)
                        addr = omap_dma_chan_read(c, CSSA);
        }

        if (dma_omap1())
                addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

        return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        dma_addr_t addr;

        if (__dma_omap15xx(od->plat->dma_attr)) {
                addr = omap_dma_chan_read(c, CPC);
        } else {
                addr = omap_dma_chan_read_3_3(c, CDAC);

                /*
                 * CDAC == 0 indicates that the DMA transfer on the channel
                 * has not been started (no data has been transferred so
                 * far). Return the programmed destination start address in
                 * this case.
                 */
                if (addr == 0)
                        addr = omap_dma_chan_read(c, CDSA);
        }

        if (dma_omap1())
                addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

        return addr;
}
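
/*
 * Report transfer status. For the descriptor currently in flight the residue
 * is derived from the hardware source/destination position; for descriptors
 * still queued the full descriptor size is reported.
 */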
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_status ret;
        unsigned long flags;
        struct omap_desc *d = NULL;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->desc && c->desc->vd.tx.cookie == cookie)
                d = c->desc;

        if (!txstate)
                goto out;

        if (d) {
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_dma_get_src_pos(c);
                else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
                        pos = omap_dma_get_dst_pos(c);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);

                if (vd)
                        txstate->residue = omap_dma_desc_size(
                                to_omap_dma_desc(&vd->tx));
                else
                        txstate->residue = 0;
        }

out:
        if (ret == DMA_IN_PROGRESS && c->paused) {
                ret = DMA_PAUSED;
        } else if (d && d->polled && c->running) {
                uint32_t ccr = omap_dma_chan_read(c, CCR);
                /*
                 * The channel is no longer active, set the return value
                 * accordingly and mark it as completed.
                 */
                if (!(ccr & CCR_ENABLE)) {
                        ret = DMA_COMPLETE;
                        omap_dma_start_desc(c);
                        vchan_cookie_complete(&d->vd);
                }
        }

        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc)
                omap_dma_start_desc(c);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}
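
/*
 * Build a slave scatter-gather descriptor: the slave bus width selects the
 * element size, the maxburst value the number of elements per frame, and
 * each sg entry becomes one or more frames. When the controller supports
 * linked lists, a type 2 hardware descriptor is allocated per sg entry.
 */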
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, es, en, frame_bytes;
        bool ll_failed = false;
        u32 burst;
        u32 port_window, port_window_bytes;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                port_window = c->cfg.src_port_window_size;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                port_window = c->cfg.dst_port_window_size;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = CSDP_DATA_TYPE_8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = CSDP_DATA_TYPE_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = CSDP_DATA_TYPE_32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;

        /* When the port_window is used, one frame must cover the window */
        if (port_window) {
                port_window_bytes = port_window * es_bytes[es];

                d->ei = 1;
                /*
                 * One frame covers the port_window and by configure
                 * the source frame index to be -1 * (port_window - 1)
                 * we instruct the sDMA that after a frame is processed
                 * it should move back to the start of the window.
                 */
                d->fi = -(port_window_bytes - 1);
        }

        d->ccr = c->ccr | CCR_SYNC_FRAME;
        if (dir == DMA_DEV_TO_MEM) {
                d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;

                d->ccr |= CCR_DST_AMODE_POSTINC;
                if (port_window) {
                        d->ccr |= CCR_SRC_AMODE_DBLIDX;

                        if (port_window_bytes >= 64)
                                d->csdp |= CSDP_SRC_BURST_64;
                        else if (port_window_bytes >= 32)
                                d->csdp |= CSDP_SRC_BURST_32;
                        else if (port_window_bytes >= 16)
                                d->csdp |= CSDP_SRC_BURST_16;
                } else {
                        d->ccr |= CCR_SRC_AMODE_CONSTANT;
                }
        } else {
                d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;

                d->ccr |= CCR_SRC_AMODE_POSTINC;
                if (port_window) {
                        d->ccr |= CCR_DST_AMODE_DBLIDX;

                        if (port_window_bytes >= 64)
                                d->csdp |= CSDP_DST_BURST_64;
                        else if (port_window_bytes >= 32)
                                d->csdp |= CSDP_DST_BURST_32;
                        else if (port_window_bytes >= 16)
                                d->csdp |= CSDP_DST_BURST_16;
                } else {
                        d->ccr |= CCR_DST_AMODE_CONSTANT;
                }
        }

        d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
        d->csdp |= es;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;

                if (dir == DMA_DEV_TO_MEM)
                        d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
                else
                        d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
        } else {
                if (dir == DMA_DEV_TO_MEM)
                        d->ccr |= CCR_TRIGGER_SRC;

                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

                if (port_window)
                        d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
        }
        if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
                d->clnk_ctrl = c->dma_ch;

        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN).  Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: DMA engine defines burst to be the number of dev-width
         * transfers.
         */
        en = burst;
        frame_bytes = es_bytes[es] * en;

        if (sglen >= 2)
                d->using_ll = od->ll123_supported;

        for_each_sg(sgl, sgent, sglen, i) {
                struct omap_sg *osg = &d->sg[i];

                osg->addr = sg_dma_address(sgent);
                osg->en = en;
                osg->fn = sg_dma_len(sgent) / frame_bytes;

                if (d->using_ll) {
                        osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
                                                      &osg->t2_desc_paddr);
                        if (!osg->t2_desc) {
                                dev_err(chan->device->dev,
                                        "t2_desc[%d] allocation failed\n", i);
                                ll_failed = true;
                                d->using_ll = false;
                                continue;
                        }

                        omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
                }
        }

        d->sglen = sglen;

        /* Release the dma_pool entries if one allocation failed */
        if (ll_failed) {
                for (i = 0; i < d->sglen; i++) {
                        struct omap_sg *osg = &d->sg[i];

                        if (osg->t2_desc) {
                                dma_pool_free(od->desc_pool, osg->t2_desc,
                                              osg->t2_desc_paddr);
                                osg->t2_desc = NULL;
                        }
                }
        }

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
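
/*
 * Cyclic (audio-style) transfers are described with a single sg entry whose
 * frame size is the period length; the hardware re-triggers itself either via
 * AUTO_INIT/REPEAT (OMAP1 15xx) or by linking the channel to itself.
 */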
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = CSDP_DATA_TYPE_8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = CSDP_DATA_TYPE_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = CSDP_DATA_TYPE_32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->fi = burst;
        d->es = es;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;

        d->ccr = c->ccr;
        if (dir == DMA_DEV_TO_MEM)
                d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
        else
                d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

        d->cicr = CICR_DROP_IE;
        if (flags & DMA_PREP_INTERRUPT)
                d->cicr |= CICR_FRAME_IE;

        d->csdp = es;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;

                if (dir == DMA_DEV_TO_MEM)
                        d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
                else
                        d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
        } else {
                if (burst)
                        d->ccr |= CCR_SYNC_PACKET;
                else
                        d->ccr |= CCR_SYNC_ELEMENT;

                if (dir == DMA_DEV_TO_MEM) {
                        d->ccr |= CCR_TRIGGER_SRC;
                        d->csdp |= CSDP_DST_PACKED;
                } else {
                        d->csdp |= CSDP_SRC_PACKED;
                }

                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

                d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
        }

        if (__dma_omap15xx(od->plat->dma_attr))
                d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
        else
                d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

        c->cyclic = true;

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}
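
/*
 * Memory-to-memory copy: the element size is the largest power of two that
 * divides source, destination and length. When no completion interrupt is
 * requested the descriptor is marked polled and completion is detected in
 * omap_dma_tx_status().
 */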
static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        size_t len, unsigned long tx_flags)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct omap_desc *d;
        uint8_t data_type;

        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        data_type = __ffs((src | dest | len));
        if (data_type > CSDP_DATA_TYPE_32)
                data_type = CSDP_DATA_TYPE_32;

        d->dir = DMA_MEM_TO_MEM;
        d->dev_addr = src;
        d->fi = 0;
        d->es = data_type;
        d->sg[0].en = len / BIT(data_type);
        d->sg[0].fn = 1;
        d->sg[0].addr = dest;
        d->sglen = 1;
        d->ccr = c->ccr;
        d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

        if (tx_flags & DMA_PREP_INTERRUPT)
                d->cicr |= CICR_FRAME_IE;
        else
                d->polled = true;

        d->csdp = data_type;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;
                d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
        } else {
                d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
                d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
        }

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
        struct dma_chan *chan, struct dma_interleaved_template *xt,
        unsigned long flags)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct omap_desc *d;
        struct omap_sg *sg;
        uint8_t data_type;
        size_t src_icg, dst_icg;

        /* Slave mode is not supported */
        if (is_slave_direction(xt->dir))
                return NULL;

        if (xt->frame_size != 1 || xt->numf == 0)
                return NULL;

        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
        if (data_type > CSDP_DATA_TYPE_32)
                data_type = CSDP_DATA_TYPE_32;

        sg = &d->sg[0];
        d->dir = DMA_MEM_TO_MEM;
        d->dev_addr = xt->src_start;
        d->es = data_type;
        sg->en = xt->sgl[0].size / BIT(data_type);
        sg->fn = xt->numf;
        sg->addr = xt->dst_start;
        d->sglen = 1;
        d->ccr = c->ccr;

        src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
        dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
        if (src_icg) {
                d->ccr |= CCR_SRC_AMODE_DBLIDX;
                d->ei = 1;
                d->fi = src_icg + 1;
        } else if (xt->src_inc) {
                d->ccr |= CCR_SRC_AMODE_POSTINC;
                d->fi = 0;
        } else {
                dev_err(chan->device->dev,
                        "%s: SRC constant addressing is not supported\n",
                        __func__);
                kfree(d);
                return NULL;
        }

        if (dst_icg) {
                d->ccr |= CCR_DST_AMODE_DBLIDX;
                sg->ei = 1;
                sg->fi = dst_icg + 1;
        } else if (xt->dst_inc) {
                d->ccr |= CCR_DST_AMODE_POSTINC;
                sg->fi = 0;
        } else {
                dev_err(chan->device->dev,
                        "%s: DST constant addressing is not supported\n",
                        __func__);
                kfree(d);
                return NULL;
        }

        d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

        d->csdp = data_type;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;
                d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
        } else {
                d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
                d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
        }

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}
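
/*
 * Validate and store the slave configuration: 8-byte bus widths are not
 * supported, and bursts are limited by the 24-bit element counter advertised
 * in max_burst.
 */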
static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        if (cfg->src_maxburst > chan->device->max_burst ||
            cfg->dst_maxburst > chan->device->max_burst)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}

static int omap_dma_terminate_all(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_dma_stop() returns (even if it does, it will see
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
                vchan_terminate_vdesc(&c->desc->vd);
                c->desc = NULL;
                /* Avoid stopping the dma twice */
                if (!c->paused)
                        omap_dma_stop(c);
        }

        c->cyclic = false;
        c->paused = false;

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static void omap_dma_synchronize(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        vchan_synchronize(&c->vc);
}

static int omap_dma_pause(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        unsigned long flags;
        int ret = -EINVAL;
        bool can_pause = false;

        spin_lock_irqsave(&od->irq_lock, flags);

        if (!c->desc)
                goto out;

        if (c->cyclic)
                can_pause = true;

        /*
         * We do not allow DMA_MEM_TO_DEV transfers to be paused.
         * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
         * "When a channel is disabled during a transfer, the channel undergoes
         * an abort, unless it is hardware-source-synchronized …".
         * A source-synchronised channel is one where the fetching of data is
         * under control of the device. In other words, a device-to-memory
         * transfer. So, a destination-synchronised channel (which would be a
         * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
         * bit is cleared.
         * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
         * aborts immediately after completion of current read/write
         * transactions and then the FIFO is cleaned up." The term "cleaned up"
         * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
         * are both clear _before_ disabling the channel, otherwise data loss
         * will occur.
         * The problem is that if the channel is active, then device activity
         * can result in DMA activity starting between reading those as both
         * clear and the write to DMA_CCR to clear the enable bit hitting the
         * hardware. If the DMA hardware can't drain the data in its FIFO to the
         * destination, then data loss "might" occur (say if we write to an UART
         * and the UART is not accepting any further data).
         */
        else if (c->desc->dir == DMA_DEV_TO_MEM)
                can_pause = true;

        if (can_pause && !c->paused) {
                ret = omap_dma_stop(c);
                if (!ret)
                        c->paused = true;
        }
out:
        spin_unlock_irqrestore(&od->irq_lock, flags);

        return ret;
}

static int omap_dma_resume(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&od->irq_lock, flags);

        if (c->paused && c->desc) {
                mb();

                /* Restore channel link register */
                omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

                omap_dma_start(c, c->desc);
                c->paused = false;
                ret = 0;
        }

        spin_unlock_irqrestore(&od->irq_lock, flags);

        return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->reg_map = od->reg_map;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);

        return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
}

#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
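
/*
 * Probe: map the controller, populate the dma_device callbacks and
 * capabilities, create one virtual channel per DMA request line, hook up the
 * L1 interrupt, and register with the DMA engine core (and with OF when
 * booted from a device tree).
 */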
static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        struct resource *res;
        int rc, i, irq;
        u32 lch_count;

        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        od->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(od->base))
                return PTR_ERR(od->base);

        od->plat = omap_get_plat_info();
        if (!od->plat)
                return -EPROBE_DEFER;

        od->reg_map = od->plat->reg_map;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
        dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
        od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
        od->ddev.device_config = omap_dma_slave_config;
        od->ddev.device_pause = omap_dma_pause;
        od->ddev.device_resume = omap_dma_resume;
        od->ddev.device_terminate_all = omap_dma_terminate_all;
        od->ddev.device_synchronize = omap_dma_synchronize;
        od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        if (__dma_omap15xx(od->plat->dma_attr))
                od->ddev.residue_granularity =
                                DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        else
                od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        spin_lock_init(&od->lock);
        spin_lock_init(&od->irq_lock);

        /* Number of DMA requests */
        od->dma_requests = OMAP_SDMA_REQUESTS;
        if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
                                                      "dma-requests",
                                                      &od->dma_requests)) {
                dev_info(&pdev->dev,
                         "Missing dma-requests property, using %u.\n",
                         OMAP_SDMA_REQUESTS);
        }

        /* Number of available logical channels */
        if (!pdev->dev.of_node) {
                lch_count = od->plat->dma_attr->lch_count;
                if (unlikely(!lch_count))
                        lch_count = OMAP_SDMA_CHANNELS;
        } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
                                        &lch_count)) {
                dev_info(&pdev->dev,
                         "Missing dma-channels property, using %u.\n",
                         OMAP_SDMA_CHANNELS);
                lch_count = OMAP_SDMA_CHANNELS;
        }

        od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
                                   GFP_KERNEL);
        if (!od->lch_map)
                return -ENOMEM;

        for (i = 0; i < od->dma_requests; i++) {
                rc = omap_dma_chan_init(od);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        irq = platform_get_irq(pdev, 1);
        if (irq <= 0) {
                dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
                od->legacy = true;
        } else {
                /* Disable all interrupts */
                od->irq_enable_mask = 0;
                omap_dma_glbl_write(od, IRQENABLE_L1, 0);

                rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
                                      IRQF_SHARED, "omap-dma-engine", od);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
                od->ll123_supported = true;

        od->ddev.filter.map = od->plat->slave_map;
        od->ddev.filter.mapcnt = od->plat->slavecnt;
        od->ddev.filter.fn = omap_dma_filter_fn;

        if (od->ll123_supported) {
                od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
                                                &pdev->dev,
                                                sizeof(struct omap_type2_desc),
                                                4, 0);
                if (!od->desc_pool) {
                        dev_err(&pdev->dev,
                                "unable to allocate descriptor pool\n");
                        od->ll123_supported = false;
                }
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
                return rc;
        }

        platform_set_drvdata(pdev, od);

        if (pdev->dev.of_node) {
                omap_dma_info.dma_cap = od->ddev.cap_mask;

                /* Device-tree DMA controller registration */
                rc = of_dma_controller_register(pdev->dev.of_node,
                                of_dma_simple_xlate, &omap_dma_info);
                if (rc) {
                        pr_warn("OMAP-DMA: failed to register DMA controller\n");
                        dma_async_device_unregister(&od->ddev);
                        omap_dma_free(od);
                }
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
                 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");

        return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);
        int irq;

        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);

        irq = platform_get_irq(pdev, 1);
        devm_free_irq(&pdev->dev, irq, od);

        dma_async_device_unregister(&od->ddev);

        /* Disable all interrupts */
        omap_dma_glbl_write(od, IRQENABLE_L0, 0);

        if (od->ll123_supported)
                dma_pool_destroy(od->desc_pool);

        omap_dma_free(od);

        return 0;
}

static const struct of_device_id omap_dma_match[] = {
        { .compatible = "ti,omap2420-sdma", },
        { .compatible = "ti,omap2430-sdma", },
        { .compatible = "ti,omap3430-sdma", },
        { .compatible = "ti,omap3630-sdma", },
        { .compatible = "ti,omap4430-sdma", },
        {},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
        .probe	= omap_dma_probe,
        .remove	= omap_dma_remove,
        .driver = {
                .name = "omap-dma-engine",
                .of_match_table = of_match_ptr(omap_dma_match),
        },
};
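
/*
 * dmaengine filter function: accept only channels belonging to this driver
 * and record the requested sDMA request line for the channel.
 */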
static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_dmadev *od = to_omap_dma_dev(chan->device);
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                if (req <= od->dma_requests) {
                        c->dma_sig = req;
                        return true;
                }
        }
        return false;
}

static int omap_dma_init(void)
{
        return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");