/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32

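/*
 * The sDMA controller exposes 127 hardware DMA request lines by default
 * (overridable via the dma-requests DT property) but only 32 logical
 * channels; a channel is bound to a request line (c->dma_sig) when it is
 * allocated.
 */
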
struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        void __iomem *base;
        const struct omap_dma_reg *reg_map;
        struct omap_system_dma_plat_info *plat;
        bool legacy;
        unsigned dma_requests;
        spinlock_t irq_lock;
        uint32_t irq_enable_mask;
        struct omap_chan *lch_map[OMAP_SDMA_CHANNELS];
};

struct omap_chan {
        struct virt_dma_chan vc;
        void __iomem *channel_base;
        const struct omap_dma_reg *reg_map;
        uint32_t ccr;

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
        bool paused;
        bool running;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};

struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
        int32_t fi;             /* for double indexing */
        int16_t ei;             /* for double indexing */
};

struct omap_desc {
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;

        int32_t fi;             /* for OMAP_DMA_SYNC_PACKET / double indexing */
        int16_t ei;             /* for double indexing */
        uint8_t es;             /* CSDP_DATA_TYPE_xxx */
        uint32_t ccr;           /* CCR value */
        uint16_t clnk_ctrl;     /* CLNK_CTRL value */
        uint16_t cicr;          /* CICR value */
        uint32_t csdp;          /* CSDP value */

        unsigned sglen;
        struct omap_sg sg[0];
};

enum {
        CCR_FS                  = BIT(5),
        CCR_READ_PRIORITY       = BIT(6),
        CCR_ENABLE              = BIT(7),
        CCR_AUTO_INIT           = BIT(8),       /* OMAP1 only */
        CCR_REPEAT              = BIT(9),       /* OMAP1 only */
        CCR_OMAP31_DISABLE      = BIT(10),      /* OMAP1 only */
        CCR_SUSPEND_SENSITIVE   = BIT(8),       /* OMAP2+ only */
        CCR_RD_ACTIVE           = BIT(9),       /* OMAP2+ only */
        CCR_WR_ACTIVE           = BIT(10),      /* OMAP2+ only */
        CCR_SRC_AMODE_CONSTANT  = 0 << 12,
        CCR_SRC_AMODE_POSTINC   = 1 << 12,
        CCR_SRC_AMODE_SGLIDX    = 2 << 12,
        CCR_SRC_AMODE_DBLIDX    = 3 << 12,
        CCR_DST_AMODE_CONSTANT  = 0 << 14,
        CCR_DST_AMODE_POSTINC   = 1 << 14,
        CCR_DST_AMODE_SGLIDX    = 2 << 14,
        CCR_DST_AMODE_DBLIDX    = 3 << 14,
        CCR_CONSTANT_FILL       = BIT(16),
        CCR_TRANSPARENT_COPY    = BIT(17),
        CCR_BS                  = BIT(18),
        CCR_SUPERVISOR          = BIT(22),
        CCR_PREFETCH            = BIT(23),
        CCR_TRIGGER_SRC         = BIT(24),
        CCR_BUFFERING_DISABLE   = BIT(25),
        CCR_WRITE_PRIORITY      = BIT(26),
        CCR_SYNC_ELEMENT        = 0,
        CCR_SYNC_FRAME          = CCR_FS,
        CCR_SYNC_BLOCK          = CCR_BS,
        CCR_SYNC_PACKET         = CCR_BS | CCR_FS,

        CSDP_DATA_TYPE_8        = 0,
        CSDP_DATA_TYPE_16       = 1,
        CSDP_DATA_TYPE_32       = 2,
        CSDP_SRC_PORT_EMIFF     = 0 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_EMIFS     = 1 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_OCP_T1    = 2 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_TIPB      = 3 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_OCP_T2    = 4 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_MPUI      = 5 << 2,       /* OMAP1 only */
        CSDP_SRC_PACKED         = BIT(6),
        CSDP_SRC_BURST_1        = 0 << 7,
        CSDP_SRC_BURST_16       = 1 << 7,
        CSDP_SRC_BURST_32       = 2 << 7,
        CSDP_SRC_BURST_64       = 3 << 7,
        CSDP_DST_PORT_EMIFF     = 0 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_EMIFS     = 1 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_OCP_T1    = 2 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_TIPB      = 3 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_OCP_T2    = 4 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_MPUI      = 5 << 9,       /* OMAP1 only */
        CSDP_DST_PACKED         = BIT(13),
        CSDP_DST_BURST_1        = 0 << 14,
        CSDP_DST_BURST_16       = 1 << 14,
        CSDP_DST_BURST_32       = 2 << 14,
        CSDP_DST_BURST_64       = 3 << 14,

        CICR_TOUT_IE            = BIT(0),       /* OMAP1 only */
        CICR_DROP_IE            = BIT(1),
        CICR_HALF_IE            = BIT(2),
        CICR_FRAME_IE           = BIT(3),
        CICR_LAST_IE            = BIT(4),
        CICR_BLOCK_IE           = BIT(5),
        CICR_PKT_IE             = BIT(7),       /* OMAP2+ only */
        CICR_TRANS_ERR_IE       = BIT(8),       /* OMAP2+ only */
        CICR_SUPERVISOR_ERR_IE  = BIT(10),      /* OMAP2+ only */
        CICR_MISALIGNED_ERR_IE  = BIT(11),      /* OMAP2+ only */
        CICR_DRAIN_IE           = BIT(12),      /* OMAP2+ only */
        CICR_SUPER_BLOCK_IE     = BIT(14),      /* OMAP2+ only */

        CLNK_CTRL_ENABLE_LNK    = BIT(15),
};

static const unsigned es_bytes[] = {
        [CSDP_DATA_TYPE_8] = 1,
        [CSDP_DATA_TYPE_16] = 2,
        [CSDP_DATA_TYPE_32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
        .filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct omap_desc, vd));
}

static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
        switch (type) {
        case OMAP_DMA_REG_16BIT:
                writew_relaxed(val, addr);
                break;
        case OMAP_DMA_REG_2X16BIT:
                writew_relaxed(val, addr);
                writew_relaxed(val >> 16, addr + 2);
                break;
        case OMAP_DMA_REG_32BIT:
                writel_relaxed(val, addr);
                break;
        default:
                WARN_ON(1);
        }
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
        unsigned val;

        switch (type) {
        case OMAP_DMA_REG_16BIT:
                val = readw_relaxed(addr);
                break;
        case OMAP_DMA_REG_2X16BIT:
                val = readw_relaxed(addr);
                val |= readw_relaxed(addr + 2) << 16;
                break;
        case OMAP_DMA_REG_32BIT:
                val = readl_relaxed(addr);
                break;
        default:
                WARN_ON(1);
                val = 0;
        }

        return val;
}

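/*
 * Each entry in the reg_map describes both a register offset and its access
 * type: plain 16-bit, 32-bit, or 2X16BIT, where one logical 32-bit value is
 * split across two consecutive 16-bit locations (as on the OMAP1 flavour of
 * the controller). The helpers above and below hide that difference from
 * the rest of the driver.
 */
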
static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
        const struct omap_dma_reg *r = od->reg_map + reg;

        omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
        const struct omap_dma_reg *r = od->reg_map + reg;

        return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
        const struct omap_dma_reg *r = c->reg_map + reg;

        omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
        const struct omap_dma_reg *r = c->reg_map + reg;

        return omap_dma_read(r->type, c->channel_base + r->offset);
}

static void omap_dma_clear_csr(struct omap_chan *c)
{
        if (dma_omap1())
                omap_dma_chan_read(c, CSR);
        else
                omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
        unsigned val = omap_dma_chan_read(c, CSR);

        if (!dma_omap1())
                omap_dma_chan_write(c, CSR, val);

        return val;
}

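/*
 * Note the asymmetry above: CSR is read-to-clear on OMAP1 but write-to-clear
 * on OMAP2+, so clearing or sampling the channel status either performs a
 * dummy read or writes the sampled bits (or ~0) back to acknowledge them.
 */
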
static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
        unsigned lch)
{
        c->channel_base = od->base + od->plat->channel_stride * lch;

        od->lch_map[lch] = c;
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);

        if (__dma_omap15xx(od->plat->dma_attr))
                omap_dma_chan_write(c, CPC, 0);
        else
                omap_dma_chan_write(c, CDAC, 0);

        omap_dma_clear_csr(c);

        /* Enable interrupts */
        omap_dma_chan_write(c, CICR, d->cicr);

        /* Enable DMA */
        omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

        c->running = true;
}

static void omap_dma_stop(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint32_t val;

        /* disable irq */
        omap_dma_chan_write(c, CICR, 0);

        omap_dma_clear_csr(c);

        val = omap_dma_chan_read(c, CCR);
        if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
                uint32_t sysconfig;
                unsigned i;

                sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
                val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
                val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
                omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

                val = omap_dma_chan_read(c, CCR);
                val &= ~CCR_ENABLE;
                omap_dma_chan_write(c, CCR, val);

                /* Wait for sDMA FIFO to drain */
                for (i = 0; ; i++) {
                        val = omap_dma_chan_read(c, CCR);
                        if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
                                break;

                        if (i > 100)
                                break;

                        udelay(5);
                }

                if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
                        dev_err(c->vc.chan.device->dev,
                                "DMA drain did not complete on lch %d\n",
                                c->dma_ch);

                omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
        } else {
                val &= ~CCR_ENABLE;
                omap_dma_chan_write(c, CCR, val);
        }

        mb();

        if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
                val = omap_dma_chan_read(c, CLNK_CTRL);

                if (dma_omap1())
                        val |= 1 << 14; /* set the STOP_LNK bit */
                else
                        val &= ~CLNK_CTRL_ENABLE_LNK;

                omap_dma_chan_write(c, CLNK_CTRL, val);
        }

        c->running = false;
}

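/*
 * Errata i541: on a source-synchronised channel, clearing CCR_ENABLE alone
 * can leave data stranded in the sDMA FIFO. omap_dma_stop() therefore
 * temporarily forces MIDLEMODE to no-idle, disables the channel, and polls
 * CCR_RD_ACTIVE/CCR_WR_ACTIVE until the FIFO has drained before restoring
 * OCP_SYSCONFIG.
 */
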
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        unsigned idx)
{
        struct omap_sg *sg = d->sg + idx;
        unsigned cxsa, cxei, cxfi;

        if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
                cxsa = CDSA;
                cxei = CDEI;
                cxfi = CDFI;
        } else {
                cxsa = CSSA;
                cxei = CSEI;
                cxfi = CSFI;
        }

        omap_dma_chan_write(c, cxsa, sg->addr);
        omap_dma_chan_write(c, cxei, sg->ei);
        omap_dma_chan_write(c, cxfi, sg->fi);
        omap_dma_chan_write(c, CEN, sg->en);
        omap_dma_chan_write(c, CFN, sg->fn);

        omap_dma_start(c, d);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;
        unsigned cxsa, cxei, cxfi;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        /*
         * This provides the necessary barrier to ensure data held in
         * DMA coherent memory is visible to the DMA engine prior to
         * the transfer starting.
         */
        mb();

        omap_dma_chan_write(c, CCR, d->ccr);
        if (dma_omap1())
                omap_dma_chan_write(c, CCR2, d->ccr >> 16);

        if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
                cxsa = CSSA;
                cxei = CSEI;
                cxfi = CSFI;
        } else {
                cxsa = CDSA;
                cxei = CDEI;
                cxfi = CDFI;
        }

        omap_dma_chan_write(c, cxsa, d->dev_addr);
        omap_dma_chan_write(c, cxei, d->ei);
        omap_dma_chan_write(c, cxfi, d->fi);
        omap_dma_chan_write(c, CSDP, d->csdp);
        omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

        omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (!c->cyclic) {
                        if (++c->sgidx < d->sglen) {
                                omap_dma_start_sg(c, d, c->sgidx);
                        } else {
                                omap_dma_start_desc(c);
                                vchan_cookie_complete(&d->vd);
                        }
                } else {
                        vchan_cyclic_callback(&d->vd);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

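/*
 * Completion flow: for scatter-gather transfers the per-block interrupt
 * advances c->sgidx and programs the next segment; once every segment has
 * been issued, the descriptor is completed and the next queued descriptor
 * (if any) is started. Cyclic transfers never complete; each period simply
 * triggers vchan_cyclic_callback() for the client.
 */
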
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
        struct omap_dmadev *od = devid;
        unsigned status, channel;

        spin_lock(&od->irq_lock);

        status = omap_dma_glbl_read(od, IRQSTATUS_L1);
        status &= od->irq_enable_mask;
        if (status == 0) {
                spin_unlock(&od->irq_lock);
                return IRQ_NONE;
        }

        while ((channel = ffs(status)) != 0) {
                unsigned mask, csr;
                struct omap_chan *c;

                channel -= 1;
                mask = BIT(channel);
                status &= ~mask;

                c = od->lch_map[channel];
                if (c == NULL) {
                        /* This should never happen */
                        dev_err(od->ddev.dev, "invalid channel %u\n", channel);
                        continue;
                }

                csr = omap_dma_get_csr(c);
                omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

                omap_dma_callback(channel, csr, c);
        }

        spin_unlock(&od->irq_lock);

        return IRQ_HANDLED;
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;

        if (od->legacy) {
                ret = omap_request_dma(c->dma_sig, "DMA engine",
                                       omap_dma_callback, c, &c->dma_ch);
        } else {
                ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
                                       &c->dma_ch);
        }

        dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
                c->dma_ch, c->dma_sig);

        if (ret >= 0) {
                omap_dma_assign(od, c, c->dma_ch);

                if (!od->legacy) {
                        unsigned val;

                        spin_lock_irq(&od->irq_lock);
                        val = BIT(c->dma_ch);
                        omap_dma_glbl_write(od, IRQSTATUS_L1, val);
                        od->irq_enable_mask |= val;
                        omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

                        val = omap_dma_glbl_read(od, IRQENABLE_L0);
                        val &= ~BIT(c->dma_ch);
                        omap_dma_glbl_write(od, IRQENABLE_L0, val);
                        spin_unlock_irq(&od->irq_lock);
                }
        }

        if (dma_omap1()) {
                if (__dma_omap16xx(od->plat->dma_attr)) {
                        c->ccr = CCR_OMAP31_DISABLE;
                        /* Duplicate what plat-omap/dma.c does */
                        c->ccr |= c->dma_ch + 1;
                } else {
                        c->ccr = c->dma_sig & 0x1f;
                }
        } else {
                c->ccr = c->dma_sig & 0x1f;
                c->ccr |= (c->dma_sig & ~0x1f) << 14;
        }
        if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
                c->ccr |= CCR_BUFFERING_DISABLE;

        return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);

        spin_lock_irq(&od->irq_lock);
        od->irq_enable_mask &= ~BIT(c->dma_ch);
        omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
        spin_unlock_irq(&od->irq_lock);

        c->channel_base = NULL;
        od->lch_map[c->dma_ch] = NULL;
        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }

        return size;
}

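/*
 * Residue accounting: the helper above walks the descriptor's sg list,
 * counts nothing until it reaches the segment containing the current
 * hardware address, adds the untransferred remainder of that segment, and
 * then adds every following segment in full.
 */
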
/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint32_t val;

        val = omap_dma_chan_read(c, reg);
        if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
                val = omap_dma_chan_read(c, reg);

        return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        dma_addr_t addr, cdac;

        if (__dma_omap15xx(od->plat->dma_attr)) {
                addr = omap_dma_chan_read(c, CPC);
        } else {
                addr = omap_dma_chan_read_3_3(c, CSAC);
                cdac = omap_dma_chan_read_3_3(c, CDAC);

                /*
                 * CDAC == 0 indicates that the DMA transfer on the channel has
                 * not been started (no data has been transferred so far).
                 * Return the programmed source start address in this case.
                 */
                if (cdac == 0)
                        addr = omap_dma_chan_read(c, CSSA);
        }

        if (dma_omap1())
                addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

        return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        dma_addr_t addr;

        if (__dma_omap15xx(od->plat->dma_attr)) {
                addr = omap_dma_chan_read(c, CPC);
        } else {
                addr = omap_dma_chan_read_3_3(c, CDAC);

                /*
                 * CDAC == 0 indicates that the DMA transfer on the channel
                 * has not been started (no data has been transferred so
                 * far). Return the programmed destination start address in
                 * this case.
                 */
                if (addr == 0)
                        addr = omap_dma_chan_read(c, CDSA);
        }

        if (dma_omap1())
                addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

        return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);

        if (!c->paused && c->running) {
                uint32_t ccr = omap_dma_chan_read(c, CCR);
                /*
                 * The channel is no longer active, set the return value
                 * accordingly
                 */
                if (!(ccr & CCR_ENABLE))
                        ret = DMA_COMPLETE;
        }

        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct omap_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_dma_get_src_pos(c);
                else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
                        pos = omap_dma_get_dst_pos(c);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc)
                omap_dma_start_desc(c);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, es, en, frame_bytes;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = CSDP_DATA_TYPE_8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = CSDP_DATA_TYPE_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = CSDP_DATA_TYPE_32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;

        d->ccr = c->ccr | CCR_SYNC_FRAME;
        if (dir == DMA_DEV_TO_MEM)
                d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
        else
                d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

        d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
        d->csdp = es;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;

                if (dir == DMA_DEV_TO_MEM)
                        d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
                else
                        d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
        } else {
                if (dir == DMA_DEV_TO_MEM)
                        d->ccr |= CCR_TRIGGER_SRC;

                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
        }
        if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
                d->clnk_ctrl = c->dma_ch;

        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN). Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: DMA engine defines burst to be the number of dev-width
         * transfers.
         */
        en = burst;
        frame_bytes = es_bytes[es] * en;
        for_each_sg(sgl, sgent, sglen, i) {
                d->sg[i].addr = sg_dma_address(sgent);
                d->sg[i].en = en;
                d->sg[i].fn = sg_dma_len(sgent) / frame_bytes;
        }

        d->sglen = sglen;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

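/*
 * For reference, a minimal sketch of how a client driver would reach
 * omap_dma_prep_slave_sg() through the generic dmaengine API; the FIFO
 * address, channel name and completion hook below are illustrative only:
 *
 *      struct dma_slave_config cfg = {
 *              .dst_addr = fifo_phys_addr,             (hypothetical FIFO)
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst = 16,
 *      };
 *      struct dma_chan *chan = dma_request_chan(dev, "tx");
 *      struct dma_async_tx_descriptor *tx;
 *
 *      dmaengine_slave_config(chan, &cfg);
 *      tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *                                   DMA_PREP_INTERRUPT);
 *      tx->callback = xfer_done;                       (hypothetical)
 *      dmaengine_submit(tx);
 *      dma_async_issue_pending(chan);
 */
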
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = CSDP_DATA_TYPE_8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = CSDP_DATA_TYPE_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = CSDP_DATA_TYPE_32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;

        d->ccr = c->ccr;
        if (dir == DMA_DEV_TO_MEM)
                d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
        else
                d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

        d->cicr = CICR_DROP_IE;
        if (flags & DMA_PREP_INTERRUPT)
                d->cicr |= CICR_FRAME_IE;

        d->csdp = es;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;

                if (dir == DMA_DEV_TO_MEM)
                        d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
                else
                        d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
        } else {
                if (burst)
                        d->ccr |= CCR_SYNC_PACKET;
                else
                        d->ccr |= CCR_SYNC_ELEMENT;

                if (dir == DMA_DEV_TO_MEM) {
                        d->ccr |= CCR_TRIGGER_SRC;
                        d->csdp |= CSDP_DST_PACKED;
                } else {
                        d->csdp |= CSDP_SRC_PACKED;
                }

                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

                d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
        }

        if (__dma_omap15xx(od->plat->dma_attr))
                d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
        else
                d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

        c->cyclic = true;

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

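/*
 * Cyclic transfers use a single sg entry: EN is the number of elements per
 * period and FN the number of periods in the buffer. On OMAP1 (15xx) the
 * channel repeats via CCR_AUTO_INIT/CCR_REPEAT; otherwise it is linked back
 * to itself through CLNK_CTRL so the hardware wraps around on its own, and
 * CICR_FRAME_IE raises one interrupt per completed period.
 */
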
static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        size_t len, unsigned long tx_flags)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct omap_desc *d;
        uint8_t data_type;

        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        data_type = __ffs((src | dest | len));
        if (data_type > CSDP_DATA_TYPE_32)
                data_type = CSDP_DATA_TYPE_32;

        d->dir = DMA_MEM_TO_MEM;
        d->dev_addr = src;
        d->es = data_type;
        d->sg[0].en = len / BIT(data_type);
        d->sg[0].fn = 1;
        d->sg[0].addr = dest;
        d->sglen = 1;

        d->ccr = c->ccr;
        d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

        d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

        d->csdp = data_type;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;
                d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
        } else {
                d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
                d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
        }

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

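/*
 * The memcpy element size is derived from the common alignment of source,
 * destination and length: __ffs(src | dest | len) gives the largest power of
 * two dividing all three, capped at 32-bit elements, so the transfer always
 * uses the widest element size the buffers permit.
 */
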
static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
        struct dma_chan *chan, struct dma_interleaved_template *xt,
        unsigned long flags)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct omap_desc *d;
        struct omap_sg *sg;
        uint8_t data_type;
        size_t src_icg, dst_icg;

        /* Slave mode is not supported */
        if (is_slave_direction(xt->dir))
                return NULL;

        if (xt->frame_size != 1 || xt->numf == 0)
                return NULL;

        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
        if (data_type > CSDP_DATA_TYPE_32)
                data_type = CSDP_DATA_TYPE_32;

        sg = &d->sg[0];
        d->dir = DMA_MEM_TO_MEM;
        d->dev_addr = xt->src_start;
        d->es = data_type;
        sg->en = xt->sgl[0].size / BIT(data_type);
        sg->fn = xt->numf;
        sg->addr = xt->dst_start;
        d->sglen = 1;
        d->ccr = c->ccr;

        src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
        dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
        if (src_icg) {
                d->ccr |= CCR_SRC_AMODE_DBLIDX;
                d->ei = 1;
                d->fi = src_icg;
        } else if (xt->src_inc) {
                d->ccr |= CCR_SRC_AMODE_POSTINC;
                d->fi = 0;
        } else {
                dev_err(chan->device->dev,
                        "%s: SRC constant addressing is not supported\n",
                        __func__);
                kfree(d);
                return NULL;
        }

        if (dst_icg) {
                d->ccr |= CCR_DST_AMODE_DBLIDX;
                sg->ei = 1;
                sg->fi = dst_icg;
        } else if (xt->dst_inc) {
                d->ccr |= CCR_DST_AMODE_POSTINC;
                sg->fi = 0;
        } else {
                dev_err(chan->device->dev,
                        "%s: DST constant addressing is not supported\n",
                        __func__);
                kfree(d);
                return NULL;
        }

        d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

        d->csdp = data_type;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;
                d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
        } else {
                d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
                d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
        }

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}

static int omap_dma_terminate_all(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_dma_stop() returns (even if it does, it will see
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
                omap_dma_desc_free(&c->desc->vd);
                c->desc = NULL;
                /* Avoid stopping the dma twice */
                if (!c->paused)
                        omap_dma_stop(c);
        }

        if (c->cyclic) {
                c->cyclic = false;
                c->paused = false;
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static void omap_dma_synchronize(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        vchan_synchronize(&c->vc);
}

static int omap_dma_pause(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (!c->paused) {
                omap_dma_stop(c);
                c->paused = true;
        }

        return 0;
}

static int omap_dma_resume(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (c->paused) {
                mb();

                /* Restore channel link register */
                omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

                omap_dma_start(c, c->desc);
                c->paused = false;
        }

        return 0;
}

static int omap_dma_chan_init(struct omap_dmadev *od)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->reg_map = od->reg_map;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);

        return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
}

#define OMAP_DMA_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        struct resource *res;
        int rc, i, irq;

        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        od->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(od->base))
                return PTR_ERR(od->base);

        od->plat = omap_get_plat_info();
        if (!od->plat)
                return -EPROBE_DEFER;

        od->reg_map = od->plat->reg_map;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
        dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
        od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
        od->ddev.device_config = omap_dma_slave_config;
        od->ddev.device_pause = omap_dma_pause;
        od->ddev.device_resume = omap_dma_resume;
        od->ddev.device_terminate_all = omap_dma_terminate_all;
        od->ddev.device_synchronize = omap_dma_synchronize;
        od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        spin_lock_init(&od->lock);
        spin_lock_init(&od->irq_lock);

        od->dma_requests = OMAP_SDMA_REQUESTS;
        if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
                                                      "dma-requests",
                                                      &od->dma_requests)) {
                dev_info(&pdev->dev,
                         "Missing dma-requests property, using %u.\n",
                         OMAP_SDMA_REQUESTS);
        }

        for (i = 0; i < OMAP_SDMA_CHANNELS; i++) {
                rc = omap_dma_chan_init(od);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        irq = platform_get_irq(pdev, 1);
        if (irq <= 0) {
                dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
                od->legacy = true;
        } else {
                /* Disable all interrupts */
                od->irq_enable_mask = 0;
                omap_dma_glbl_write(od, IRQENABLE_L1, 0);

                rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
                                      IRQF_SHARED, "omap-dma-engine", od);
                if (rc)
                        return rc;
        }

        od->ddev.filter.map = od->plat->slave_map;
        od->ddev.filter.mapcnt = od->plat->slavecnt;
        od->ddev.filter.fn = omap_dma_filter_fn;

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
                return rc;
        }

        platform_set_drvdata(pdev, od);

        if (pdev->dev.of_node) {
                omap_dma_info.dma_cap = od->ddev.cap_mask;

                /* Device-tree DMA controller registration */
                rc = of_dma_controller_register(pdev->dev.of_node,
                                of_dma_simple_xlate, &omap_dma_info);
                if (rc) {
                        pr_warn("OMAP-DMA: failed to register DMA controller\n");
                        dma_async_device_unregister(&od->ddev);
                        omap_dma_free(od);
                }
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver\n");

        return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);
        int irq;

        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);

        irq = platform_get_irq(pdev, 1);
        devm_free_irq(&pdev->dev, irq, od);

        dma_async_device_unregister(&od->ddev);

        if (!od->legacy) {
                /* Disable all interrupts */
                omap_dma_glbl_write(od, IRQENABLE_L0, 0);
        }

        omap_dma_free(od);

        return 0;
}

static const struct of_device_id omap_dma_match[] = {
        { .compatible = "ti,omap2420-sdma", },
        { .compatible = "ti,omap2430-sdma", },
        { .compatible = "ti,omap3430-sdma", },
        { .compatible = "ti,omap3630-sdma", },
        { .compatible = "ti,omap4430-sdma", },
        {},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
        .probe  = omap_dma_probe,
        .remove = omap_dma_remove,
        .driver = {
                .name = "omap-dma-engine",
                .of_match_table = of_match_ptr(omap_dma_match),
        },
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_dmadev *od = to_omap_dma_dev(chan->device);
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                if (req <= od->dma_requests) {
                        c->dma_sig = req;
                        return true;
                }
        }
        return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

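/*
 * A minimal sketch of the legacy (non-DT) way a client could claim a channel
 * for a given sDMA request line through this filter; the request number is
 * illustrative only:
 *
 *      dma_cap_mask_t mask;
 *      unsigned int sig = 49;                          (hypothetical line)
 *      struct dma_chan *chan;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */
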
static int omap_dma_init(void)
{
        return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");