/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"
struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	bool legacy;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan *lch_map[32];
};
struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};
struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};
struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};
enum {
	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2, /* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9, /* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9, /* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9, /* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9, /* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9, /* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9, /* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),
};
static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};
static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}
static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}
static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}
static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	uint32_t val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}
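/*
 * Illustrative note on the accessors above (not a quote from the TRM): when
 * the reg_map marks a register as OMAP_DMA_REG_2X16BIT, one logical 32-bit
 * value is split across two adjacent 16-bit registers.  Writing 0x12345678
 * stores 0x5678 at "addr" and 0x1234 at "addr + 2"; omap_dma_read()
 * reassembles the same value in the opposite direction.
 */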
static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	omap_dma_write(val, r->type, od->base + r->offset);
}
static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	return omap_dma_read(r->type, od->base + r->offset);
}
static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}
static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}
static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (!dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}
static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
	unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, d->cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
}
static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = omap_dma_chan_read(c, CCR);
			if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, 0);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
}
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d, 0);
}
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	if (od->legacy) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
				       &c->dma_ch);
	}

	dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
		c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!od->legacy) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!od->legacy) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}
static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}
/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}
static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}
static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio and in this case the DMA need
		 * to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);

			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
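/*
 * Worked example of the EN/FN mapping above (illustrative only, not part of
 * the driver): assume a client configures a 4-byte bus width with
 * dst_maxburst = 16 and submits one 8192-byte scatterlist entry.  Then
 * es = CSDP_DATA_TYPE_32, en = 16 elements per frame,
 * frame_bytes = 4 * 16 = 64, and fn = 8192 / 64 = 128 frames, so the
 * hardware moves es_bytes * en * fn = 8192 bytes for that entry.
 */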
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
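/*
 * Illustrative cyclic example (not part of the driver): for an audio client
 * with a 2-byte bus width, a 64 KiB ring buffer and an 8 KiB period, the
 * single sg entry above ends up with en = 8192 / 2 = 4096 elements per
 * frame and fn = 65536 / 8192 = 8 frames, so each completed frame
 * corresponds to one period and, with DMA_PREP_INTERRUPT set, generates a
 * frame interrupt via CICR_FRAME_IE.
 */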
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}
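/*
 * Minimal sketch of the client side of the above (illustrative only; the
 * FIFO address "fifo_phys_addr" is the client device's register, not
 * anything defined in this file):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */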
static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		omap_dma_desc_free(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}
static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	return 0;
}
static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}
#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static int omap_dma_device_slave_caps(struct dma_chan *dchan,
				      struct dma_slave_caps *caps)
{
	caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	return 0;
}
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.device_slave_caps = omap_dma_device_slave_caps;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc)
			return rc;
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}
static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);

	if (!od->legacy) {
		/* Disable all interrupts */
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
	}

	omap_dma_free(od);

	return 0;
}
static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);
static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
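/*
 * Minimal sketch of how a client might use the filter above to obtain a
 * channel (illustrative only; "req" is the sDMA request line number the
 * client gets from its own platform data or device tree):
 *
 *	dma_cap_mask_t mask;
 *	unsigned req = ...;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
 *						&req, dev, "rx");
 */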
static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);
static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");