// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP DMAengine support
 */
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "../virt-dma.h"

#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32
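
/* Per-SoC feature flags, selected through the of_device_id match data in probe */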
struct omap_dma_config {
	int lch_end;
	unsigned int rw_priority:1;
	unsigned int needs_busy_check:1;
	unsigned int may_lose_context:1;
	unsigned int needs_lch_clear:1;
};
struct omap_dma_context {
	u32 irqenable_l0;
	u32 irqenable_l1;
	u32 ocp_sysconfig;
	u32 gcr;
};
struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	bool legacy;
	const struct omap_dma_config *cfg;
	struct notifier_block nb;
	struct omap_dma_context context;
	int lch_count;
	DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS);
	struct mutex lch_lock;		/* for assigning logical channels */
	bool ll123_supported;
	struct dma_pool *desc_pool;
	unsigned dma_requests;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan **lch_map;
};
struct omap_chan {
	struct virt_dma_chan vc;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;
	bool running;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};
#define DESC_NXT_SV_REFRESH	(0x1 << 24)
#define DESC_NXT_SV_REUSE	(0x2 << 24)
#define DESC_NXT_DV_REFRESH	(0x1 << 26)
#define DESC_NXT_DV_REUSE	(0x2 << 26)
#define DESC_NTYPE_TYPE2	(0x2 << 29)
/* Type 2 descriptor with Source or Destination address update */
struct omap_type2_desc {
	uint32_t next_desc;
	uint32_t en;
	uint32_t addr;		/* src or dst */
	uint16_t fn;
	uint16_t cicr;
	int16_t cdei;
	int16_t csei;
	int32_t cdfi;
	int32_t csfi;
} __packed;

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
	int32_t fi;		/* for double indexing */
	int16_t ei;		/* for double indexing */

	/* Linked list */
	struct omap_type2_desc *t2_desc;
	dma_addr_t t2_desc_paddr;
};
struct omap_desc {
	struct virt_dma_desc vd;
	bool using_ll;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;
	bool polled;

	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
	int16_t ei;		/* for double indexing */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[];
};
enum {
	CAPS_0_SUPPORT_LL123	= BIT(20),	/* Linked List type1/2/3 */
	CAPS_0_SUPPORT_LL4	= BIT(21),	/* Linked List type4 */

	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,
	CSDP_WRITE_NON_POSTED		= 0 << 16,
	CSDP_WRITE_POSTED		= 1 << 16,
	CSDP_WRITE_LAST_NON_POSTED	= 2 << 16,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),

	CDP_DST_VALID_INC	= 0 << 0,
	CDP_DST_VALID_RELOAD	= 1 << 0,
	CDP_DST_VALID_REUSE	= 2 << 0,
	CDP_SRC_VALID_INC	= 0 << 2,
	CDP_SRC_VALID_RELOAD	= 1 << 2,
	CDP_SRC_VALID_REUSE	= 2 << 2,
	CDP_NTYPE_TYPE1		= 1 << 4,
	CDP_NTYPE_TYPE2		= 2 << 4,
	CDP_NTYPE_TYPE3		= 3 << 4,
	CDP_TMODE_NORMAL	= 0 << 8,
	CDP_TMODE_LLIST		= 1 << 8,

	CDP_FAST		= BIT(10),
};
static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};
static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};
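
/* Convert the generic dmaengine structures to the driver-private ones */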
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}
static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}
static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	struct omap_desc *d = to_omap_dma_desc(&vd->tx);

	if (d->using_ll) {
		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
		int i;

		for (i = 0; i < d->sglen; i++) {
			if (d->sg[i].t2_desc)
				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
					      d->sg[i].t2_desc_paddr);
		}
	}

	kfree(d);
}
static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
				     enum dma_transfer_direction dir, bool last)
{
	struct omap_sg *sg = &d->sg[idx];
	struct omap_type2_desc *t2_desc = sg->t2_desc;

	if (idx)
		d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
	if (last)
		t2_desc->next_desc = 0xfffffffc;

	t2_desc->en = sg->en;
	t2_desc->addr = sg->addr;
	t2_desc->fn = sg->fn & 0xffff;
	t2_desc->cicr = d->cicr;
	if (!last)
		t2_desc->cicr &= ~CICR_BLOCK_IE;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		t2_desc->cdei = sg->ei;
		t2_desc->csei = d->ei;
		t2_desc->cdfi = sg->fi;
		t2_desc->csfi = d->fi;

		t2_desc->en |= DESC_NXT_DV_REFRESH;
		t2_desc->en |= DESC_NXT_SV_REUSE;
		break;
	case DMA_MEM_TO_DEV:
		t2_desc->cdei = d->ei;
		t2_desc->csei = sg->ei;
		t2_desc->cdfi = d->fi;
		t2_desc->csfi = sg->fi;

		t2_desc->en |= DESC_NXT_SV_REFRESH;
		t2_desc->en |= DESC_NXT_DV_REUSE;
		break;
	default:
		return;
	}

	t2_desc->en |= DESC_NTYPE_TYPE2;
}
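
/*
 * sDMA registers are 16-bit, 2x16-bit or 32-bit wide; the accessors below
 * dispatch on the register type recorded in the reg_map.
 */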
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}
static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}
static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	omap_dma_write(val, r->type, od->base + r->offset);
}
static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	return omap_dma_read(r->type, od->base + r->offset);
}
static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}
static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}
static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}
static void omap_dma_clear_lch(struct omap_dmadev *od, int lch)
{
	struct omap_chan *c;
	int i;

	c = od->lch_map[lch];
	if (!c)
		return;

	for (i = CSDP; i <= od->cfg->lch_end; i++)
		omap_dma_chan_write(c, i, 0);
}
static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
			    unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}
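
/* Program the channel registers from @d and set CCR_ENABLE to start the transfer */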
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint16_t cicr = d->cicr;

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	if (d->using_ll) {
		uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;

		if (d->dir == DMA_DEV_TO_MEM)
			cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
		else
			cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
		omap_dma_chan_write(c, CDP, cdp);

		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
		omap_dma_chan_write(c, CCDN, 0);
		omap_dma_chan_write(c, CCFN, 0xffff);
		omap_dma_chan_write(c, CCEN, 0xffffff);

		cicr &= ~CICR_BLOCK_IE;
	} else if (od->ll123_supported) {
		omap_dma_chan_write(c, CDP, 0);
	}

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

	c->running = true;
}
static void omap_dma_drain_chan(struct omap_chan *c)
{
	int i;
	u32 val;

	/* Wait for sDMA FIFO to drain */
	for (i = 0; ; i++) {
		val = omap_dma_chan_read(c, CCR);
		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
			break;

		if (i > 100)
			break;

		udelay(5);
	}

	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
		dev_err(c->vc.chan.device->dev,
			"DMA drain did not complete on lch %d\n",
			c->dma_ch);
}
static int omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		if (!(val & CCR_ENABLE))
			return -EINVAL;

		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
	c->running = false;

	return 0;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_sg *sg = d->sg + c->sgidx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, sg->ei);
	omap_dma_chan_write(c, cxfi, sg->fi);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
	c->sgidx++;
}
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, d->ei);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d);
}
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (c->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else if (d->using_ll || c->sgidx == d->sglen) {
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		} else {
			omap_dma_start_sg(c, d);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}
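
/*
 * Logical channel bookkeeping: channels are handed out from lch_bitmap under
 * lch_lock and their registers are cleared before use.
 */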
static int omap_dma_get_lch(struct omap_dmadev *od, int *lch)
{
	int channel;

	mutex_lock(&od->lch_lock);
	channel = find_first_zero_bit(od->lch_bitmap, od->lch_count);
	if (channel >= od->lch_count)
		goto out_busy;
	set_bit(channel, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);

	omap_dma_clear_lch(od, channel);
	*lch = channel;

	return 0;

out_busy:
	mutex_unlock(&od->lch_lock);

	return -EBUSY;
}
static void omap_dma_put_lch(struct omap_dmadev *od, int lch)
{
	omap_dma_clear_lch(od, lch);
	mutex_lock(&od->lch_lock);
	clear_bit(lch, od->lch_bitmap);
	mutex_unlock(&od->lch_lock);
}
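
/*
 * dmaengine channel hooks: reserve a logical channel and route its interrupt
 * to the L1 interrupt line used by this driver.
 */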
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct device *dev = od->ddev.dev;
	int ret;

	if (od->legacy)
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	else
		ret = omap_dma_get_lch(od, &c->dma_ch);

	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!od->legacy) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!od->legacy) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);

	if (od->legacy)
		omap_free_dma(c->dma_ch);
	else
		omap_dma_put_lch(od, c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
		c->dma_sig);
	c->dma_sig = 0;
}
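
/* Residue accounting: a transfer moves ES * EN * FN bytes per scatterlist entry */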
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}
static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}

	return size;
}
/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}
static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}
static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;
	struct omap_desc *d = NULL;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->desc && c->desc->vd.tx.cookie == cookie)
		d = c->desc;

	if (!txstate)
		goto out;

	if (d) {
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);

		if (vd)
			txstate->residue = omap_dma_desc_size(
						to_omap_dma_desc(&vd->tx));
		else
			txstate->residue = 0;
	}

out:
	if (ret == DMA_IN_PROGRESS && c->paused) {
		ret = DMA_PAUSED;
	} else if (d && d->polled && c->running) {
		uint32_t ccr = omap_dma_chan_read(c, CCR);
		/*
		 * The channel is no longer active, set the return value
		 * accordingly and mark it as completed
		 */
		if (!(ccr & CCR_ENABLE)) {
			ret = DMA_COMPLETE;
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
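
/*
 * Descriptor preparation: slave scatter-gather transfers use type 2
 * linked-list descriptors when the controller advertises support for them.
 */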
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, es, en, frame_bytes;
	bool ll_failed = false;
	u32 burst;
	u32 port_window, port_window_bytes;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		port_window = c->cfg.src_port_window_size;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		port_window = c->cfg.dst_port_window_size;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	/* When the port_window is used, one frame must cover the window */
	if (port_window) {
		burst = port_window;
		port_window_bytes = port_window * es_bytes[es];

		d->ei = 1;
		/*
		 * One frame covers the port_window and by configuring
		 * the source frame index to be -1 * (port_window - 1)
		 * we instruct the sDMA that after a frame is processed
		 * it should move back to the start of the window.
		 */
		d->fi = -(port_window_bytes - 1);
	}

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM) {
		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;

		d->ccr |= CCR_DST_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_SRC_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_SRC_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_SRC_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_SRC_BURST_16;
		} else {
			d->ccr |= CCR_SRC_AMODE_CONSTANT;
		}
	} else {
		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;

		d->ccr |= CCR_SRC_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_DST_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_DST_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_DST_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_DST_BURST_16;
		} else {
			d->ccr |= CCR_DST_AMODE_CONSTANT;
		}
	}

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp |= es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		if (port_window)
			d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;

	d->using_ll = od->ll123_supported;

	for_each_sg(sgl, sgent, sglen, i) {
		struct omap_sg *osg = &d->sg[i];

		osg->addr = sg_dma_address(sgent);
		osg->en = en;
		osg->fn = sg_dma_len(sgent) / frame_bytes;

		if (d->using_ll) {
			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
						      &osg->t2_desc_paddr);
			if (!osg->t2_desc) {
				dev_err(chan->device->dev,
					"t2_desc[%d] allocation failed\n", i);
				ll_failed = true;
				d->using_ll = false;
				continue;
			}

			omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
		}
	}

	d->sglen = sglen;

	/* Release the dma_pool entries if one allocation failed */
	if (ll_failed) {
		for (i = 0; i < d->sglen; i++) {
			struct omap_sg *osg = &d->sg[i];

			if (osg->t2_desc) {
				dma_pool_free(od->desc_pool, osg->t2_desc,
					      osg->t2_desc_paddr);
				osg->t2_desc = NULL;
			}
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM) {
			d->ccr |= CCR_TRIGGER_SRC;
			d->csdp |= CSDP_DST_PACKED;
		} else {
			d->csdp |= CSDP_SRC_PACKED;
		}

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	uint8_t data_type;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((src | dest | len));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = src;
	d->es = data_type;
	d->sg[0].en = len / BIT(data_type);
	d->sg[0].fn = 1;
	d->sg[0].addr = dest;
	d->sglen = 1;
	d->ccr = c->ccr;
	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

	if (tx_flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;
	else
		d->polled = true;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	struct omap_sg *sg;
	uint8_t data_type;
	size_t src_icg, dst_icg;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	sg = &d->sg[0];
	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = xt->src_start;
	d->es = data_type;
	sg->en = xt->sgl[0].size / BIT(data_type);
	sg->fn = xt->numf;
	sg->addr = xt->dst_start;
	d->sglen = 1;
	d->ccr = c->ccr;

	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		d->ccr |= CCR_SRC_AMODE_DBLIDX;
		d->ei = 1;
		d->fi = src_icg + 1;
	} else if (xt->src_inc) {
		d->ccr |= CCR_SRC_AMODE_POSTINC;
		d->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: SRC constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	if (dst_icg) {
		d->ccr |= CCR_DST_AMODE_DBLIDX;
		sg->ei = 1;
		sg->fi = dst_icg + 1;
	} else if (xt->dst_inc) {
		d->ccr |= CCR_DST_AMODE_POSTINC;
		sg->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: DST constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (cfg->src_maxburst > chan->device->max_burst ||
	    cfg->dst_maxburst > chan->device->max_burst)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}
static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	c->cyclic = false;
	c->paused = false;

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static void omap_dma_synchronize(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_synchronize(&c->vc);
}
static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;
	bool can_pause = false;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (!c->desc)
		goto out;

	if (c->cyclic)
		can_pause = true;

	/*
	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
	 * "When a channel is disabled during a transfer, the channel undergoes
	 * an abort, unless it is hardware-source-synchronized …".
	 * A source-synchronised channel is one where the fetching of data is
	 * under control of the device. In other words, a device-to-memory
	 * transfer. So, a destination-synchronised channel (which would be a
	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
	 * bit is cleared.
	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
	 * aborts immediately after completion of current read/write
	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
	 * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
	 * are both clear _before_ disabling the channel, otherwise data loss
	 * will occur.
	 * The problem is that if the channel is active, then device activity
	 * can result in DMA activity starting between reading those as both
	 * clear and the write to DMA_CCR to clear the enable bit hitting the
	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
	 * destination, then data loss "might" occur (say if we write to an UART
	 * and the UART is not accepting any further data).
	 */
	else if (c->desc->dir == DMA_DEV_TO_MEM)
		can_pause = true;

	if (can_pause && !c->paused) {
		ret = omap_dma_stop(c);
		if (!ret)
			c->paused = true;
	}
out:
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}
static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (c->paused && c->desc) {
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
		ret = 0;
	}
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);

	return 0;
}
static void omap_dma_free(struct omap_dmadev *od)
{
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}
/* Currently used by omap2 & 3 to block deeper SoC idle states */
static bool omap_dma_busy(struct omap_dmadev *od)
{
	struct omap_chan *c;
	int lch = -1;

	while (1) {
		lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
		if (lch >= od->lch_count)
			break;
		c = od->lch_map[lch];
		if (!c)
			continue;
		if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
			return true;
	}

	return false;
}
/* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
static int omap_dma_busy_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct omap_dmadev *od;

	od = container_of(nb, struct omap_dmadev, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_dma_busy(od))
			return NOTIFY_BAD;
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		break;
	}

	return NOTIFY_OK;
}
/*
 * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0.
 * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for
 * now. Context save seems to be only currently needed on omap3.
 */
static void omap_dma_context_save(struct omap_dmadev *od)
{
	od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0);
	od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1);
	od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
	od->context.gcr = omap_dma_glbl_read(od, GCR);
}
static void omap_dma_context_restore(struct omap_dmadev *od)
{
	int i;

	omap_dma_glbl_write(od, GCR, od->context.gcr);
	omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig);
	omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0);
	omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1);

	/* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */
	if (od->plat->errata & DMA_ROMCODE_BUG)
		omap_dma_glbl_write(od, IRQSTATUS_L0, 0);

	/* Clear dma channels */
	for (i = 0; i < od->lch_count; i++)
		omap_dma_clear_lch(od, i);
}
/* Currently only used for omap3 */
static int omap_dma_context_notifier(struct notifier_block *nb,
				     unsigned long cmd, void *v)
{
	struct omap_dmadev *od;

	od = container_of(nb, struct omap_dmadev, nb);

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_dma_busy(od))
			return NOTIFY_BAD;
		omap_dma_context_save(od);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		omap_dma_context_restore(od);
		break;
	}

	return NOTIFY_OK;
}
static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate,
			      int max_fifo_depth, int tparams)
{
	u32 val;

	/* Set only for omap2430 and later */
	if (!od->cfg->rw_priority)
		return;

	if (max_fifo_depth == 0)
		max_fifo_depth = 1;
	if (arb_rate == 0)
		arb_rate = 1;

	val = 0xff & max_fifo_depth;
	val |= (0x3 & tparams) << 12;
	val |= (arb_rate & 0xff) << 16;

	omap_dma_glbl_write(od, GCR, val);
}
#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/*
 * No flags currently set for default configuration as omap1 is still
 * using platform data.
 */
static const struct omap_dma_config default_cfg;
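
/*
 * Probe: map the controller, populate the dma_device callbacks, set up the
 * logical channels and the L1 interrupt line used by this driver.
 */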
static int omap_dma_probe(struct platform_device *pdev)
{
	const struct omap_dma_config *conf;
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;
	u32 val;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	conf = of_device_get_match_data(&pdev->dev);
	if (conf) {
		od->cfg = conf;
		od->plat = dev_get_platdata(&pdev->dev);
		if (!od->plat) {
			dev_err(&pdev->dev, "omap_system_dma_plat_info is missing");
			return -ENODEV;
		}
	} else {
		od->cfg = &default_cfg;

		od->plat = omap_get_plat_info();
		if (!od->plat)
			return -EPROBE_DEFER;
	}

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.device_synchronize = omap_dma_synchronize;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (__dma_omap15xx(od->plat->dma_attr))
		od->ddev.residue_granularity =
				DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	else
		od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	mutex_init(&od->lch_lock);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	/* Number of DMA requests */
	od->dma_requests = OMAP_SDMA_REQUESTS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
	}

	/* Number of available logical channels */
	if (!pdev->dev.of_node) {
		od->lch_count = od->plat->dma_attr->lch_count;
		if (unlikely(!od->lch_count))
			od->lch_count = OMAP_SDMA_CHANNELS;
	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
					&od->lch_count)) {
		dev_info(&pdev->dev,
			 "Missing dma-channels property, using %u.\n",
			 OMAP_SDMA_CHANNELS);
		od->lch_count = OMAP_SDMA_CHANNELS;
	}

	/* Mask of allowed logical channels */
	if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node,
						       "dma-channel-mask",
						       &val)) {
		/* Tag channels not in mask as reserved */
		val = ~val;
		bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count);
	}
	if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED)
		bitmap_set(od->lch_bitmap, 0, 2);

	od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count,
				   sizeof(*od->lch_map),
				   GFP_KERNEL);
	if (!od->lch_map)
		return -ENOMEM;

	for (i = 0; i < od->dma_requests; i++) {
		rc = omap_dma_chan_init(od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
		od->ll123_supported = true;

	od->ddev.filter.map = od->plat->slave_map;
	od->ddev.filter.mapcnt = od->plat->slavecnt;
	od->ddev.filter.fn = omap_dma_filter_fn;

	if (od->ll123_supported) {
		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
						&pdev->dev,
						sizeof(struct omap_type2_desc),
						4, 0);
		if (!od->desc_pool) {
			dev_err(&pdev->dev,
				"unable to allocate descriptor pool\n");
			od->ll123_supported = false;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0);

	if (od->cfg->needs_busy_check) {
		od->nb.notifier_call = omap_dma_busy_notifier;
		cpu_pm_register_notifier(&od->nb);
	} else if (od->cfg->may_lose_context) {
		od->nb.notifier_call = omap_dma_context_notifier;
		cpu_pm_register_notifier(&od->nb);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");

	return rc;
}
static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);
	int irq;

	if (od->cfg->may_lose_context)
		cpu_pm_unregister_notifier(&od->nb);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	irq = platform_get_irq(pdev, 1);
	devm_free_irq(&pdev->dev, irq, od);

	dma_async_device_unregister(&od->ddev);

	/* Disable all interrupts */
	omap_dma_glbl_write(od, IRQENABLE_L0, 0);

	if (od->ll123_supported)
		dma_pool_destroy(od->desc_pool);

	omap_dma_free(od);

	return 0;
}
static const struct omap_dma_config omap2420_data = {
	.rw_priority = true,
	.needs_lch_clear = true,
	.needs_busy_check = true,
};

static const struct omap_dma_config omap2430_data = {
	.rw_priority = true,
	.needs_lch_clear = true,
};

static const struct omap_dma_config omap3430_data = {
	.rw_priority = true,
	.needs_lch_clear = true,
	.may_lose_context = true,
};

static const struct omap_dma_config omap3630_data = {
	.rw_priority = true,
	.needs_lch_clear = true,
	.may_lose_context = true,
};

static const struct omap_dma_config omap4_data = {
	.rw_priority = true,
	.needs_lch_clear = true,
};

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", .data = &omap2420_data, },
	{ .compatible = "ti,omap2430-sdma", .data = &omap2430_data, },
	{ .compatible = "ti,omap3430-sdma", .data = &omap3430_data, },
	{ .compatible = "ti,omap3630-sdma", .data = &omap3630_data, },
	{ .compatible = "ti,omap4430-sdma", .data = &omap4_data, },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);
static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = omap_dma_match,
	},
};
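
/*
 * Filter used with dma_request_channel(): matches a channel belonging to this
 * controller and records the requested sDMA request line in the channel.
 */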
static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		if (req <= od->dma_requests) {
			c->dma_sig = req;
			return true;
		}
	}

	return false;
}
static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");