/* drivers/dma/omap-dma.c */
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        struct tasklet_struct task;
        struct list_head pending;
        void __iomem *base;
        const struct omap_dma_reg *reg_map;
        struct omap_system_dma_plat_info *plat;
        bool legacy;
        spinlock_t irq_lock;
        uint32_t irq_enable_mask;
        struct omap_chan *lch_map[32];
};

struct omap_chan {
        struct virt_dma_chan vc;
        struct list_head node;
        void __iomem *channel_base;
        const struct omap_dma_reg *reg_map;
        uint32_t ccr;

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
        bool paused;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};

struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
};
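
/*
 * One descriptor per prepared transfer.  Each sg entry moves
 * ES * EN * FN bytes, where ES is the element size selected by
 * d->es, EN the number of elements per frame and FN the number
 * of frames.
 */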
struct omap_desc {
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;

        int16_t fi;             /* for OMAP_DMA_SYNC_PACKET */
        uint8_t es;             /* CSDP_DATA_TYPE_xxx */
        uint32_t ccr;           /* CCR value */
        uint16_t clnk_ctrl;     /* CLNK_CTRL value */
        uint16_t cicr;          /* CICR value */
        uint32_t csdp;          /* CSDP value */

        unsigned sglen;
        struct omap_sg sg[0];
};

enum {
        CCR_FS                  = BIT(5),
        CCR_READ_PRIORITY       = BIT(6),
        CCR_ENABLE              = BIT(7),
        CCR_AUTO_INIT           = BIT(8),       /* OMAP1 only */
        CCR_REPEAT              = BIT(9),       /* OMAP1 only */
        CCR_OMAP31_DISABLE      = BIT(10),      /* OMAP1 only */
        CCR_SUSPEND_SENSITIVE   = BIT(8),       /* OMAP2+ only */
        CCR_RD_ACTIVE           = BIT(9),       /* OMAP2+ only */
        CCR_WR_ACTIVE           = BIT(10),      /* OMAP2+ only */
        CCR_SRC_AMODE_CONSTANT  = 0 << 12,
        CCR_SRC_AMODE_POSTINC   = 1 << 12,
        CCR_SRC_AMODE_SGLIDX    = 2 << 12,
        CCR_SRC_AMODE_DBLIDX    = 3 << 12,
        CCR_DST_AMODE_CONSTANT  = 0 << 14,
        CCR_DST_AMODE_POSTINC   = 1 << 14,
        CCR_DST_AMODE_SGLIDX    = 2 << 14,
        CCR_DST_AMODE_DBLIDX    = 3 << 14,
        CCR_CONSTANT_FILL       = BIT(16),
        CCR_TRANSPARENT_COPY    = BIT(17),
        CCR_BS                  = BIT(18),
        CCR_SUPERVISOR          = BIT(22),
        CCR_PREFETCH            = BIT(23),
        CCR_TRIGGER_SRC         = BIT(24),
        CCR_BUFFERING_DISABLE   = BIT(25),
        CCR_WRITE_PRIORITY      = BIT(26),
        CCR_SYNC_ELEMENT        = 0,
        CCR_SYNC_FRAME          = CCR_FS,
        CCR_SYNC_BLOCK          = CCR_BS,
        CCR_SYNC_PACKET         = CCR_BS | CCR_FS,

        CSDP_DATA_TYPE_8        = 0,
        CSDP_DATA_TYPE_16       = 1,
        CSDP_DATA_TYPE_32       = 2,
        CSDP_SRC_PORT_EMIFF     = 0 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_EMIFS     = 1 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_OCP_T1    = 2 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_TIPB      = 3 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_OCP_T2    = 4 << 2,       /* OMAP1 only */
        CSDP_SRC_PORT_MPUI      = 5 << 2,       /* OMAP1 only */
        CSDP_SRC_PACKED         = BIT(6),
        CSDP_SRC_BURST_1        = 0 << 7,
        CSDP_SRC_BURST_16       = 1 << 7,
        CSDP_SRC_BURST_32       = 2 << 7,
        CSDP_SRC_BURST_64       = 3 << 7,
        CSDP_DST_PORT_EMIFF     = 0 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_EMIFS     = 1 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_OCP_T1    = 2 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_TIPB      = 3 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_OCP_T2    = 4 << 9,       /* OMAP1 only */
        CSDP_DST_PORT_MPUI      = 5 << 9,       /* OMAP1 only */
        CSDP_DST_PACKED         = BIT(13),
        CSDP_DST_BURST_1        = 0 << 14,
        CSDP_DST_BURST_16       = 1 << 14,
        CSDP_DST_BURST_32       = 2 << 14,
        CSDP_DST_BURST_64       = 3 << 14,

        CICR_TOUT_IE            = BIT(0),       /* OMAP1 only */
        CICR_DROP_IE            = BIT(1),
        CICR_HALF_IE            = BIT(2),
        CICR_FRAME_IE           = BIT(3),
        CICR_LAST_IE            = BIT(4),
        CICR_BLOCK_IE           = BIT(5),
        CICR_PKT_IE             = BIT(7),       /* OMAP2+ only */
        CICR_TRANS_ERR_IE       = BIT(8),       /* OMAP2+ only */
        CICR_SUPERVISOR_ERR_IE  = BIT(10),      /* OMAP2+ only */
        CICR_MISALIGNED_ERR_IE  = BIT(11),      /* OMAP2+ only */
        CICR_DRAIN_IE           = BIT(12),      /* OMAP2+ only */
        CICR_SUPER_BLOCK_IE     = BIT(14),      /* OMAP2+ only */

        CLNK_CTRL_ENABLE_LNK    = BIT(15),
};

static const unsigned es_bytes[] = {
        [CSDP_DATA_TYPE_8] = 1,
        [CSDP_DATA_TYPE_16] = 2,
        [CSDP_DATA_TYPE_32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
        .filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct omap_desc, vd));
}
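
/*
 * Low-level register accessors.  The per-SoC reg_map gives each
 * register's offset and width: OMAP1 uses 16-bit registers, with some
 * 32-bit values split across a pair of 16-bit registers
 * (OMAP_DMA_REG_2X16BIT), while OMAP2+ registers are plain 32-bit.
 */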
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
        switch (type) {
        case OMAP_DMA_REG_16BIT:
                writew_relaxed(val, addr);
                break;
        case OMAP_DMA_REG_2X16BIT:
                writew_relaxed(val, addr);
                writew_relaxed(val >> 16, addr + 2);
                break;
        case OMAP_DMA_REG_32BIT:
                writel_relaxed(val, addr);
                break;
        default:
                WARN_ON(1);
        }
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
        unsigned val;

        switch (type) {
        case OMAP_DMA_REG_16BIT:
                val = readw_relaxed(addr);
                break;
        case OMAP_DMA_REG_2X16BIT:
                val = readw_relaxed(addr);
                val |= readw_relaxed(addr + 2) << 16;
                break;
        case OMAP_DMA_REG_32BIT:
                val = readl_relaxed(addr);
                break;
        default:
                WARN_ON(1);
                val = 0;
        }

        return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
        const struct omap_dma_reg *r = od->reg_map + reg;

        WARN_ON(r->stride);

        omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
        const struct omap_dma_reg *r = od->reg_map + reg;

        WARN_ON(r->stride);

        return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
        const struct omap_dma_reg *r = c->reg_map + reg;

        omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
        const struct omap_dma_reg *r = c->reg_map + reg;

        return omap_dma_read(r->type, c->channel_base + r->offset);
}
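
/*
 * CSR semantics differ between generations: on OMAP1 the channel
 * status register is cleared by reading it, whereas on OMAP2+ the
 * status bits are write-one-to-clear.
 */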
static void omap_dma_clear_csr(struct omap_chan *c)
{
        if (dma_omap1())
                omap_dma_chan_read(c, CSR);
        else
                omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
        unsigned val = omap_dma_chan_read(c, CSR);

        if (!dma_omap1())
                omap_dma_chan_write(c, CSR, val);

        return val;
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
        unsigned lch)
{
        c->channel_base = od->base + od->plat->channel_stride * lch;

        od->lch_map[lch] = c;
}
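
/*
 * Start a channel: reset the transfer progress register (CPC on
 * OMAP15xx, CDAC otherwise), clear stale status, program the
 * interrupt enables from the descriptor, then set CCR_ENABLE.
 */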
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);

        if (__dma_omap15xx(od->plat->dma_attr))
                omap_dma_chan_write(c, CPC, 0);
        else
                omap_dma_chan_write(c, CDAC, 0);

        omap_dma_clear_csr(c);

        /* Enable interrupts */
        omap_dma_chan_write(c, CICR, d->cicr);

        /* Enable channel */
        omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
}
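
/*
 * Stop a channel.  For the i541 erratum, source-triggered channels
 * are disabled with the controller forced to no-idle mode, then
 * CCR_RD_ACTIVE/CCR_WR_ACTIVE are polled (up to ~100 iterations,
 * 5us apart) so the sDMA FIFO can drain before OCP_SYSCONFIG is
 * restored.
 */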
static void omap_dma_stop(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint32_t val;

        /* disable irq */
        omap_dma_chan_write(c, CICR, 0);

        omap_dma_clear_csr(c);

        val = omap_dma_chan_read(c, CCR);
        if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
                uint32_t sysconfig;
                unsigned i;

                sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
                val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
                val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
                omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

                val = omap_dma_chan_read(c, CCR);
                val &= ~CCR_ENABLE;
                omap_dma_chan_write(c, CCR, val);

                /* Wait for sDMA FIFO to drain */
                for (i = 0; ; i++) {
                        val = omap_dma_chan_read(c, CCR);
                        if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
                                break;

                        if (i > 100)
                                break;

                        udelay(5);
                }

                if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
                        dev_err(c->vc.chan.device->dev,
                                "DMA drain did not complete on lch %d\n",
                                c->dma_ch);

                omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
        } else {
                val &= ~CCR_ENABLE;
                omap_dma_chan_write(c, CCR, val);
        }

        mb();

        if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
                val = omap_dma_chan_read(c, CLNK_CTRL);

                if (dma_omap1())
                        val |= 1 << 14; /* set the STOP_LNK bit */
                else
                        val &= ~CLNK_CTRL_ENABLE_LNK;

                omap_dma_chan_write(c, CLNK_CTRL, val);
        }
}
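
/*
 * Program the memory-side parameters of one scatterlist entry: the
 * memory address goes to CDSA (dev-to-mem) or CSSA (mem-to-dev),
 * followed by the element and frame counts, then the channel is
 * (re)started.  The device-side address is programmed once per
 * descriptor in omap_dma_start_desc().
 */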
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        unsigned idx)
{
        struct omap_sg *sg = d->sg + idx;
        unsigned cxsa, cxei, cxfi;

        if (d->dir == DMA_DEV_TO_MEM) {
                cxsa = CDSA;
                cxei = CDEI;
                cxfi = CDFI;
        } else {
                cxsa = CSSA;
                cxei = CSEI;
                cxfi = CSFI;
        }

        omap_dma_chan_write(c, cxsa, sg->addr);
        omap_dma_chan_write(c, cxei, 0);
        omap_dma_chan_write(c, cxfi, 0);
        omap_dma_chan_write(c, CEN, sg->en);
        omap_dma_chan_write(c, CFN, sg->fn);

        omap_dma_start(c, d);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;
        unsigned cxsa, cxei, cxfi;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        /*
         * This provides the necessary barrier to ensure data held in
         * DMA coherent memory is visible to the DMA engine prior to
         * the transfer starting.
         */
        mb();

        omap_dma_chan_write(c, CCR, d->ccr);
        if (dma_omap1())
                omap_dma_chan_write(c, CCR2, d->ccr >> 16);

        if (d->dir == DMA_DEV_TO_MEM) {
                cxsa = CSSA;
                cxei = CSEI;
                cxfi = CSFI;
        } else {
                cxsa = CDSA;
                cxei = CDEI;
                cxfi = CDFI;
        }

        omap_dma_chan_write(c, cxsa, d->dev_addr);
        omap_dma_chan_write(c, cxei, 0);
        omap_dma_chan_write(c, cxfi, d->fi);
        omap_dma_chan_write(c, CSDP, d->csdp);
        omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

        omap_dma_start_sg(c, d, 0);
}
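
/*
 * Per-channel completion handler, called either by the legacy DMA
 * core or from omap_dma_irq(): advance to the next scatterlist entry,
 * or complete the descriptor and start the next one; cyclic
 * transfers just signal the period callback and keep running.
 */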
static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (!c->cyclic) {
                        if (++c->sgidx < d->sglen) {
                                omap_dma_start_sg(c, d, c->sgidx);
                        } else {
                                omap_dma_start_desc(c);
                                vchan_cookie_complete(&d->vd);
                        }
                } else {
                        vchan_cyclic_callback(&d->vd);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
        struct omap_dmadev *d = (struct omap_dmadev *)data;
        LIST_HEAD(head);

        spin_lock_irq(&d->lock);
        list_splice_tail_init(&d->pending, &head);
        spin_unlock_irq(&d->lock);

        while (!list_empty(&head)) {
                struct omap_chan *c = list_first_entry(&head,
                        struct omap_chan, node);

                spin_lock_irq(&c->vc.lock);
                list_del_init(&c->node);
                omap_dma_start_desc(c);
                spin_unlock_irq(&c->vc.lock);
        }
}

static irqreturn_t omap_dma_irq(int irq, void *devid)
{
        struct omap_dmadev *od = devid;
        unsigned status, channel;

        spin_lock(&od->irq_lock);

        status = omap_dma_glbl_read(od, IRQSTATUS_L1);
        status &= od->irq_enable_mask;
        if (status == 0) {
                spin_unlock(&od->irq_lock);
                return IRQ_NONE;
        }

        while ((channel = ffs(status)) != 0) {
                unsigned mask, csr;
                struct omap_chan *c;

                channel -= 1;
                mask = BIT(channel);
                status &= ~mask;

                c = od->lch_map[channel];
                if (c == NULL) {
                        /* This should never happen */
                        dev_err(od->ddev.dev, "invalid channel %u\n", channel);
                        continue;
                }

                csr = omap_dma_get_csr(c);
                omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

                omap_dma_callback(channel, csr, c);
        }

        spin_unlock(&od->irq_lock);

        return IRQ_HANDLED;
}
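
/*
 * In legacy mode the plat-omap DMA core owns the interrupt and calls
 * omap_dma_callback() itself.  Otherwise the channel is requested
 * without a callback and its interrupt is taken on the L1 line
 * serviced by omap_dma_irq(): the channel's bit is enabled in
 * IRQENABLE_L1 and masked in IRQENABLE_L0.
 */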
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;

        if (od->legacy) {
                ret = omap_request_dma(c->dma_sig, "DMA engine",
                                       omap_dma_callback, c, &c->dma_ch);
        } else {
                ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
                                       &c->dma_ch);
        }

        dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
                c->dma_ch, c->dma_sig);

        if (ret >= 0) {
                omap_dma_assign(od, c, c->dma_ch);

                if (!od->legacy) {
                        unsigned val;

                        spin_lock_irq(&od->irq_lock);
                        val = BIT(c->dma_ch);
                        omap_dma_glbl_write(od, IRQSTATUS_L1, val);
                        od->irq_enable_mask |= val;
                        omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

                        val = omap_dma_glbl_read(od, IRQENABLE_L0);
                        val &= ~BIT(c->dma_ch);
                        omap_dma_glbl_write(od, IRQENABLE_L0, val);
                        spin_unlock_irq(&od->irq_lock);
                }
        }

        if (dma_omap1()) {
                if (__dma_omap16xx(od->plat->dma_attr)) {
                        c->ccr = CCR_OMAP31_DISABLE;
                        /* Duplicate what plat-omap/dma.c does */
                        c->ccr |= c->dma_ch + 1;
                } else {
                        c->ccr = c->dma_sig & 0x1f;
                }
        } else {
                c->ccr = c->dma_sig & 0x1f;
                c->ccr |= (c->dma_sig & ~0x1f) << 14;
        }

        if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
                c->ccr |= CCR_BUFFERING_DISABLE;

        return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);

        if (!od->legacy) {
                spin_lock_irq(&od->irq_lock);
                od->irq_enable_mask &= ~BIT(c->dma_ch);
                omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
                spin_unlock_irq(&od->irq_lock);
        }

        c->channel_base = NULL;
        od->lch_map[c->dma_ch] = NULL;
        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}
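
/*
 * Bytes remaining from a given bus address to the end of the
 * descriptor: entries wholly before the one containing the address
 * contribute nothing, the containing entry contributes its remaining
 * bytes, and every later entry contributes in full.
 */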
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }
        return size;
}

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint32_t val;

        val = omap_dma_chan_read(c, reg);
        if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
                val = omap_dma_chan_read(c, reg);

        return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        dma_addr_t addr, cdac;

        if (__dma_omap15xx(od->plat->dma_attr)) {
                addr = omap_dma_chan_read(c, CPC);
        } else {
                addr = omap_dma_chan_read_3_3(c, CSAC);
                cdac = omap_dma_chan_read_3_3(c, CDAC);

                /*
                 * CDAC == 0 indicates that the DMA transfer on the channel has
                 * not been started (no data has been transferred so far).
                 * Return the programmed source start address in this case.
                 */
                if (cdac == 0)
                        addr = omap_dma_chan_read(c, CSSA);
        }

        if (dma_omap1())
                addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

        return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        dma_addr_t addr;

        if (__dma_omap15xx(od->plat->dma_attr)) {
                addr = omap_dma_chan_read(c, CPC);
        } else {
                addr = omap_dma_chan_read_3_3(c, CDAC);

                /*
                 * CDAC == 0 indicates that the DMA transfer on the channel
                 * has not been started (no data has been transferred so
                 * far).  Return the programmed destination start address in
                 * this case.
                 */
                if (addr == 0)
                        addr = omap_dma_chan_read(c, CDSA);
        }

        if (dma_omap1())
                addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

        return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct omap_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_dma_get_src_pos(c);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = omap_dma_get_dst_pos(c);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
                /*
                 * c->cyclic is used only by audio, and in that case the
                 * DMA needs to be started without delay.
                 */
                if (!c->cyclic) {
                        struct omap_dmadev *d = to_omap_dma_dev(chan->device);

                        spin_lock(&d->lock);
                        if (list_empty(&c->node))
                                list_add_tail(&c->node, &d->pending);
                        spin_unlock(&d->lock);
                        tasklet_schedule(&d->task);
                } else {
                        omap_dma_start_desc(c);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}
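
/*
 * Map a slave scatterlist onto the sDMA geometry: ES comes from the
 * slave bus width, EN from the configured maxburst, and each sg entry
 * becomes FN = sg_dma_len / (ES * EN) frames; entry lengths are
 * assumed to be a multiple of the frame size.
 */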
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, j = 0, es, en, frame_bytes;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = CSDP_DATA_TYPE_8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = CSDP_DATA_TYPE_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = CSDP_DATA_TYPE_32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;

        d->ccr = c->ccr | CCR_SYNC_FRAME;
        if (dir == DMA_DEV_TO_MEM)
                d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
        else
                d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

        d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
        d->csdp = es;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;

                if (dir == DMA_DEV_TO_MEM)
                        d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
                else
                        d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
        } else {
                if (dir == DMA_DEV_TO_MEM)
                        d->ccr |= CCR_TRIGGER_SRC;

                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
        }

        if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
                d->clnk_ctrl = c->dma_ch;

        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN).  Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: DMA engine defines burst to be the number of dev-width
         * transfers.
         */
        en = burst;
        frame_bytes = es_bytes[es] * en;
        for_each_sg(sgl, sgent, sglen, i) {
                d->sg[j].addr = sg_dma_address(sgent);
                d->sg[j].en = en;
                d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
                j++;
        }

        d->sglen = j;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
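
/*
 * A cyclic transfer is described by a single sg entry covering the
 * whole buffer: EN is the period length in elements and FN the number
 * of periods, so CICR_FRAME_IE (when DMA_PREP_INTERRUPT is set)
 * raises an interrupt at each period boundary.  The transfer repeats
 * by linking the channel to itself via CLNK_CTRL (or via
 * CCR_AUTO_INIT | CCR_REPEAT on OMAP15xx).
 */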
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = CSDP_DATA_TYPE_8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = CSDP_DATA_TYPE_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = CSDP_DATA_TYPE_32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->fi = burst;
        d->es = es;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;

        d->ccr = c->ccr;
        if (dir == DMA_DEV_TO_MEM)
                d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
        else
                d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

        d->cicr = CICR_DROP_IE;
        if (flags & DMA_PREP_INTERRUPT)
                d->cicr |= CICR_FRAME_IE;

        d->csdp = es;

        if (dma_omap1()) {
                d->cicr |= CICR_TOUT_IE;

                if (dir == DMA_DEV_TO_MEM)
                        d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
                else
                        d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
        } else {
                if (burst)
                        d->ccr |= CCR_SYNC_PACKET;
                else
                        d->ccr |= CCR_SYNC_ELEMENT;

                if (dir == DMA_DEV_TO_MEM)
                        d->ccr |= CCR_TRIGGER_SRC;

                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

                d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
        }

        if (__dma_omap15xx(od->plat->dma_attr))
                d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
        else
                d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

        c->cyclic = true;

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}

static int omap_dma_terminate_all(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_dma_stop() returns (even if it does, it will see
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
                omap_dma_desc_free(&c->desc->vd);
                c->desc = NULL;
                /* Avoid stopping the dma twice */
                if (!c->paused)
                        omap_dma_stop(c);
        }

        if (c->cyclic) {
                c->cyclic = false;
                c->paused = false;
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int omap_dma_pause(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (!c->paused) {
                omap_dma_stop(c);
                c->paused = true;
        }

        return 0;
}

static int omap_dma_resume(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (c->paused) {
                mb();

                /* Restore channel link register */
                omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

                omap_dma_start(c, c->desc);
                c->paused = false;
        }

        return 0;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->reg_map = od->reg_map;
        c->dma_sig = dma_sig;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);
        INIT_LIST_HEAD(&c->node);

        return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
        tasklet_kill(&od->task);
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
}

#define OMAP_DMA_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        struct resource *res;
        int rc, i, irq;

        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        od->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(od->base))
                return PTR_ERR(od->base);

        od->plat = omap_get_plat_info();
        if (!od->plat)
                return -EPROBE_DEFER;

        od->reg_map = od->plat->reg_map;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_config = omap_dma_slave_config;
        od->ddev.device_pause = omap_dma_pause;
        od->ddev.device_resume = omap_dma_resume;
        od->ddev.device_terminate_all = omap_dma_terminate_all;
        od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
        spin_lock_init(&od->lock);
        spin_lock_init(&od->irq_lock);

        tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

        for (i = 0; i < 127; i++) {
                rc = omap_dma_chan_init(od, i);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        irq = platform_get_irq(pdev, 1);
        if (irq <= 0) {
                dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
                od->legacy = true;
        } else {
                /* Disable all interrupts */
                od->irq_enable_mask = 0;
                omap_dma_glbl_write(od, IRQENABLE_L1, 0);

                rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
                                      IRQF_SHARED, "omap-dma-engine", od);
                if (rc)
                        return rc;
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
                return rc;
        }

        platform_set_drvdata(pdev, od);

        if (pdev->dev.of_node) {
                omap_dma_info.dma_cap = od->ddev.cap_mask;

                /* Device-tree DMA controller registration */
                rc = of_dma_controller_register(pdev->dev.of_node,
                                of_dma_simple_xlate, &omap_dma_info);
                if (rc) {
                        pr_warn("OMAP-DMA: failed to register DMA controller\n");
                        dma_async_device_unregister(&od->ddev);
                        omap_dma_free(od);
                }
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver\n");

        return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);

        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);

        dma_async_device_unregister(&od->ddev);

        if (!od->legacy) {
                /* Disable all interrupts */
                omap_dma_glbl_write(od, IRQENABLE_L0, 0);
        }

        omap_dma_free(od);

        return 0;
}

static const struct of_device_id omap_dma_match[] = {
        { .compatible = "ti,omap2420-sdma", },
        { .compatible = "ti,omap2430-sdma", },
        { .compatible = "ti,omap3430-sdma", },
        { .compatible = "ti,omap3630-sdma", },
        { .compatible = "ti,omap4430-sdma", },
        {},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
        .probe  = omap_dma_probe,
        .remove = omap_dma_remove,
        .driver = {
                .name = "omap-dma-engine",
                .of_match_table = of_match_ptr(omap_dma_match),
        },
};
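
/*
 * omap_dma_filter_fn() matches a channel against an sDMA request line
 * number.  A minimal usage sketch, assuming a platform-specific
 * request line MY_SDMA_REQUEST (hypothetical name):
 *
 *      dma_cap_mask_t mask;
 *      unsigned sig = MY_SDMA_REQUEST;
 *      struct dma_chan *chan;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */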
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                return req == c->dma_sig;
        }
        return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
        return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");