/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16
struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	int				cyclic;
	int				absync;
	int				pset_nr;
	int				processed;
	struct edmacc_param		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	int				ctlr;
	struct dma_device		dma_slave;
	struct edma_chan		slave_chans[EDMA_CHANS];
	int				num_slave_chans;
	int				dummy_slot;
};
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* If we have either processed all psets or not yet started */
	if (!echan->edesc ||
	    echan->edesc->pset_nr == echan->edesc->processed) {
		/* Get next vdesc */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc) {
			echan->edesc = NULL;
			return;
		}
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many psets are left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j]);
		dev_dbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].opt,
			edesc->pset[j].src,
			edesc->pset[j].dst,
			edesc->pset[j].a_b_cnt,
			edesc->pset[j].ccnt,
			edesc->pset[j].src_dst_bidx,
			edesc->pset[j].src_dst_cidx,
			edesc->pset[j].link_bcntrld);
		/* Link this slot to the next one, unless it is the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a chain of SG-list transactions,
	 * link to the dummy slot; this results in all future events
	 * being absorbed, and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(echan->slot[nslots-1], echan->slot[1]);
		else
			edma_link(echan->slot[nslots-1],
				  echan->ecc->dummy_slot);
	}

	edma_resume(echan->ch_num);

	if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
		edma_start(echan->ch_num);
	}

	/*
	 * This happens due to setup times between intermediate transfers
	 * in long SG lists which have to be broken up into transfers of
	 * MAX_NR_SG.
	 */
	if (echan->missed) {
		dev_dbg(dev, "missed event in execute detected\n");
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	}
}
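/*
 * Batching sketch (illustrative, not from the original source): with
 * MAX_NR_SG = 20, a 50-element SG list is dispatched by edma_execute()
 * in three rounds of 20, 20 and 10 psets. Each round programs up to
 * MAX_NR_SG PaRAM slots and chains them with edma_link(); the
 * intermediate TCINTEN interrupt that edma_prep_slave_sg() sets every
 * MAX_NR_SG psets re-enters this function via edma_callback() to issue
 * the next round:
 *
 *	round 1: processed  0 -> 20
 *	round 2: processed 20 -> 40
 *	round 3: processed 40 -> 50, last slot linked to the dummy slot
 */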
static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_stop() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}

static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
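/*
 * A minimal sketch of how a client reaches edma_slave_config() through
 * the generic dmaengine API (hypothetical client code, not part of
 * this driver; fifo_phys stands in for a real device FIFO address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * dmaengine_slave_config() invokes edma_control() with
 * DMA_SLAVE_CONFIG; 8-byte bus widths fail with -EINVAL since
 * edma_slave_config() rejects DMA_SLAVE_BUSWIDTH_8_BYTES.
 */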
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of the device's data port, in bytes
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: in an A-sync transfer only, bcntrld is used, and it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In that case the approach is: bcnt for the first frame
		 * is the remainder computed below, and every successive
		 * frame uses SZ_64K - 1; this is guaranteed because
		 * bcntrld is set to 0xffff at the end of this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		pset->opt |= SYNCDIM;

	pset->src = src_addr;
	pset->dst = dst_addr;

	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	pset->a_b_cnt = bcnt << 16 | acnt;
	pset->ccnt = ccnt;
	/*
	 * The only time an auto reload (bcntrld) is required is the
	 * A-sync case, and then only a reload value of SZ_64K - 1 is
	 * needed. 'link' is initially set to NULL and is populated
	 * later by edma_execute().
	 */
	pset->link_bcntrld = 0xffffffff;
	return absync;
}
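/*
 * Worked example of the A-sync arithmetic above (illustrative numbers):
 * dev_width = 4 bytes and dma_length = 300000 bytes give acnt = 4 and
 * 75000 elements to move, so:
 *
 *	ccnt = 75000 / 65535 = 1
 *	bcnt = 75000 - 1 * 65535 = 9465  (non-zero, so ccnt becomes 2)
 *
 * The first frame moves 9465 elements and bcntrld = 0xffff reloads
 * SZ_64K - 1 for the second, totalling 75000. For the AB-sync case
 * with maxburst = 8, one frame moves acnt * bcnt = 32 bytes and
 * ccnt = dma_length / 32, capped at SZ_64K - 1 per SG segment.
 */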
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = sg_len;

	/* Allocate PaRAM slots, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "Failed to allocate slot\n");
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;

		/*
		 * If this is the last pset in the current batch of
		 * MAX_NR_SG transactions, enable interrupts so that the
		 * next batch is processed.
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;

	dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
	dev_dbg(dev, "%s: period_len=%zu\n", __func__, period_len);
	dev_dbg(dev, "%s: buf_len=%zu\n", __func__, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "Failed to allocate slot\n");
				return NULL;
			}
		}

		/* The last pset is simply a copy of the first */
		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_dbg(dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].opt,
			edesc->pset[i].src,
			edesc->pset[i].dst,
			edesc->pset[i].a_b_cnt,
			edesc->pset[i].ccnt,
			edesc->pset[i].src_dst_bidx,
			edesc->pset[i].src_dst_cidx,
			edesc->pset[i].link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable interrupts for every period because the callback
		 * has to be called for every period.
		 */
		edesc->pset[i].opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
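/*
 * Slot accounting sketch for the cyclic case above (illustrative
 * numbers, not from the original source): a 32 KiB ring buffer with
 * 4 KiB periods gives buf_len / period_len = 8 periods, so nslots = 9.
 * Eight psets describe one period each; the ninth is the memcpy'd
 * copy of pset[0], which edma_execute() links back to echan->slot[1]
 * so the chain loops over all periods until the channel is terminated.
 */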
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;
	struct edmacc_param p;

	edesc = echan->edesc;

	/* Pause the channel for non-cyclic */
	if (!edesc || !edesc->cyclic)
		edma_pause(echan->ch_num);

	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				edma_execute(echan);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
				edma_execute(echan);
			}
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	case EDMA_DMA_CC_ERROR:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on missed flag which will be sure
		 * to happen as:
		 * (1) we finished transmitting an intermediate slot and
		 *     edma_execute is coming up;
		 * (2) or we finished the current transfer and issue will
		 *     call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	default:
		break;
	}
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel for %u:%u\n",
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}
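/*
 * Residue arithmetic sketch (illustrative numbers): an AB-synced
 * descriptor with two psets of acnt = 4, bcnt = 8 and ccnt = 100
 * reports 2 * (4 * 8 * 100) = 6400 bytes. Whole psets are summed,
 * so a partially transferred pset is counted as fully untransferred.
 */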
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}
static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}
static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name = "edma-dma-engine",
		.owner = THIS_MODULE,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
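/*
 * A minimal sketch of client-side channel allocation using the filter
 * above (hypothetical code, not part of this driver):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch = EDMA_CTLR_CHAN(0, 12);	// example channel
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch);
 *
 * edma_filter_fn() matches only the channel whose ch_num equals
 * *param, so the client gets the exact hardware channel it requested.
 */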
static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
	}

	if (EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
		}
	}

out:
	return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");