/*
 * driver/dma/coh901318.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 * DMA driver for COH 901 318
 * Author: Per Friden <per.friden@stericsson.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
#include <linux/slab.h> /* kmalloc() */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <mach/coh901318.h>

#include "coh901318_lli.h"
#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)

#ifdef VERBOSE_DEBUG
#define COH_DBG(x) ({ if (1) x; 0; })
#else
#define COH_DBG(x) ({ if (0) x; 0; })
#endif
struct coh901318_desc {
	struct dma_async_tx_descriptor desc;
	struct list_head node;
	struct scatterlist *sg;
	unsigned int sg_len;
	struct coh901318_lli *lli;
	enum dma_data_direction dir;
	unsigned long flags;
};
struct coh901318_base {
	struct device *dev;
	void __iomem *virtbase;
	struct coh901318_pool pool;
	struct powersave pm;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct coh901318_chan *chans;
	struct coh901318_platform *platform;
};
struct coh901318_chan {
	spinlock_t lock;
	int allocated;
	int completed;
	int id;
	int stopped;

	struct work_struct free_work;
	struct dma_chan chan;

	struct tasklet_struct tasklet;

	struct list_head active;
	struct list_head queue;
	struct list_head free;

	unsigned long nbr_active_done;
	unsigned long busy;

	struct coh901318_base *base;
};
static void coh901318_list_print(struct coh901318_chan *cohc,
				 struct coh901318_lli *lli)
{
	struct coh901318_lli *l = lli;
	int i = 0;

	while (l) {
		dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
			 ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
			 i, l, l->control, l->src_addr, l->dst_addr,
			 l->link_addr, l->virt_link_addr);
		i++;
		l = l->virt_link_addr;
	}
}
#ifdef CONFIG_DEBUG_FS

#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)

static struct coh901318_base *debugfs_dma_base;
static struct dentry *dma_dentry;

static int coh901318_debugfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static int coh901318_debugfs_read(struct file *file, char __user *buf,
				  size_t count, loff_t *f_pos)
{
	u64 started_channels = debugfs_dma_base->pm.started_channels;
	int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
	int i;
	int ret = 0;
	int dev_size;
	char *dev_buf;
	char *tmp;

	dev_buf = kmalloc(4*1024, GFP_KERNEL);
	if (dev_buf == NULL)
		return -ENOMEM;
	tmp = dev_buf;

	tmp += sprintf(tmp, "DMA -- enabled dma channels\n");

	for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
		if (started_channels & (1 << i))
			tmp += sprintf(tmp, "channel %d\n", i);

	tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
	dev_size = tmp - dev_buf;

	/* No more to read if offset != 0 */
	if (*f_pos > dev_size)
		goto out;

	if (count > dev_size - *f_pos)
		count = dev_size - *f_pos;

	if (copy_to_user(buf, dev_buf + *f_pos, count))
		ret = -EINVAL;
	else {
		ret = count;
		*f_pos += count;
	}

 out:
	kfree(dev_buf);
	return ret;
}

static const struct file_operations coh901318_debugfs_status_operations = {
	.owner		= THIS_MODULE,
	.open		= coh901318_debugfs_open,
	.read		= coh901318_debugfs_read,
};

static int __init init_coh901318_debugfs(void)
{
	dma_dentry = debugfs_create_dir("dma", NULL);

	(void) debugfs_create_file("status",
				   S_IFREG | S_IRUGO,
				   dma_dentry, NULL,
				   &coh901318_debugfs_status_operations);
	return 0;
}

static void __exit exit_coh901318_debugfs(void)
{
	debugfs_remove_recursive(dma_dentry);
}

module_init(init_coh901318_debugfs);
module_exit(exit_coh901318_debugfs);

#else

#define COH901318_DEBUGFS_ASSIGN(x, y)

#endif /* CONFIG_DEBUG_FS */
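/*
 * Illustrative only, not part of the driver: with CONFIG_DEBUG_FS enabled,
 * the status file above is read from userspace roughly like this, assuming
 * debugfs is mounted at the conventional location:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/dma/status
 *	DMA -- enabled dma channels
 *	channel 4
 *	Pool alloc nbr 2
 *
 * The example output only mirrors the sprintf() formats above; actual
 * channel numbers and pool counts depend on the running system.
 */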
static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
{
	return container_of(chan, struct coh901318_chan, chan);
}
static inline dma_addr_t
cohc_dev_addr(struct coh901318_chan *cohc)
{
	return cohc->base->platform->chan_conf[cohc->id].dev_addr;
}

static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
	return &cohc->base->platform->chan_conf[cohc->id].param;
}

static inline const struct coh_dma_channel *
cohc_chan_conf(struct coh901318_chan *cohc)
{
	return &cohc->base->platform->chan_conf[cohc->id];
}
static void enable_powersave(struct coh901318_chan *cohc)
{
	unsigned long flags;
	struct powersave *pm = &cohc->base->pm;

	spin_lock_irqsave(&pm->lock, flags);

	pm->started_channels &= ~(1ULL << cohc->id);

	if (!pm->started_channels) {
		/* DMA no longer intends to access memory */
		cohc->base->platform->access_memory_state(cohc->base->dev,
							  false);
	}

	spin_unlock_irqrestore(&pm->lock, flags);
}
static void disable_powersave(struct coh901318_chan *cohc)
{
	unsigned long flags;
	struct powersave *pm = &cohc->base->pm;

	spin_lock_irqsave(&pm->lock, flags);

	if (!pm->started_channels) {
		/* DMA intends to access memory */
		cohc->base->platform->access_memory_state(cohc->base->dev,
							  true);
	}

	pm->started_channels |= (1ULL << cohc->id);

	spin_unlock_irqrestore(&pm->lock, flags);
}
static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
{
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	writel(control,
	       virtbase + COH901318_CX_CTRL +
	       COH901318_CX_CTRL_SPACING * channel);

	return 0;
}
static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
{
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	writel(conf,
	       virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING * channel);

	return 0;
}
static int coh901318_start(struct coh901318_chan *cohc)
{
	u32 val;
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	disable_powersave(cohc);

	val = readl(virtbase + COH901318_CX_CFG +
		    COH901318_CX_CFG_SPACING * channel);

	/* Enable channel */
	val |= COH901318_CX_CFG_CH_ENABLE;
	writel(val, virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING * channel);

	return 0;
}
static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
				      struct coh901318_lli *lli)
{
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	BUG_ON(readl(virtbase + COH901318_CX_STAT +
		     COH901318_CX_STAT_SPACING * channel) &
	       COH901318_CX_STAT_ACTIVE);

	writel(lli->src_addr,
	       virtbase + COH901318_CX_SRC_ADDR +
	       COH901318_CX_SRC_ADDR_SPACING * channel);

	writel(lli->dst_addr, virtbase +
	       COH901318_CX_DST_ADDR +
	       COH901318_CX_DST_ADDR_SPACING * channel);

	writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR +
	       COH901318_CX_LNK_ADDR_SPACING * channel);

	writel(lli->control, virtbase + COH901318_CX_CTRL +
	       COH901318_CX_CTRL_SPACING * channel);

	return 0;
}
static dma_cookie_t
coh901318_assign_cookie(struct coh901318_chan *cohc,
			struct coh901318_desc *cohd)
{
	dma_cookie_t cookie = cohc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	cohc->chan.cookie = cookie;
	cohd->desc.cookie = cookie;

	return cookie;
}
static struct coh901318_desc *
coh901318_desc_get(struct coh901318_chan *cohc)
{
	struct coh901318_desc *desc;

	if (list_empty(&cohc->free)) {
		/* alloc new desc because we're out of used ones
		 * TODO: alloc a pile of descs instead of just one,
		 * avoid many small allocations.
		 */
		desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
		if (desc == NULL)
			goto out;
		INIT_LIST_HEAD(&desc->node);
		dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
	} else {
		/* Reuse an old desc. */
		desc = list_first_entry(&cohc->free,
					struct coh901318_desc,
					node);
		list_del(&desc->node);
		/* Initialize it a bit so it's not insane */
		desc->sg = NULL;
		desc->sg_len = 0;
		desc->desc.callback = NULL;
		desc->desc.callback_param = NULL;
	}

 out:
	return desc;
}
static void
coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
{
	list_add_tail(&cohd->node, &cohc->free);
}
/* call with irq lock held */
static void
coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
	list_add_tail(&desc->node, &cohc->active);
}
static struct coh901318_desc *
coh901318_first_active_get(struct coh901318_chan *cohc)
{
	struct coh901318_desc *d;

	if (list_empty(&cohc->active))
		return NULL;

	d = list_first_entry(&cohc->active,
			     struct coh901318_desc,
			     node);
	return d;
}
static void
coh901318_desc_remove(struct coh901318_desc *cohd)
{
	list_del(&cohd->node);
}
static void
coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
	list_add_tail(&desc->node, &cohc->queue);
}
static struct coh901318_desc *
coh901318_first_queued(struct coh901318_chan *cohc)
{
	struct coh901318_desc *d;

	if (list_empty(&cohc->queue))
		return NULL;

	d = list_first_entry(&cohc->queue,
			     struct coh901318_desc,
			     node);
	return d;
}
static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
{
	struct coh901318_lli *lli = in_lli;
	u32 bytes = 0;

	while (lli) {
		bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK;
		lli = lli->virt_link_addr;
	}
	return bytes;
}
/*
 * Get the number of bytes left to transfer on this channel. It is
 * unwise to call this before stopping the channel for absolute
 * measures, but for a rough guess you can still call it.
 */
static u32 coh901318_get_bytes_left(struct dma_chan *chan)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	struct coh901318_desc *cohd;
	struct list_head *pos;
	unsigned long flags;
	u32 left = 0;
	int i = 0;

	spin_lock_irqsave(&cohc->lock, flags);

	/*
	 * If there are many queued jobs, we iterate and add the
	 * size of them all. We take a special look on the first
	 * job though, since it is probably active.
	 */
	list_for_each(pos, &cohc->active) {
		/*
		 * The first job in the list will be working on the
		 * hardware. The job can be stopped but still active,
		 * so that the transfer counter is somewhere inside
		 * the buffer.
		 */
		cohd = list_entry(pos, struct coh901318_desc, node);

		if (i == 0) {
			struct coh901318_lli *lli;
			dma_addr_t ladd;

			/* Read current transfer count value */
			left = readl(cohc->base->virtbase +
				     COH901318_CX_CTRL +
				     COH901318_CX_CTRL_SPACING * cohc->id) &
				COH901318_CX_CTRL_TC_VALUE_MASK;

			/* See if the transfer is linked... */
			ladd = readl(cohc->base->virtbase +
				     COH901318_CX_LNK_ADDR +
				     COH901318_CX_LNK_ADDR_SPACING *
				     cohc->id) &
				~COH901318_CX_LNK_LINK_IMMEDIATE;
			/* Single transaction */
			if (!ladd)
				continue;

			/*
			 * Linked transaction, follow the lli, find the
			 * currently processing lli, and proceed to the next
			 */
			lli = cohd->lli;
			while (lli && lli->link_addr != ladd)
				lli = lli->virt_link_addr;

			if (lli)
				lli = lli->virt_link_addr;

			/*
			 * Follow remaining lli links around to count the total
			 * number of bytes left
			 */
			left += coh901318_get_bytes_in_lli(lli);
		} else {
			left += coh901318_get_bytes_in_lli(cohd->lli);
		}
		i++;
	}

	/* Also count bytes in the queued jobs */
	list_for_each(pos, &cohc->queue) {
		cohd = list_entry(pos, struct coh901318_desc, node);
		left += coh901318_get_bytes_in_lli(cohd->lli);
	}

	spin_unlock_irqrestore(&cohc->lock, flags);

	return left;
}
/*
 * Pauses a transfer without losing data. Enables power save.
 * Use this function in conjunction with coh901318_resume.
 */
static void coh901318_pause(struct dma_chan *chan)
{
	u32 val;
	unsigned long flags;
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	spin_lock_irqsave(&cohc->lock, flags);

	/* Disable channel in HW */
	val = readl(virtbase + COH901318_CX_CFG +
		    COH901318_CX_CFG_SPACING * channel);

	/* Stopping infinite transfer */
	if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
	    (val & COH901318_CX_CFG_CH_ENABLE))
		cohc->stopped = 1;

	val &= ~COH901318_CX_CFG_CH_ENABLE;
	/* Enable twice, HW bug work around */
	writel(val, virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING * channel);
	writel(val, virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING * channel);

	/* Spin-wait for it to actually go inactive */
	while (readl(virtbase + COH901318_CX_STAT + COH901318_CX_STAT_SPACING *
		     channel) & COH901318_CX_STAT_ACTIVE)
		cpu_relax();

	/* Check if we stopped an active job */
	if ((readl(virtbase + COH901318_CX_CTRL + COH901318_CX_CTRL_SPACING *
		   channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
		cohc->stopped = 1;

	enable_powersave(cohc);

	spin_unlock_irqrestore(&cohc->lock, flags);
}
/* Resumes a transfer that has been stopped via coh901318_pause().
   Power save is handled.
*/
static void coh901318_resume(struct dma_chan *chan)
{
	u32 val;
	unsigned long flags;
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	int channel = cohc->id;

	spin_lock_irqsave(&cohc->lock, flags);

	disable_powersave(cohc);

	if (cohc->stopped) {
		/* Enable channel in HW */
		val = readl(cohc->base->virtbase + COH901318_CX_CFG +
			    COH901318_CX_CFG_SPACING * channel);

		val |= COH901318_CX_CFG_CH_ENABLE;

		writel(val, cohc->base->virtbase + COH901318_CX_CFG +
		       COH901318_CX_CFG_SPACING * channel);

		cohc->stopped = 0;
	}

	spin_unlock_irqrestore(&cohc->lock, flags);
}
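/*
 * Illustrative only: clients do not call coh901318_pause()/coh901318_resume()
 * directly; they are reached through the generic dmaengine control hook that
 * coh901318_control() implements further down, e.g.:
 *
 *	chan->device->device_control(chan, DMA_PAUSE, 0);
 *	...
 *	chan->device->device_control(chan, DMA_RESUME, 0);
 */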
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == to_coh901318_chan(chan)->id)
		return true;

	return false;
}
EXPORT_SYMBOL(coh901318_filter_id);
/*
 * DMA channel allocation
 */
static int coh901318_config(struct coh901318_chan *cohc,
			    struct coh901318_params *param)
{
	unsigned long flags;
	const struct coh901318_params *p;
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	spin_lock_irqsave(&cohc->lock, flags);

	if (param)
		p = param;
	else
		p = &cohc->base->platform->chan_conf[channel].param;

	/* Clear any pending BE or TC interrupt */
	if (channel < 32) {
		writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
		writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
	} else {
		writel(1 << (channel - 32), virtbase +
		       COH901318_BE_INT_CLEAR2);
		writel(1 << (channel - 32), virtbase +
		       COH901318_TC_INT_CLEAR2);
	}

	coh901318_set_conf(cohc, p->config);
	coh901318_set_ctrl(cohc, p->ctrl_lli_last);

	spin_unlock_irqrestore(&cohc->lock, flags);

	return 0;
}
/* must lock when calling this function
 * start queued jobs, if any
 * TODO: start all queued jobs in one go
 *
 * Returns descriptor if queued job is started otherwise NULL.
 * If the queue is empty NULL is returned.
 */
static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
{
	struct coh901318_desc *cohd;

	/*
	 * start queued jobs, if any
	 * TODO: transmit all queued jobs in one go
	 */
	cohd = coh901318_first_queued(cohc);

	if (cohd != NULL) {
		/* Remove from queue */
		coh901318_desc_remove(cohd);
		/* initiate DMA job */
		cohc->busy = 1;

		coh901318_desc_submit(cohc, cohd);

		coh901318_prep_linked_list(cohc, cohd->lli);

		/* start dma job on this channel */
		coh901318_start(cohc);
	}

	return cohd;
}
/*
 * This tasklet is called from the interrupt handler to
 * handle each descriptor (DMA job) that is sent to a channel.
 */
static void dma_tasklet(unsigned long data)
{
	struct coh901318_chan *cohc = (struct coh901318_chan *) data;
	struct coh901318_desc *cohd_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
		 " nbr_active_done %ld\n", __func__,
		 cohc->id, cohc->nbr_active_done);

	spin_lock_irqsave(&cohc->lock, flags);

	/* get first active descriptor entry from list */
	cohd_fin = coh901318_first_active_get(cohc);

	if (cohd_fin == NULL)
		goto err;

	/* locate callback to client */
	callback = cohd_fin->desc.callback;
	callback_param = cohd_fin->desc.callback_param;

	/* sign this job as completed on the channel */
	cohc->completed = cohd_fin->desc.cookie;

	/* release the lli allocation and remove the descriptor */
	coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);

	/* return desc to free-list */
	coh901318_desc_remove(cohd_fin);
	coh901318_desc_free(cohc, cohd_fin);

	spin_unlock_irqrestore(&cohc->lock, flags);

	/* Call the callback when we're done */
	if (callback)
		callback(callback_param);

	spin_lock_irqsave(&cohc->lock, flags);

	/*
	 * If another interrupt fired while the tasklet was scheduling,
	 * we don't get called twice, so we keep a counter of the number
	 * of IRQs expected to be handled for this channel. If there
	 * happen to be more than one IRQ to be ack:ed, we simply
	 * schedule this tasklet again.
	 */
	cohc->nbr_active_done--;
	if (cohc->nbr_active_done) {
		dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
			"came in while we were scheduling this tasklet\n");
		if (cohc_chan_conf(cohc)->priority_high)
			tasklet_hi_schedule(&cohc->tasklet);
		else
			tasklet_schedule(&cohc->tasklet);
	}

	spin_unlock_irqrestore(&cohc->lock, flags);

	return;

 err:
	spin_unlock_irqrestore(&cohc->lock, flags);
	dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
}
/* called from interrupt context */
static void dma_tc_handle(struct coh901318_chan *cohc)
{
	/*
	 * If the channel is not allocated, then we shouldn't have
	 * any TC interrupts on it.
	 */
	if (!cohc->allocated) {
		dev_err(COHC_2_DEV(cohc), "spurious interrupt from "
			"unallocated channel\n");
		return;
	}

	spin_lock(&cohc->lock);

	/*
	 * When we reach this point, at least one queue item
	 * should have been moved over from cohc->queue to
	 * cohc->active and run to completion, that is why we're
	 * getting a terminal count interrupt is it not?
	 * If you get this BUG() the most probable cause is that
	 * the individual nodes in the lli chain have IRQ enabled,
	 * so check your platform config for lli chain ctrl.
	 */
	BUG_ON(list_empty(&cohc->active));

	cohc->nbr_active_done++;

	/*
	 * This attempts to take a job from cohc->queue, put it
	 * into cohc->active and start it.
	 */
	if (coh901318_queue_start(cohc) == NULL)
		cohc->busy = 0;

	spin_unlock(&cohc->lock);

	/*
	 * This tasklet will remove items from cohc->active
	 * and thus terminates them.
	 */
	if (cohc_chan_conf(cohc)->priority_high)
		tasklet_hi_schedule(&cohc->tasklet);
	else
		tasklet_schedule(&cohc->tasklet);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	u32 status1;
	u32 status2;
	int i;
	int ch;
	struct coh901318_base *base = dev_id;
	struct coh901318_chan *cohc;
	void __iomem *virtbase = base->virtbase;

	status1 = readl(virtbase + COH901318_INT_STATUS1);
	status2 = readl(virtbase + COH901318_INT_STATUS2);

	if (unlikely(status1 == 0 && status2 == 0)) {
		dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
		return IRQ_HANDLED;
	}

	/* TODO: consider handling the IRQ in a tasklet here to
	 * minimize interrupt latency */

	/* Check the first 32 DMA channels for IRQ */
	while (status1) {
		/* Find first bit set, return as a number. */
		i = ffs(status1) - 1;
		ch = i;

		cohc = &base->chans[ch];
		spin_lock(&cohc->lock);

		/* Mask off this bit */
		status1 &= ~(1 << i);
		/* Check the individual channel bits */
		if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
			dev_crit(COHC_2_DEV(cohc),
				 "DMA bus error on channel %d!\n", ch);
			BUG_ON(1);
			/* Clear BE interrupt */
			__set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
		} else {
			/* Caused by TC, really? */
			if (unlikely(!test_bit(i, virtbase +
					       COH901318_TC_INT_STATUS1))) {
				dev_warn(COHC_2_DEV(cohc),
					 "ignoring interrupt not caused by terminal count on channel %d\n", ch);
				/* Clear TC interrupt */
				BUG_ON(1);
				__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
			} else {
				/* Enable powersave if transfer has finished */
				if (!(readl(virtbase + COH901318_CX_STAT +
					    COH901318_CX_STAT_SPACING*ch) &
				      COH901318_CX_STAT_ENABLED)) {
					enable_powersave(cohc);
				}

				/* Must clear TC interrupt before calling
				 * dma_tc_handle
				 * in case tc_handle initiates a new dma job
				 */
				__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);

				dma_tc_handle(cohc);
			}
		}
		spin_unlock(&cohc->lock);
	}

	/* Check the remaining 32 DMA channels for IRQ */
	while (status2) {
		/* Find first bit set, return as a number. */
		i = ffs(status2) - 1;
		ch = i + 32;
		cohc = &base->chans[ch];
		spin_lock(&cohc->lock);

		/* Mask off this bit */
		status2 &= ~(1 << i);
		/* Check the individual channel bits */
		if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
			dev_crit(COHC_2_DEV(cohc),
				 "DMA bus error on channel %d!\n", ch);
			/* Clear BE interrupt */
			BUG_ON(1);
			__set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
		} else {
			/* Caused by TC, really? */
			if (unlikely(!test_bit(i, virtbase +
					       COH901318_TC_INT_STATUS2))) {
				dev_warn(COHC_2_DEV(cohc),
					 "ignoring interrupt not caused by terminal count on channel %d\n", ch);
				/* Clear TC interrupt */
				__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
				BUG_ON(1);
			} else {
				/* Enable powersave if transfer has finished */
				if (!(readl(virtbase + COH901318_CX_STAT +
					    COH901318_CX_STAT_SPACING*ch) &
				      COH901318_CX_STAT_ENABLED)) {
					enable_powersave(cohc);
				}
				/* Must clear TC interrupt before calling
				 * dma_tc_handle
				 * in case tc_handle initiates a new dma job
				 */
				__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);

				dma_tc_handle(cohc);
			}
		}
		spin_unlock(&cohc->lock);
	}

	return IRQ_HANDLED;
}
static int coh901318_alloc_chan_resources(struct dma_chan *chan)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	unsigned long flags;

	dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
		 __func__, cohc->id);

	if (chan->client_count > 1)
		return -EBUSY;

	spin_lock_irqsave(&cohc->lock, flags);

	coh901318_config(cohc, NULL);

	cohc->allocated = 1;
	cohc->completed = chan->cookie = 1;

	spin_unlock_irqrestore(&cohc->lock, flags);

	return 1;
}
static void
coh901318_free_chan_resources(struct dma_chan *chan)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	int channel = cohc->id;
	unsigned long flags;

	spin_lock_irqsave(&cohc->lock, flags);

	/* Disable HW */
	writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING*channel);
	writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
	       COH901318_CX_CTRL_SPACING*channel);

	cohc->allocated = 0;

	spin_unlock_irqrestore(&cohc->lock, flags);

	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}
static dma_cookie_t
coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
						   desc);
	struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
	unsigned long flags;

	spin_lock_irqsave(&cohc->lock, flags);

	tx->cookie = coh901318_assign_cookie(cohc, cohd);

	coh901318_desc_queue(cohc, cohd);

	spin_unlock_irqrestore(&cohc->lock, flags);

	return tx->cookie;
}
static struct dma_async_tx_descriptor *
coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		      size_t size, unsigned long flags)
{
	struct coh901318_lli *lli;
	struct coh901318_desc *cohd;
	unsigned long flg;
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	int lli_len;
	u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
	int ret;

	spin_lock_irqsave(&cohc->lock, flg);

	dev_vdbg(COHC_2_DEV(cohc),
		 "[%s] channel %d src 0x%x dest 0x%x size %d\n",
		 __func__, cohc->id, src, dest, size);

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last lli */
		ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;

	lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
	if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
		lli_len++;

	lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);

	if (lli == NULL)
		goto err;

	ret = coh901318_lli_fill_memcpy(
		&cohc->base->pool, lli, src, size, dest,
		cohc_chan_param(cohc)->ctrl_lli_chained,
		ctrl_last);
	if (ret)
		goto err;

	COH_DBG(coh901318_list_print(cohc, lli));

	/* Pick a descriptor to handle this transfer */
	cohd = coh901318_desc_get(cohc);
	cohd->lli = lli;
	cohd->flags = flags;
	cohd->desc.tx_submit = coh901318_tx_submit;

	spin_unlock_irqrestore(&cohc->lock, flg);

	return &cohd->desc;
 err:
	spin_unlock_irqrestore(&cohc->lock, flg);
	return NULL;
}
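/*
 * Illustrative only: a memcpy client is assumed to go through the generic
 * dmaengine API rather than calling the prep function above directly:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, size,
 *						   DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_done_callback;  // hypothetical callback
 *		tx->callback_param = my_data;     // hypothetical client data
 *		cookie = tx->tx_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */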
static struct dma_async_tx_descriptor *
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_data_direction direction,
			unsigned long flags)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	struct coh901318_lli *lli;
	struct coh901318_desc *cohd;
	const struct coh901318_params *params;
	struct scatterlist *sg;
	int len = 0;
	int size;
	int i;
	u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
	u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
	u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
	u32 config;
	unsigned long flg;
	int ret;

	if (!sgl)
		goto out;
	if (sgl->length == 0)
		goto out;

	spin_lock_irqsave(&cohc->lock, flg);

	dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
		 __func__, sg_len, direction);

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last lli */
		ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;

	params = cohc_chan_param(cohc);
	config = params->config;

	if (direction == DMA_TO_DEVICE) {
		u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
			COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;

		config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
		ctrl_chained |= tx_flags;
		ctrl_last |= tx_flags;
		ctrl |= tx_flags;
	} else if (direction == DMA_FROM_DEVICE) {
		u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
			COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;

		config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
		ctrl_chained |= rx_flags;
		ctrl_last |= rx_flags;
		ctrl |= rx_flags;
	} else
		goto err_direction;

	coh901318_set_conf(cohc, config);

	/* The dma only supports transmitting packages up to
	 * MAX_DMA_PACKET_SIZE. Calculate the total number of
	 * dma elements required to send the entire sg list
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int factor;
		size = sg_dma_len(sg);

		if (size <= MAX_DMA_PACKET_SIZE) {
			len++;
			continue;
		}

		factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
		if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
			factor++;

		len += factor;
	}

	pr_debug("Allocate %d lli:s for this transfer\n", len);
	lli = coh901318_lli_alloc(&cohc->base->pool, len);

	if (lli == NULL)
		goto err_dma_alloc;

	/* initiate allocated lli list */
	ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
				    cohc_dev_addr(cohc),
				    ctrl_chained,
				    ctrl,
				    ctrl_last,
				    direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
	if (ret)
		goto err_lli_fill;

	COH_DBG(coh901318_list_print(cohc, lli));

	/* Pick a descriptor to handle this transfer */
	cohd = coh901318_desc_get(cohc);
	cohd->dir = direction;
	cohd->flags = flags;
	cohd->desc.tx_submit = coh901318_tx_submit;
	cohd->lli = lli;

	spin_unlock_irqrestore(&cohc->lock, flg);

	return &cohd->desc;
 err_lli_fill:
 err_dma_alloc:
 err_direction:
	spin_unlock_irqrestore(&cohc->lock, flg);
 out:
	return NULL;
}
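/*
 * Illustrative only: a peripheral driver is assumed to map its buffer with
 * dma_map_sg() and then prepare and submit the transfer through the generic
 * hooks, which land in the prep function above:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						DMA_TO_DEVICE,
 *						DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_xfer_done;  // hypothetical callback
 *		cookie = tx->tx_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */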
static enum dma_status
coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		    struct dma_tx_state *txstate)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_complete = cohc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 coh901318_get_bytes_left(chan));
	if (ret == DMA_IN_PROGRESS && cohc->stopped)
		ret = DMA_PAUSED;

	return ret;
}
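/*
 * Illustrative only: a client polling for completion is assumed to use the
 * generic helper, which ends up in coh901318_tx_status() above:
 *
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	if (status == DMA_SUCCESS)
 *		;  // transfer done, residue is 0
 */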
static void
coh901318_issue_pending(struct dma_chan *chan)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&cohc->lock, flags);

	/*
	 * Busy means that pending jobs are already being processed,
	 * and then there is no point in starting the queue: the
	 * terminal count interrupt on the channel will take the next
	 * job on the queue and execute it anyway.
	 */
	if (!cohc->busy)
		coh901318_queue_start(cohc);

	spin_unlock_irqrestore(&cohc->lock, flags);
}
static int
coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		  unsigned long arg)
{
	unsigned long flags;
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	struct coh901318_desc *cohd;
	void __iomem *virtbase = cohc->base->virtbase;

	if (cmd == DMA_PAUSE) {
		coh901318_pause(chan);
		return 0;
	}

	if (cmd == DMA_RESUME) {
		coh901318_resume(chan);
		return 0;
	}

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/* The remainder of this function terminates the transfer */
	coh901318_pause(chan);
	spin_lock_irqsave(&cohc->lock, flags);

	/* Clear any pending BE or TC interrupt */
	if (cohc->id < 32) {
		writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
		writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
	} else {
		writel(1 << (cohc->id - 32), virtbase +
		       COH901318_BE_INT_CLEAR2);
		writel(1 << (cohc->id - 32), virtbase +
		       COH901318_TC_INT_CLEAR2);
	}

	enable_powersave(cohc);

	while ((cohd = coh901318_first_active_get(cohc))) {
		/* release the lli allocation */
		coh901318_lli_free(&cohc->base->pool, &cohd->lli);

		/* return desc to free-list */
		coh901318_desc_remove(cohd);
		coh901318_desc_free(cohc, cohd);
	}

	while ((cohd = coh901318_first_queued(cohc))) {
		/* release the lli allocation */
		coh901318_lli_free(&cohc->base->pool, &cohd->lli);

		/* return desc to free-list */
		coh901318_desc_remove(cohd);
		coh901318_desc_free(cohc, cohd);
	}

	cohc->nbr_active_done = 0;
	cohc->busy = 0;

	spin_unlock_irqrestore(&cohc->lock, flags);

	return 0;
}
void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
			 struct coh901318_base *base)
{
	int chans_i;
	int i = 0;
	struct coh901318_chan *cohc;

	INIT_LIST_HEAD(&dma->channels);

	for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
		for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
			cohc = &base->chans[i];

			cohc->base = base;
			cohc->chan.device = dma;
			cohc->id = i;

			/* TODO: do we really need this lock if only one
			 * client is connected to each channel?
			 */
			spin_lock_init(&cohc->lock);

			cohc->nbr_active_done = 0;
			cohc->busy = 0;
			INIT_LIST_HEAD(&cohc->free);
			INIT_LIST_HEAD(&cohc->active);
			INIT_LIST_HEAD(&cohc->queue);

			tasklet_init(&cohc->tasklet, dma_tasklet,
				     (unsigned long) cohc);

			list_add_tail(&cohc->chan.device_node,
				      &dma->channels);
		}
	}
}
static int __init coh901318_probe(struct platform_device *pdev)
{
	int err = 0;
	struct coh901318_platform *pdata;
	struct coh901318_base *base;
	int irq;
	struct resource *io;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		goto err_get_resource;

	/* Map DMA controller registers to virtual memory */
	if (request_mem_region(io->start,
			       resource_size(io),
			       pdev->dev.driver->name) == NULL) {
		err = -EBUSY;
		goto err_request_mem;
	}

	pdata = pdev->dev.platform_data;
	if (!pdata)
		goto err_no_platformdata;

	base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
		       pdata->max_channels *
		       sizeof(struct coh901318_chan),
		       GFP_KERNEL);
	if (!base)
		goto err_alloc_coh_dma_channels;

	base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);

	base->virtbase = ioremap(io->start, resource_size(io));
	if (!base->virtbase) {
		err = -ENOMEM;
		goto err_no_ioremap;
	}

	base->dev = &pdev->dev;
	base->platform = pdata;
	spin_lock_init(&base->pm.lock);
	base->pm.started_channels = 0;

	COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);

	platform_set_drvdata(pdev, base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_no_irq;

	err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
			  "coh901318", base);
	if (err) {
		dev_crit(&pdev->dev,
			 "Cannot allocate IRQ for DMA controller!\n");
		goto err_request_irq;
	}

	err = coh901318_pool_create(&base->pool, &pdev->dev,
				    sizeof(struct coh901318_lli),
				    32);
	if (err)
		goto err_pool_create;

	/* init channels for device transfers */
	coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
			    base);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
	base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
	base->dma_slave.device_tx_status = coh901318_tx_status;
	base->dma_slave.device_issue_pending = coh901318_issue_pending;
	base->dma_slave.device_control = coh901318_control;
	base->dma_slave.dev = &pdev->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err)
		goto err_register_slave;

	/* init channels for memcpy */
	coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
			    base);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
	base->dma_memcpy.device_tx_status = coh901318_tx_status;
	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
	base->dma_memcpy.device_control = coh901318_control;
	base->dma_memcpy.dev = &pdev->dev;
	/*
	 * This controller can only access address at even 32bit boundaries,
	 * i.e. 2^2
	 */
	base->dma_memcpy.copy_align = 2;
	err = dma_async_device_register(&base->dma_memcpy);

	if (err)
		goto err_register_memcpy;

	dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
		 (u32) base->virtbase);

	return err;

 err_register_memcpy:
	dma_async_device_unregister(&base->dma_slave);
 err_register_slave:
	coh901318_pool_destroy(&base->pool);
 err_pool_create:
	free_irq(platform_get_irq(pdev, 0), base);
 err_request_irq:
 err_no_irq:
	iounmap(base->virtbase);
 err_no_ioremap:
	kfree(base);
 err_alloc_coh_dma_channels:
 err_no_platformdata:
	release_mem_region(pdev->resource->start,
			   resource_size(pdev->resource));
 err_request_mem:
 err_get_resource:
	return err;
}
static int __exit coh901318_remove(struct platform_device *pdev)
{
	struct coh901318_base *base = platform_get_drvdata(pdev);

	dma_async_device_unregister(&base->dma_memcpy);
	dma_async_device_unregister(&base->dma_slave);
	coh901318_pool_destroy(&base->pool);
	free_irq(platform_get_irq(pdev, 0), base);
	iounmap(base->virtbase);
	kfree(base);
	release_mem_region(pdev->resource->start,
			   resource_size(pdev->resource));
	return 0;
}
static struct platform_driver coh901318_driver = {
	.remove = __exit_p(coh901318_remove),
	.driver = {
		.name	= "coh901318",
	},
};

int __init coh901318_init(void)
{
	return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
subsys_initcall(coh901318_init);

void __exit coh901318_exit(void)
{
	platform_driver_unregister(&coh901318_driver);
}
module_exit(coh901318_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Per Friden");