/* linux/arch/arm/plat-s3c64xx/dma.c
 *
 * Copyright 2009 Openmoko, Inc.
 * Copyright 2009 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C64XX DMA core
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

#include <mach/dma.h>
#include <mach/map.h>
#include <mach/irqs.h>

#include <mach/regs-sys.h>

#include <asm/hardware/pl080.h>

/* dma channel state information */

struct s3c64xx_dmac {
	struct device		dev;
	struct clk		*clk;
	void __iomem		*regs;
	struct s3c2410_dma_chan	*channels;
	enum dma_ch		chanbase;
};

/* pool to provide LLI buffers */
static struct dma_pool *dma_pool;

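/* Each enqueued buffer owns one struct pl080s_lli allocated from this
 * pool (created in s3c64xx_dma_init()), so the descriptors live in
 * DMA-coherent memory that the controller can follow through the
 * next_lli links.
 */
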
/* Debug configuration and code */

static unsigned char debug_show_buffs;

static void dbg_showchan(struct s3c2410_dma_chan *chan)
{
	pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
		 chan->number,
		 readl(chan->regs + PL080_CH_SRC_ADDR),
		 readl(chan->regs + PL080_CH_DST_ADDR),
		 readl(chan->regs + PL080_CH_LLI),
		 readl(chan->regs + PL080_CH_CONTROL),
		 readl(chan->regs + PL080S_CH_CONTROL2),
		 readl(chan->regs + PL080S_CH_CONFIG));
}

static void show_lli(struct pl080s_lli *lli)
{
	pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
		 lli, lli->src_addr, lli->dst_addr, lli->next_lli,
		 lli->control0, lli->control1);
}

static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dma_buff *ptr;
	struct s3c64xx_dma_buff *end;

	pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
		 chan->number, chan->next, chan->curr, chan->end);

	ptr = chan->next;
	end = chan->end;

	if (debug_show_buffs) {
		for (; ptr != NULL; ptr = ptr->next) {
			pr_debug("DMA%d: %08x ",
				 chan->number, ptr->lli_dma);
			show_lli(ptr->lli);
		}
	}
}

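/* Map a virtual DMA request (enum dma_ch) onto a free hardware channel.
 * Requests below DMACH_PCM1_TX are served by the first PL080 (channels
 * 0..7), the rest by the second controller (channels 8..15); see the
 * s3c64xx_dma_init1() calls at the bottom of this file.
 */
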
static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
{
	struct s3c2410_dma_chan *chan;
	unsigned int start, offs;

	start = 0;

	if (channel >= DMACH_PCM1_TX)
		start = 8;

	for (offs = 0; offs < 8; offs++) {
		chan = &s3c2410_chans[start + offs];
		if (!chan->in_use)
			goto found;
	}

	return NULL;

found:
	s3c_dma_chan_map[channel] = chan;
	return chan;
}

int s3c2410_dma_config(enum dma_ch channel, int xferunit)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	if (chan == NULL)
		return -EINVAL;

	/* xferunit is the transfer width in bytes: 1, 2 or 4 */
	switch (xferunit) {
	case 1:
		chan->hw_width = 0;
		break;
	case 2:
		chan->hw_width = 1;
		break;
	case 4:
		chan->hw_width = 2;
		break;
	default:
		printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_config);

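/* Fill one PL080S LLI for a single transfer: pick source/destination
 * and the increment/AHB-master bits from the transfer direction, then
 * encode the transfer count and bus widths into the control words.
 */
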
static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
				 struct pl080s_lli *lli,
				 dma_addr_t data, int size)
{
	dma_addr_t src, dst;
	u32 control0, control1;

	switch (chan->source) {
	case DMA_FROM_DEVICE:
		src = chan->dev_addr;
		dst = data;
		control0 = PL080_CONTROL_SRC_AHB2;
		control0 |= PL080_CONTROL_DST_INCR;
		break;

	case DMA_TO_DEVICE:
		src = data;
		dst = chan->dev_addr;
		control0 = PL080_CONTROL_DST_AHB2;
		control0 |= PL080_CONTROL_SRC_INCR;
		break;

	default:
		BUG();
	}

	/* note, we do not currently setup any of the burst controls */

	control1 = size >> chan->hw_width;	/* size in no of xfers */
	control0 |= PL080_CONTROL_PROT_SYS;	/* always in priv. mode */
	control0 |= PL080_CONTROL_TC_IRQ_EN;	/* always fire IRQ */
	control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
	control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;

	lli->src_addr = src;
	lli->dst_addr = dst;
	lli->next_lli = 0;
	lli->control0 = control0;
	lli->control1 = control1;
}

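/* Load an LLI directly into the channel registers; this is the same
 * state the controller would reach by following the LLI's next_lli
 * pointer, and is used to prime an idle channel with its first buffer.
 */
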
static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
				struct pl080s_lli *lli)
{
	void __iomem *regs = chan->regs;

	pr_debug("%s: LLI %p => regs\n", __func__, lli);
	show_lli(lli);

	writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
	writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
	writel(lli->next_lli, regs + PL080_CH_LLI);
	writel(lli->control0, regs + PL080_CH_CONTROL);
	writel(lli->control1, regs + PL080S_CH_CONTROL2);
}

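/* Start a transfer: clear any stale TC/ERR status for this channel,
 * then set ENABLE and drop HALT in the channel config register.
 */
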
static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dmac *dmac = chan->dmac;
	u32 config;
	u32 bit = chan->bit;

	dbg_showchan(chan);

	pr_debug("%s: clearing interrupts\n", __func__);

	/* clear interrupts */
	writel(bit, dmac->regs + PL080_TC_CLEAR);
	writel(bit, dmac->regs + PL080_ERR_CLEAR);

	pr_debug("%s: starting channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_ENABLE;
	config &= ~PL080_CONFIG_HALT;

	pr_debug("%s: writing config %08x\n", __func__, config);
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}

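/* Stop a transfer: halt the channel first so any in-flight AHB transfer
 * can drain, poll until the ACTIVE bit clears, and only then clear the
 * ENABLE bit.
 */
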
static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
{
	u32 config;
	int timeout;

	pr_debug("%s: stopping channel\n", __func__);

	dbg_showchan(chan);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_HALT;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	timeout = 1000;
	do {
		config = readl(chan->regs + PL080S_CH_CONFIG);
		pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
		if (config & PL080_CONFIG_ACTIVE)
			udelay(10);
		else
			break;
	} while (--timeout > 0);

	if (config & PL080_CONFIG_ACTIVE) {
		printk(KERN_ERR "%s: channel still active\n", __func__);
		return -EFAULT;
	}

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}

static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan,
					 struct s3c64xx_dma_buff *buf,
					 enum s3c2410_dma_buffresult result)
{
	if (chan->callback_fn != NULL)
		(chan->callback_fn)(chan, buf->pw, 0, result);
}

static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
{
	dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
	kfree(buff);
}

static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dma_buff *buff, *next;
	u32 config;

	dbg_showchan(chan);

	pr_debug("%s: flushing channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	/* dump all the buffers associated with this channel */

	for (buff = chan->curr; buff != NULL; buff = next) {
		next = buff->next;
		pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);

		s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT);
		s3c64xx_dma_freebuff(buff);
	}

	chan->curr = chan->next = chan->end = NULL;

	return 0;
}

int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	if (chan == NULL)
		return -EINVAL;

	switch (op) {
	case S3C2410_DMAOP_START:
		return s3c64xx_dma_start(chan);

	case S3C2410_DMAOP_STOP:
		return s3c64xx_dma_stop(chan);

	case S3C2410_DMAOP_FLUSH:
		return s3c64xx_dma_flush(chan);

	/* believe PAUSE/RESUME are no-ops */
	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_STARTED:
	case S3C2410_DMAOP_TIMEOUT:
		return 0;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);

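/* Queue a new buffer on a channel.  Buffers are kept on a singly linked
 * list (chan->curr .. chan->end, with chan->next as the next buffer the
 * hardware will take); each buffer carries one LLI, and appending a
 * buffer chains its LLI onto the previous end's next_lli so the PL080
 * walks the list without CPU intervention.
 */
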
int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	struct s3c64xx_dma_buff *next;
	struct s3c64xx_dma_buff *buff;
	struct pl080s_lli *lli;
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
	if (!buff) {
		printk(KERN_ERR "%s: no memory for buffer\n", __func__);
		return -ENOMEM;
	}

	lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
	if (!lli) {
		printk(KERN_ERR "%s: no memory for lli\n", __func__);
		kfree(buff);
		return -ENOMEM;
	}

	pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
		 __func__, buff, data, lli, (u32)buff->lli_dma, size);

	buff->lli = lli;
	buff->pw = id;

	s3c64xx_dma_fill_lli(chan, lli, data, size);

	local_irq_save(flags);

	if ((next = chan->next) != NULL) {
		struct s3c64xx_dma_buff *end = chan->end;
		struct pl080s_lli *endlli = end->lli;

		pr_debug("enquing onto channel\n");

		end->next = buff;
		endlli->next_lli = buff->lli_dma;

		if (chan->flags & S3C2410_DMAF_CIRCULAR) {
			struct s3c64xx_dma_buff *curr = chan->curr;
			lli->next_lli = curr->lli_dma;
		}

		if (next == chan->curr) {
			writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
			chan->next = buff;
		}

		show_lli(endlli);
		chan->end = buff;
	} else {
		pr_debug("enquing onto empty channel\n");

		chan->curr = buff;
		chan->next = buff;
		chan->end = buff;

		s3c64xx_lli_to_regs(chan, lli);
	}

	local_irq_restore(flags);

	show_lli(lli);

	dbg_showchan(chan);
	dbg_showbuffs(chan);
	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);

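/* Program the peripheral side of a channel.  The flow control field is
 * set to 2 (peripheral-to-memory, DMAC as flow controller) for
 * DMA_FROM_DEVICE and 1 (memory-to-peripheral) for DMA_TO_DEVICE, and
 * the peripheral number is routed to the source or destination request
 * select accordingly.
 */
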
int s3c2410_dma_devconfig(enum dma_ch channel,
			  enum dma_data_direction source,
			  unsigned long devaddr)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	u32 peripheral;
	u32 config = 0;

	pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
		 __func__, channel, source, devaddr, chan);

	if (chan == NULL)
		return -EINVAL;

	peripheral = (chan->peripheral & 0xf);
	chan->source = source;
	chan->dev_addr = devaddr;

	pr_debug("%s: peripheral %d\n", __func__, peripheral);

	switch (source) {
	case DMA_FROM_DEVICE:
		config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
		break;

	case DMA_TO_DEVICE:
		config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
		break;

	default:
		printk(KERN_ERR "%s: bad source\n", __func__);
		return -EINVAL;
	}

	/* allow TC and ERR interrupts */
	config |= PL080_CONFIG_TC_IRQ_MASK;
	config |= PL080_CONFIG_ERR_IRQ_MASK;

	pr_debug("%s: config %08x\n", __func__, config);

	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);

int s3c2410_dma_getposition(enum dma_ch channel,
			    dma_addr_t *src, dma_addr_t *dst)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	if (chan == NULL)
		return -EINVAL;

	if (src != NULL)
		*src = readl(chan->regs + PL080_CH_SRC_ADDR);

	if (dst != NULL)
		*dst = readl(chan->regs + PL080_CH_DST_ADDR);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);

/* s3c2410_request_dma
 *
 * get control of a DMA channel
*/

int s3c2410_dma_request(enum dma_ch channel,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c2410_dma_chan *chan;
	unsigned long flags;

	pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
		 channel, client->name, dev);

	local_irq_save(flags);

	chan = s3c64xx_dma_map_channel(channel);
	if (chan == NULL) {
		local_irq_restore(flags);
		return -EBUSY;
	}

	dbg_showchan(chan);

	chan->client = client;
	chan->in_use = 1;
	chan->peripheral = channel;

	local_irq_restore(flags);

	pr_debug("%s: channel initialised, %p\n", __func__, chan);

	return chan->number | DMACH_LOW_LEVEL;
}
EXPORT_SYMBOL(s3c2410_dma_request);

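/* A client driver typically uses this API in the order:
 * s3c2410_dma_request() -> s3c2410_dma_devconfig() (peripheral address
 * and direction) -> s3c2410_dma_config() (transfer width) ->
 * s3c2410_dma_enqueue() for each buffer -> s3c2410_dma_ctrl(START),
 * and finally s3c2410_dma_free() when finished with the channel.
 */
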
/* s3c2410_dma_free
 *
 * release the given channel back to the system, will stop and flush
 * any outstanding transfers, and ensure the channel is ready for the
 * next claimant.
 *
 * Note, although a warning is currently printed if the freeing client
 * info is not the same as the registrant's client info, the free is still
 * allowed to go through.
*/

int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	local_irq_save(flags);

	if (chan->client != client) {
		printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
		       channel, chan->client, client);
	}

	/* sort out stopping and freeing the channel */

	chan->client = NULL;
	chan->in_use = 0;

	if (!(channel & DMACH_LOW_LEVEL))
		s3c_dma_chan_map[channel] = NULL;

	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_free);

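/* One interrupt is shared by the eight channels of each PL080; walk the
 * TC and ERR status registers a bit at a time, complete the buffer that
 * just finished, and advance the channel's curr/next/end pointers.
 */
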
static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
{
	struct s3c64xx_dmac *dmac = pw;
	struct s3c2410_dma_chan *chan;
	enum s3c2410_dma_buffresult res;
	u32 tcstat, errstat;
	u32 bit;
	int offs;

	tcstat = readl(dmac->regs + PL080_TC_STATUS);
	errstat = readl(dmac->regs + PL080_ERR_STATUS);

	for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
		struct s3c64xx_dma_buff *buff;

		if (!(errstat & bit) && !(tcstat & bit))
			continue;

		chan = dmac->channels + offs;
		res = S3C2410_RES_ERR;

		if (tcstat & bit) {
			writel(bit, dmac->regs + PL080_TC_CLEAR);
			res = S3C2410_RES_OK;
		}

		if (errstat & bit)
			writel(bit, dmac->regs + PL080_ERR_CLEAR);

		/* 'next' points to the buffer that is next to the
		 * currently active buffer.
		 * For CIRCULAR queues, 'next' will be same as 'curr'
		 * when 'end' is the active buffer.
		 */
		buff = chan->curr;
		while (buff && buff != chan->next
				&& buff->next != chan->next)
			buff = buff->next;

		if (buff == chan->next)
			buff = chan->end;

		s3c64xx_dma_bufffdone(chan, buff, res);

		/* Free the node and update curr, if non-circular queue */
		if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) {
			chan->curr = buff->next;
			s3c64xx_dma_freebuff(buff);
		}

		/* Update 'next' */
		buff = chan->next;
		if (chan->next == chan->end) {
			chan->next = chan->curr;
			if (!(chan->flags & S3C2410_DMAF_CIRCULAR))
				chan->end = NULL;
		} else {
			chan->next = buff->next;
		}
	}

	return IRQ_HANDLED;
}

static struct bus_type dma_subsys = {
	.name		= "s3c64xx-dma",
	.dev_name	= "s3c64xx-dma",
};

static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
			     int irq, unsigned int base)
{
	struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
	struct s3c64xx_dmac *dmac;
	char clkname[16];
	void __iomem *regs;
	void __iomem *regptr;
	int err, ch;

	dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
	if (!dmac) {
		printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
		return -ENOMEM;
	}

	dmac->dev.id = chno / 8;
	dmac->dev.bus = &dma_subsys;

	err = device_register(&dmac->dev);
	if (err) {
		printk(KERN_ERR "%s: failed to register device\n", __func__);
		goto err_alloc;
	}

	regs = ioremap(base, 0x200);
	if (!regs) {
		printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
		err = -ENXIO;
		goto err_dev;
	}

	snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);

	dmac->clk = clk_get(NULL, clkname);
	if (IS_ERR(dmac->clk)) {
		printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
		err = PTR_ERR(dmac->clk);
		goto err_map;
	}

	clk_enable(dmac->clk);

	dmac->regs = regs;
	dmac->chanbase = chbase;
	dmac->channels = chptr;

	err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
	if (err < 0) {
		printk(KERN_ERR "%s: failed to get irq\n", __func__);
		goto err_clk;
	}

	regptr = regs + PL080_Cx_BASE(0);

	for (ch = 0; ch < 8; ch++, chptr++) {
		pr_debug("%s: registering DMA %d (%p)\n",
			 __func__, chno + ch, regptr);

		chptr->bit = 1 << ch;
		chptr->number = chno + ch;
		chptr->dmac = dmac;
		chptr->regs = regptr;
		regptr += PL080_Cx_STRIDE;
	}

	/* for the moment, permanently enable the controller */
	writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);

	printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
	       irq, regs, chno, chno+8);

	return 0;

err_clk:
	clk_disable(dmac->clk);
	clk_put(dmac->clk);
err_map:
	iounmap(regs);
err_dev:
	device_unregister(&dmac->dev);
err_alloc:
	kfree(dmac);
	return err;
}

static int __init s3c64xx_dma_init(void)
{
	int ret;

	printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

	dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
	if (!dma_pool) {
		printk(KERN_ERR "%s: failed to create pool\n", __func__);
		return -ENOMEM;
	}

	ret = subsys_system_register(&dma_subsys, NULL);
	if (ret) {
		printk(KERN_ERR "%s: failed to create subsys\n", __func__);
		return -ENOMEM;
	}

	/* Set all DMA configuration to be DMA, not SDMA */
	writel(0xffffff, S3C64XX_SDMA_SEL);

	/* Register standard DMA controllers */
	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);

	return 0;
}

arch_initcall(s3c64xx_dma_init);