drivers/dma/bcm2835-dma.c

/*
 * BCM2835 DMA engine support
 *
 * This driver only supports cyclic DMA transfers
 * as needed for the I2S module.
 *
 * Author: Florian Meier <florian.meier@koalo.de>
 *         Copyright 2013
 *
 * Based on
 *      OMAP DMAengine support by Russell King
 *
 *      BCM2708 DMA Driver
 *      Copyright (C) 2010 Broadcom
 *
 *      Raspberry Pi PCM I2S ALSA Driver
 *      Copyright (c) by Phil Poole 2013
 *
 *      MARVELL MMP Peripheral DMA Driver
 *      Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
#define BCM2835_DMA_CHAN_NAME_SIZE 8

struct bcm2835_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        void __iomem *base;
        struct device_dma_parameters dma_parms;
};

struct bcm2835_dma_cb {
        uint32_t info;
        uint32_t src;
        uint32_t dst;
        uint32_t length;
        uint32_t stride;
        uint32_t next;
        uint32_t pad[2];
};

struct bcm2835_cb_entry {
        struct bcm2835_dma_cb *cb;
        dma_addr_t paddr;
};

struct bcm2835_chan {
        struct virt_dma_chan vc;
        struct list_head node;

        struct dma_slave_config cfg;
        unsigned int dreq;

        int ch;
        struct bcm2835_desc *desc;
        struct dma_pool *cb_pool;

        void __iomem *chan_base;
        int irq_number;
        unsigned int irq_flags;

        bool is_lite_channel;
};

struct bcm2835_desc {
        struct bcm2835_chan *c;
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;

        unsigned int frames;
        size_t size;

        bool cyclic;

        struct bcm2835_cb_entry cb_list[];
};

#define BCM2835_DMA_CS          0x00
#define BCM2835_DMA_ADDR        0x04
#define BCM2835_DMA_TI          0x08
#define BCM2835_DMA_SOURCE_AD   0x0c
#define BCM2835_DMA_DEST_AD     0x10
#define BCM2835_DMA_LEN         0x14
#define BCM2835_DMA_STRIDE      0x18
#define BCM2835_DMA_NEXTCB      0x1c
#define BCM2835_DMA_DEBUG       0x20

/* DMA CS Control and Status bits */
#define BCM2835_DMA_ACTIVE      BIT(0)  /* activate the DMA */
#define BCM2835_DMA_END         BIT(1)  /* current CB has ended */
#define BCM2835_DMA_INT         BIT(2)  /* interrupt status */
#define BCM2835_DMA_DREQ        BIT(3)  /* DREQ state */
#define BCM2835_DMA_ISPAUSED    BIT(4)  /* Pause requested or not active */
#define BCM2835_DMA_ISHELD      BIT(5)  /* Is held by DREQ flow control */
#define BCM2835_DMA_WAITING_FOR_WRITES BIT(6)  /* waiting for last
                                                * AXI-write to ack
                                                */
#define BCM2835_DMA_ERR         BIT(8)
#define BCM2835_DMA_PRIORITY(x) ((x & 15) << 16) /* AXI priority */
#define BCM2835_DMA_PANIC_PRIORITY(x) ((x & 15) << 20) /* panic priority */
/* current value of TI.BCM2835_DMA_WAIT_RESP */
#define BCM2835_DMA_WAIT_FOR_WRITES BIT(28)
#define BCM2835_DMA_DIS_DEBUG   BIT(29) /* disable debug pause signal */
#define BCM2835_DMA_ABORT       BIT(30) /* Stop current CB, go to next, WO */
#define BCM2835_DMA_RESET       BIT(31) /* WO, self clearing */

/* Transfer information bits - also bcm2835_cb.info field */
#define BCM2835_DMA_INT_EN      BIT(0)
#define BCM2835_DMA_TDMODE      BIT(1)  /* 2D-Mode */
#define BCM2835_DMA_WAIT_RESP   BIT(3)  /* wait for AXI-write to be acked */
#define BCM2835_DMA_D_INC       BIT(4)
#define BCM2835_DMA_D_WIDTH     BIT(5)  /* 128bit writes if set */
#define BCM2835_DMA_D_DREQ      BIT(6)  /* enable DREQ for destination */
#define BCM2835_DMA_D_IGNORE    BIT(7)  /* ignore destination writes */
#define BCM2835_DMA_S_INC       BIT(8)
#define BCM2835_DMA_S_WIDTH     BIT(9)  /* 128bit writes if set */
#define BCM2835_DMA_S_DREQ      BIT(10) /* enable SREQ for source */
#define BCM2835_DMA_S_IGNORE    BIT(11) /* ignore source reads - read 0 */
#define BCM2835_DMA_BURST_LENGTH(x) ((x & 15) << 12)
#define BCM2835_DMA_PER_MAP(x)  ((x & 31) << 16) /* REQ source */
#define BCM2835_DMA_WAIT(x)     ((x & 31) << 21) /* add DMA-wait cycles */
#define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */

/* debug register bits */
#define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR      BIT(0)
#define BCM2835_DMA_DEBUG_FIFO_ERR              BIT(1)
#define BCM2835_DMA_DEBUG_READ_ERR              BIT(2)
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_SHIFT 4
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_BITS 4
#define BCM2835_DMA_DEBUG_ID_SHIFT 16
#define BCM2835_DMA_DEBUG_ID_BITS 9
#define BCM2835_DMA_DEBUG_STATE_SHIFT 16
#define BCM2835_DMA_DEBUG_STATE_BITS 9
#define BCM2835_DMA_DEBUG_VERSION_SHIFT 25
#define BCM2835_DMA_DEBUG_VERSION_BITS 3
#define BCM2835_DMA_DEBUG_LITE BIT(28)

/* shared registers for all dma channels */
#define BCM2835_DMA_INT_STATUS  0xfe0
#define BCM2835_DMA_ENABLE      0xff0

#define BCM2835_DMA_DATA_TYPE_S8        1
#define BCM2835_DMA_DATA_TYPE_S16       2
#define BCM2835_DMA_DATA_TYPE_S32       4
#define BCM2835_DMA_DATA_TYPE_S128      16

/* Valid only for channels 0 - 14, 15 has its own base address */
#define BCM2835_DMA_CHAN(n)     ((n) << 8) /* Base address */
#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))

/* the max dma length for different channels */
#define MAX_DMA_LEN SZ_1G
#define MAX_LITE_DMA_LEN (SZ_64K - 4)

static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c)
{
        /* lite and normal channels have different max frame length */
        return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN;
}

/* how many frames of max_len size do we need to transfer len bytes */
static inline size_t bcm2835_dma_frames_for_length(size_t len,
                                                   size_t max_len)
{
        return DIV_ROUND_UP(len, max_len);
}

static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
{
        return container_of(d, struct bcm2835_dmadev, ddev);
}

static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct bcm2835_chan, vc.chan);
}

static inline struct bcm2835_desc *to_bcm2835_dma_desc(
                struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct bcm2835_desc, vd.tx);
}

static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
{
        size_t i;

        for (i = 0; i < desc->frames; i++)
                dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
                              desc->cb_list[i].paddr);

        kfree(desc);
}

static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
        bcm2835_dma_free_cb_chain(
                container_of(vd, struct bcm2835_desc, vd));
}

static void bcm2835_dma_create_cb_set_length(
        struct bcm2835_chan *chan,
        struct bcm2835_dma_cb *control_block,
        size_t len,
        size_t period_len,
        size_t *total_len,
        u32 finalextrainfo)
{
        size_t max_len = bcm2835_dma_max_frame_length(chan);

        /* set the length taking lite-channel limitations into account */
        control_block->length = min_t(u32, len, max_len);

        /* finished if we have no period_length */
        if (!period_len)
                return;

        /*
         * period_len means: that we need to generate
         * transfers that are terminating at every
         * multiple of period_len - this is typically
         * used to set the interrupt flag in info
         * which is required during cyclic transfers
         */

        /* have we filled in period_length yet? */
        if (*total_len + control_block->length < period_len) {
                /* update number of bytes in this period so far */
                *total_len += control_block->length;
                return;
        }

        /* calculate the length that remains to reach period_length */
        control_block->length = period_len - *total_len;

        /* reset total_length for next period */
        *total_len = 0;

        /* add extrainfo bits in info */
        control_block->info |= finalextrainfo;
}

static inline size_t bcm2835_dma_count_frames_for_sg(
        struct bcm2835_chan *c,
        struct scatterlist *sgl,
        unsigned int sg_len)
{
        size_t frames = 0;
        struct scatterlist *sgent;
        unsigned int i;
        size_t plength = bcm2835_dma_max_frame_length(c);

        for_each_sg(sgl, sgent, sg_len, i)
                frames += bcm2835_dma_frames_for_length(
                        sg_dma_len(sgent), plength);

        return frames;
}

/**
 * bcm2835_dma_create_cb_chain - create a chain of control blocks and fill in the data
 *
 * @chan: the @dma_chan for which we run this
 * @direction: the direction in which we transfer
 * @cyclic: it is a cyclic transfer
 * @info: the default info bits to apply per controlblock
 * @frames: number of controlblocks to allocate
 * @src: the src address to assign (if the S_INC bit is set
 *       in @info, then it gets incremented)
 * @dst: the dst address to assign (if the D_INC bit is set
 *       in @info, then it gets incremented)
 * @buf_len: the full buffer length (may also be 0)
 * @period_len: the period length when to apply @finalextrainfo
 *              in addition to the last transfer
 *              this will also break some control-blocks early
 * @finalextrainfo: additional bits in last controlblock
 *                  (or when period_len is reached in case of cyclic)
 * @gfp: the GFP flag to use for allocation
 */
static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
        struct dma_chan *chan, enum dma_transfer_direction direction,
        bool cyclic, u32 info, u32 finalextrainfo, size_t frames,
        dma_addr_t src, dma_addr_t dst, size_t buf_len,
        size_t period_len, gfp_t gfp)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        size_t len = buf_len, total_len;
        size_t frame;
        struct bcm2835_desc *d;
        struct bcm2835_cb_entry *cb_entry;
        struct bcm2835_dma_cb *control_block;

        if (!frames)
                return NULL;

        /* allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
                    gfp);
        if (!d)
                return NULL;

        d->c = c;
        d->dir = direction;
        d->cyclic = cyclic;

        /*
         * Iterate over all frames, create a control block
         * for each frame and link them together.
         */
        for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) {
                cb_entry = &d->cb_list[frame];
                cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
                                              &cb_entry->paddr);
                if (!cb_entry->cb)
                        goto error_cb;

                /* fill in the control block */
                control_block = cb_entry->cb;
                control_block->info = info;
                control_block->src = src;
                control_block->dst = dst;
                control_block->stride = 0;
                control_block->next = 0;
                /* set up length in control_block if requested */
                if (buf_len) {
                        /* calculate length honoring period_length */
                        bcm2835_dma_create_cb_set_length(
                                c, control_block,
                                len, period_len, &total_len,
                                cyclic ? finalextrainfo : 0);

                        /* calculate new remaining length */
                        len -= control_block->length;
                }

                /* link this to the last controlblock */
                if (frame)
                        d->cb_list[frame - 1].cb->next = cb_entry->paddr;

                /* update src and dst and length */
                if (src && (info & BCM2835_DMA_S_INC))
                        src += control_block->length;
                if (dst && (info & BCM2835_DMA_D_INC))
                        dst += control_block->length;

                /* Length of total transfer */
                d->size += control_block->length;
        }

        /* the last frame requires extra flags */
        d->cb_list[d->frames - 1].cb->info |= finalextrainfo;

        /* detect a size mismatch */
        if (buf_len && (d->size != buf_len))
                goto error_cb;

        return d;
error_cb:
        bcm2835_dma_free_cb_chain(d);

        return NULL;
}

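/*
 * Walk the scatterlist and fill the pre-allocated control blocks with the
 * bus address and length of each entry, splitting entries that exceed the
 * channel's maximum frame length across several control blocks.
 */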
static void bcm2835_dma_fill_cb_chain_with_sg(
        struct dma_chan *chan,
        enum dma_transfer_direction direction,
        struct bcm2835_cb_entry *cb,
        struct scatterlist *sgl,
        unsigned int sg_len)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        size_t len, max_len;
        unsigned int i;
        dma_addr_t addr;
        struct scatterlist *sgent;

        max_len = bcm2835_dma_max_frame_length(c);
        for_each_sg(sgl, sgent, sg_len, i) {
                for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
                     len > 0;
                     addr += cb->cb->length, len -= cb->cb->length, cb++) {
                        if (direction == DMA_DEV_TO_MEM)
                                cb->cb->dst = addr;
                        else
                                cb->cb->src = addr;
                        cb->cb->length = min(len, max_len);
                }
        }
}

static int bcm2835_dma_abort(struct bcm2835_chan *c)
{
        void __iomem *chan_base = c->chan_base;
        long int timeout = 10000;

        /*
         * A zero control block address means the channel is idle.
         * (The ACTIVE flag in the CS register is not a reliable indicator.)
         */
        if (!readl(chan_base + BCM2835_DMA_ADDR))
                return 0;

        /* Write 0 to the active bit - Pause the DMA */
        writel(0, chan_base + BCM2835_DMA_CS);

        /* Wait for any current AXI transfer to complete */
        while ((readl(chan_base + BCM2835_DMA_CS) &
                BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
                cpu_relax();

        /* Peripheral might be stuck and fail to signal AXI write responses */
        if (!timeout)
                dev_err(c->vc.chan.device->dev,
                        "failed to complete outstanding writes\n");

        writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
        return 0;
}

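/*
 * Load the first control block of the next queued descriptor into the
 * channel and set the ACTIVE bit. Called with c->vc.lock held.
 */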
static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct bcm2835_desc *d;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_bcm2835_dma_desc(&vd->tx);

        writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
        writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}

static irqreturn_t bcm2835_dma_callback(int irq, void *data)
{
        struct bcm2835_chan *c = data;
        struct bcm2835_desc *d;
        unsigned long flags;

        /* check the shared interrupt */
        if (c->irq_flags & IRQF_SHARED) {
                /* check if the interrupt is enabled */
                flags = readl(c->chan_base + BCM2835_DMA_CS);
                /* if not set then we are not the reason for the irq */
                if (!(flags & BCM2835_DMA_INT))
                        return IRQ_NONE;
        }

        spin_lock_irqsave(&c->vc.lock, flags);

        /*
         * Clear the INT flag to receive further interrupts. Keep the channel
         * active in case the descriptor is cyclic or in case the client has
         * already terminated the descriptor and issued a new one. (May happen
         * if this IRQ handler is threaded.) If the channel is finished, it
         * will remain idle despite the ACTIVE flag being set.
         */
        writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
               c->chan_base + BCM2835_DMA_CS);

        d = c->desc;

        if (d) {
                if (d->cyclic) {
                        /* call the cyclic callback */
                        vchan_cyclic_callback(&d->vd);
                } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
                        vchan_cookie_complete(&c->desc->vd);
                        bcm2835_dma_start_desc(c);
                }
        }

        spin_unlock_irqrestore(&c->vc.lock, flags);

        return IRQ_HANDLED;
}

static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct device *dev = c->vc.chan.device->dev;

        dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);

        c->cb_pool = dma_pool_create(dev_name(dev), dev,
                                     sizeof(struct bcm2835_dma_cb), 0, 0);
        if (!c->cb_pool) {
                dev_err(dev, "unable to allocate descriptor pool\n");
                return -ENOMEM;
        }

        return request_irq(c->irq_number, bcm2835_dma_callback,
                           c->irq_flags, "DMA IRQ", c);
}

static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

        vchan_free_chan_resources(&c->vc);
        free_irq(c->irq_number, c);
        dma_pool_destroy(c->cb_pool);

        dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}

static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
{
        return d->size;
}

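/*
 * Given the bus address the hardware is currently transferring from or to,
 * return how many bytes of the descriptor are still outstanding.
 */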
static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
{
        unsigned int i;
        size_t size;

        for (size = i = 0; i < d->frames; i++) {
                struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
                size_t this_size = control_block->length;
                dma_addr_t dma;

                if (d->dir == DMA_DEV_TO_MEM)
                        dma = control_block->dst;
                else
                        dma = control_block->src;

                if (size)
                        size += this_size;
                else if (addr >= dma && addr < dma + this_size)
                        size += dma + this_size - addr;
        }

        return size;
}

static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue =
                        bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct bcm2835_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
                else
                        pos = 0;

                txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }

        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

static void bcm2835_dma_issue_pending(struct dma_chan *chan)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc)
                bcm2835_dma_start_desc(c);

        spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct bcm2835_desc *d;
        u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC;
        u32 extra = BCM2835_DMA_INT_EN | BCM2835_DMA_WAIT_RESP;
        size_t max_len = bcm2835_dma_max_frame_length(c);
        size_t frames;

        /* if src, dst or len is not given return with an error */
        if (!src || !dst || !len)
                return NULL;

        /* calculate number of frames */
        frames = bcm2835_dma_frames_for_length(len, max_len);

        /* allocate the CB chain - this also fills in the pointers */
        d = bcm2835_dma_create_cb_chain(chan, DMA_MEM_TO_MEM, false,
                                        info, extra, frames,
                                        src, dst, len, 0, GFP_KERNEL);
        if (!d)
                return NULL;

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
        struct dma_chan *chan,
        struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction,
        unsigned long flags, void *context)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct bcm2835_desc *d;
        dma_addr_t src = 0, dst = 0;
        u32 info = BCM2835_DMA_WAIT_RESP;
        u32 extra = BCM2835_DMA_INT_EN;
        size_t frames;

        if (!is_slave_direction(direction)) {
                dev_err(chan->device->dev,
                        "%s: bad direction?\n", __func__);
                return NULL;
        }

        if (c->dreq != 0)
                info |= BCM2835_DMA_PER_MAP(c->dreq);

        if (direction == DMA_DEV_TO_MEM) {
                if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
                        return NULL;
                src = c->cfg.src_addr;
                info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
        } else {
                if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
                        return NULL;
                dst = c->cfg.dst_addr;
                info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
        }

        /* count frames in sg list */
        frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len);

        /* allocate the CB chain */
        d = bcm2835_dma_create_cb_chain(chan, direction, false,
                                        info, extra,
                                        frames, src, dst, 0, 0,
                                        GFP_KERNEL);
        if (!d)
                return NULL;

        /* fill in frames with scatterlist pointers */
        bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list,
                                          sgl, sg_len);

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct bcm2835_desc *d;
        dma_addr_t src, dst;
        u32 info = BCM2835_DMA_WAIT_RESP;
        u32 extra = BCM2835_DMA_INT_EN;
        size_t max_len = bcm2835_dma_max_frame_length(c);
        size_t frames;

        /* Grab configuration */
        if (!is_slave_direction(direction)) {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        if (!buf_len) {
                dev_err(chan->device->dev,
                        "%s: bad buffer length (= 0)\n", __func__);
                return NULL;
        }

        /*
         * warn if buf_len is not a multiple of period_len - this may lead
         * to unexpected latencies for interrupts and thus audible clicks
         */
        if (buf_len % period_len)
                dev_warn_once(chan->device->dev,
                              "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n",
                              __func__, buf_len, period_len);

        /* Setup DREQ channel */
        if (c->dreq != 0)
                info |= BCM2835_DMA_PER_MAP(c->dreq);

        if (direction == DMA_DEV_TO_MEM) {
                if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
                        return NULL;
                src = c->cfg.src_addr;
                dst = buf_addr;
                info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
        } else {
                if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
                        return NULL;
                dst = c->cfg.dst_addr;
                src = buf_addr;
                info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
        }

        /* calculate number of frames */
        frames = /* number of periods */
                 DIV_ROUND_UP(buf_len, period_len) *
                 /* number of frames per period */
                 bcm2835_dma_frames_for_length(period_len, max_len);

        /*
         * allocate the CB chain
         * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
         * implementation calls prep_dma_cyclic with interrupts disabled.
         */
        d = bcm2835_dma_create_cb_chain(chan, direction, true,
                                        info, extra,
                                        frames, src, dst, buf_len,
                                        period_len, GFP_NOWAIT);
        if (!d)
                return NULL;

        /* wrap around into a loop */
        d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int bcm2835_dma_slave_config(struct dma_chan *chan,
                                    struct dma_slave_config *cfg)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

        if ((cfg->direction == DMA_DEV_TO_MEM &&
             cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
            (cfg->direction == DMA_MEM_TO_DEV &&
             cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
            !is_slave_direction(cfg->direction)) {
                return -EINVAL;
        }

        c->cfg = *cfg;

        return 0;
}

static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /* stop DMA activity */
        if (c->desc) {
                bcm2835_dma_desc_free(&c->desc->vd);
                c->desc = NULL;
                bcm2835_dma_abort(c);
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
                                 int irq, unsigned int irq_flags)
{
        struct bcm2835_chan *c;

        c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->vc.desc_free = bcm2835_dma_desc_free;
        vchan_init(&c->vc, &d->ddev);
        INIT_LIST_HEAD(&c->node);

        c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
        c->ch = chan_id;
        c->irq_number = irq;
        c->irq_flags = irq_flags;

        /* check in DEBUG register if this is a LITE channel */
        if (readl(c->chan_base + BCM2835_DMA_DEBUG) &
                BCM2835_DMA_DEBUG_LITE)
                c->is_lite_channel = true;

        return 0;
}

static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
        struct bcm2835_chan *c, *next;

        list_for_each_entry_safe(c, next, &od->ddev.channels,
                                 vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
}

static const struct of_device_id bcm2835_dma_of_match[] = {
        { .compatible = "brcm,bcm2835-dma", },
        {},
};
MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);

static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
                                          struct of_dma *ofdma)
{
        struct bcm2835_dmadev *d = ofdma->of_dma_data;
        struct dma_chan *chan;

        chan = dma_get_any_slave_channel(&d->ddev);
        if (!chan)
                return NULL;

        /* Set DREQ from param */
        to_bcm2835_dma_chan(chan)->dreq = spec->args[0];

        return chan;
}

static int bcm2835_dma_probe(struct platform_device *pdev)
{
        struct bcm2835_dmadev *od;
        struct resource *res;
        void __iomem *base;
        int rc;
        int i, j;
        int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1];
        int irq_flags;
        uint32_t chans_available;
        char chan_name[BCM2835_DMA_CHAN_NAME_SIZE];

        if (!pdev->dev.dma_mask)
                pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rc) {
                dev_err(&pdev->dev, "Unable to set DMA mask\n");
                return rc;
        }

        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        pdev->dev.dma_parms = &od->dma_parms;
        dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        od->base = base;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
        od->ddev.device_tx_status = bcm2835_dma_tx_status;
        od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
        od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
        od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
        od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
        od->ddev.device_config = bcm2835_dma_slave_config;
        od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
        od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
                              BIT(DMA_MEM_TO_MEM);
        od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        spin_lock_init(&od->lock);

        platform_set_drvdata(pdev, od);

        /* Request DMA channel mask from device tree */
        if (of_property_read_u32(pdev->dev.of_node,
                                 "brcm,dma-channel-mask",
                                 &chans_available)) {
                dev_err(&pdev->dev, "Failed to get channel mask\n");
                rc = -EINVAL;
                goto err_no_dma;
        }

        /* get irqs for each channel that we support */
        for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
                /* skip masked out channels */
                if (!(chans_available & (1 << i))) {
                        irq[i] = -1;
                        continue;
                }

                /* get the named irq */
                snprintf(chan_name, sizeof(chan_name), "dma%i", i);
                irq[i] = platform_get_irq_byname(pdev, chan_name);
                if (irq[i] >= 0)
                        continue;

                /* legacy device tree case handling */
                dev_warn_once(&pdev->dev,
                              "missing interrupt-names property in device tree - legacy interpretation is used\n");
                /*
                 * in case of channel >= 11
                 * use the 11th interrupt and that is shared
                 */
                irq[i] = platform_get_irq(pdev, i < 11 ? i : 11);
        }

        /* get irqs for each channel */
        for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
                /* skip channels without irq */
                if (irq[i] < 0)
                        continue;

                /* check if there are other channels that also use this irq */
                irq_flags = 0;
                for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
                        if ((i != j) && (irq[j] == irq[i])) {
                                irq_flags = IRQF_SHARED;
                                break;
                        }

                /* initialize the channel */
                rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags);
                if (rc)
                        goto err_no_dma;
        }

        dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);

        /* Device-tree DMA controller registration */
        rc = of_dma_controller_register(pdev->dev.of_node,
                                        bcm2835_dma_xlate, od);
        if (rc) {
                dev_err(&pdev->dev, "Failed to register DMA controller\n");
                goto err_no_dma;
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                dev_err(&pdev->dev,
                        "Failed to register slave DMA engine device: %d\n", rc);
                goto err_no_dma;
        }

        dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");

        return 0;

err_no_dma:
        bcm2835_dma_free(od);
        return rc;
}

static int bcm2835_dma_remove(struct platform_device *pdev)
{
        struct bcm2835_dmadev *od = platform_get_drvdata(pdev);

        dma_async_device_unregister(&od->ddev);
        bcm2835_dma_free(od);

        return 0;
}

static struct platform_driver bcm2835_dma_driver = {
        .probe  = bcm2835_dma_probe,
        .remove = bcm2835_dma_remove,
        .driver = {
                .name = "bcm2835-dma",
                .of_match_table = of_match_ptr(bcm2835_dma_of_match),
        },
};

module_platform_driver(bcm2835_dma_driver);

MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL v2");