/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>

#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRASFER_SIZE 0x10000
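
/*
 * Note (added for illustration): with the values above, the last
 * TEGRA_SYSTEM_DMA_AVP_CH_NUM = 4 of the 16 channels are presumably left to
 * the AVP, so this driver manages channels TEGRA_SYSTEM_DMA_CH_MIN through
 * TEGRA_SYSTEM_DMA_CH_MAX, i.e. 0 through 16 - 4 - 1 = 11.
 * NV_DMA_MAX_TRASFER_SIZE is 0x10000 bytes (64 KiB), the largest size a
 * single request may carry (see the check in tegra_dma_enqueue_req()).
 */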

const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
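
/*
 * Added note: tegra_dma_update_hw() below looks the requested wrap (converted
 * from bytes to 32-bit words) and the bus width up in these tables and
 * programs the matching table index into the WRAP and BUS_WIDTH fields of
 * the AHB/APB SEQ registers.  Illustrative example (values assumed): an APB
 * wrap of 32 bytes becomes 8 words, which sits at index 4 of
 * apb_addr_wrap_table, so 4 is written to the APB_SEQ wrap field; a 32-bit
 * bus width sits at index 2 of bus_width_table, matching APB_SEQ_BUS_WIDTH_32.
 */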

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;

	/* Register shadow */
	u32			csr;
	u32			ahb_seq;
	u32			ahb_ptr;
	u32			apb_seq;
	u32			apb_ptr;
};

#define NV_DMA_MAX_CHANNELS 32

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
	return;
}

void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	unsigned int csr;
	unsigned int status;

	csr = ch->csr;
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	unsigned int csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = ch->csr;
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;

	/* Set the enable as that is not shadowed */
	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}

int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    words to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = ch->csr;
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;

	/* Set the enable as that is not shadowed */
	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, the DMA only tracks the count of
	 * half the DMA buffer.  So, if the DMA has already finished half
	 * of the buffer, add that half to the completed count.
	 *
	 * FIXME: There can be a race here.  What if the request to
	 * dequeue happens at the same time as the DMA just moved to
	 * the new buffer and SW hasn't yet received the interrupt?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);
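
/*
 * Worked example of the accounting above (a sketch, values assumed): a
 * one-shot request programmed for 64 words (CSR WCOUNT = 63) is dequeued
 * while the channel is still busy and the status count field reads 9, i.e.
 * 10 words are still outstanding.  bytes_transferred then becomes
 * (64 - 10) * 4 = 216 bytes before the request is aborted and its
 * complete() callback is invoked.
 */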

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list))
		is_empty = true;
	else
		is_empty = false;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
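
/*
 * Illustrative client-side sketch (not part of this driver; the addresses,
 * req_sel value and callback names are hypothetical):
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *	req.to_memory = 1;		device FIFO to memory
 *	req.source_addr = fifo_phys;	physical FIFO address (assumed)
 *	req.dest_addr = buf_phys;	physical, word-aligned buffer (assumed)
 *	req.size = 256;			bytes; a multiple of 4
 *	req.source_wrap = 4;		wrap on the 4-byte FIFO register
 *	req.dest_wrap = 0;		no wrap on the memory side
 *	req.source_bus_width = 32;
 *	req.dest_bus_width = 32;
 *	req.req_sel = my_req_sel;	peripheral request select (assumed)
 *	req.complete = my_complete;	called once the transfer finishes
 *	tegra_dma_enqueue_req(ch, &req);
 */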

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch;

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			return NULL;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	__clear_bit(ch->id, channel_usage);
}
EXPORT_SYMBOL(tegra_dma_free_channel);

static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	if (req->to_memory) {
		ch->apb_ptr = req->source_addr;
		ch->ahb_ptr = req->dest_addr;
	} else {
		ch->apb_ptr = req->dest_addr;
		ch->ahb_ptr = req->source_addr;
	}
	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
	return;
}
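
/*
 * Added note: the "partial" update above only re-points the APB/AHB address
 * registers at the next request's buffers; the CSR and SEQ settings left in
 * the channel by the previous full tegra_dma_update_hw() keep being used.
 * This is how the continuous-mode handler below hands the hardware the next
 * buffer without stopping the channel.
 */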

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;
	unsigned long csr;

	ch->csr |= CSR_FLOW;
	ch->csr &= ~CSR_REQ_SEL_MASK;
	ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
	ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
	ch->ahb_seq |= AHB_SEQ_BURST_1;

	/* One shot mode is always single buffered;
	 * continuous mode is always double buffered. */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		ch->csr |= CSR_ONCE;
		ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
		ch->csr &= ~CSR_WCOUNT_MASK;
		ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
	} else {
		ch->csr &= ~CSR_ONCE;
		ch->ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full. */
		ch->csr &= ~CSR_WCOUNT_MASK;
		ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
	}
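
	/*
	 * Worked example of the WCOUNT programming above (values assumed):
	 * a one-shot request with req->size = 256 bytes is programmed as
	 * (256 >> 2) - 1 = 63, i.e. 64 words; a continuous request with
	 * req->size = 4096 bytes is programmed as (4096 >> 3) - 1 = 511,
	 * i.e. 512 words, so the hardware raises an interrupt after each
	 * 2048-byte half of the double buffer.
	 */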

	if (req->to_memory) {
		ch->csr &= ~CSR_DIR;
		ch->apb_ptr = req->source_addr;
		ch->ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;

	} else {
		ch->csr |= CSR_DIR;
		ch->apb_ptr = req->dest_addr;
		ch->ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
	ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
	ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
	ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
	ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	ch->csr |= CSR_IE_EOC;

	/* update hw registers with the shadow */
	writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr = ch->csr | CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
{
	/* One shot with an interrupt to CPU after transfer */
	ch->csr = CSR_ONCE | CSR_IE_EOC;
	ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
	ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
}

static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	spin_lock(&ch->lock);
	if (list_empty(&ch->list)) {
		spin_unlock(&ch->lock);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred =
			(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock(&ch->lock);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock(&ch->lock);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		 * another req, in which case dma has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock(&ch->lock);
}

static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	spin_lock(&ch->lock);
	if (list_empty(&ch->list)) {
		spin_unlock(&ch->lock);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			/* Load the next request into the hardware, if
			 * available. */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock(&ch->lock);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e.
			 * on the second interrupt). */
			int bytes_transferred;

			bytes_transferred =
				(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock(&ch->lock);
			req->complete(req);
			return;

		} else {
			BUG();
		}
	}
	spin_unlock(&ch->lock);
}
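
/*
 * Added note, continuing the 4096-byte example from tegra_dma_update_hw():
 * the first interrupt arrives once 2048 bytes have landed; the request is
 * marked HALF_FULL, its threshold() callback (if any) runs, and the next
 * queued request's pointers are loaded via tegra_dma_update_hw_partial().
 * The second interrupt marks the request FULL with bytes_transferred =
 * (511 + 1) << 3 = 4096 and invokes complete().
 */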

static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}
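
/*
 * Added note: the interrupt is split with request_threaded_irq().  dma_isr()
 * runs in hard-IRQ context, just acknowledges the EOC status bit and returns
 * IRQ_WAKE_THREAD; dma_thread_fn() then runs in the IRQ thread, where the
 * mode-specific handlers above may safely invoke the clients' completion
 * callbacks outside of hard-IRQ context.
 */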

int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	void __iomem *addr;

	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
	       addr + APB_DMA_IRQ_MASK_SET);
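
	/*
	 * With the default channel split (TEGRA_SYSTEM_DMA_CH_MAX = 11) the
	 * mask written above evaluates to 0xFFFFFFFF >> 20 = 0xFFF, i.e.
	 * interrupts are unmasked for channels 0-11 only (stated here for
	 * illustration).
	 */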

	memset(channel_usage, 0, sizeof(channel_usage));
	memset(dma_channels, 0, sizeof(dma_channels));

	/* Reserve all the channels we are not supposed to touch */
	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
		__set_bit(i, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		__clear_bit(i, channel_usage);

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);
		tegra_dma_init_hw(ch);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
		__set_bit(i, channel_usage);

	return ret;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}

#ifdef CONFIG_PM
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
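
/*
 * Added note: the save area holds the three controller-wide registers (GEN,
 * CNTRL, IRQ_MASK) followed by five registers (CSR, AHB_PTR, AHB_SEQ,
 * APB_PTR, APB_SEQ) for each of the 16 channels, hence
 * 5 * TEGRA_SYSTEM_DMA_CH_NR + 3 = 83 words.
 */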

void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}

void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

#endif