/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and similar
 *   parts. Support for the MPC8349 DMA controller is included as well.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"
static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following mode:
		 * EOTIE - End-of-transfer interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
				32);
		break;
	}
}
static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}
static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}
static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}
static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}
static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}
static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}
static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}
static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}
static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}
static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}
static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);

	/* Idle when the channel is not busy (CB clear) or has halted (CH set) */
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}
static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i = 0;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	while (!dma_is_idle(fsl_chan) && (i++ < 100))
		udelay(10);
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}
static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
		64);
}
static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
		struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link to the new descriptor's physical address and
	 * enable the End-of-segment interrupt for
	 * the last link descriptor
	 * (the previous node's next link descriptor).
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}
/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Sets the source address hold (loop) transfer size. While the DMA
 * transfers data from the source address (SA), if the loop size is 4,
 * the DMA will read from SA, SA + 1, SA + 2, SA + 3, then loop back
 * to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}
/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Sets the destination address hold (loop) transfer size. While the DMA
 * transfers data to the destination address (TA), if the loop size is 4,
 * the DMA will write to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
 * TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size     : Pause control size, 0 to disable external pause control.
 *             The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The pause control size is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 to disable, 1 to enable.
 *
 * With external start enabled, the channel can be started by an external
 * DMA start pin, so dma_start() does not start the transfer immediately;
 * the DMA channel waits for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;	/* skip the reserved error values on wrap */
	desc->async_tx.cookie = cookie;
	fsl_chan->common.cookie = desc->async_tx.cookie;

	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}
/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor. NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}
/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a DMA pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* The descriptor must be 32-byte aligned to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
				"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}
/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free the link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);
}
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.ack = 0;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;
	LIST_HEAD(link_chain);

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate a link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			return NULL;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return first ? &first->async_tx : NULL;
}
/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}
/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the channel's ld_queue: every completed link
 * descriptor is unlinked, its callback is run, and its memory is returned
 * to the descriptor pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}
/**
 * fsl_chan_xfer_ld_queue - Transfer the link descriptors in the channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	if (!dma_is_idle(fsl_chan))
		return;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred, we need to start the transfer.
	 */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from %p\n",
				(void *)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}
}
/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;

		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}
static void fsl_dma_dependency_added(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	fsl_chan_ld_cleanup(fsl_chan);
}
/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* If a link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		fsl_dma_update_completed_cookie(fsl_chan);
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		fsl_chan_xfer_ld_queue(fsl_chan);
	}

	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}
static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}
static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;

	fsl_chan_ld_cleanup(fsl_chan);
}
#ifdef FSL_DMA_CALLBACKTEST
static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
{
	if (fsl_chan)
		dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
}
#endif
#ifdef CONFIG_FSL_DMA_SELFTEST
static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
{
	struct dma_chan *chan;
	int err = 0;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	u8 *src, *dest;
	int i;
	size_t test_size;
	struct dma_async_tx_descriptor *tx1, *tx2, *tx3;

	test_size = 4096;

	src = kmalloc(test_size * 2, GFP_KERNEL);
	if (!src) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc memory for test!\n");
		return -ENOMEM;
	}

	dest = src + test_size;

	for (i = 0; i < test_size; i++)
		src[i] = (u8) i;

	chan = &fsl_chan->common;

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
		goto out;
	}

	/* TX 1 */
	dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
				DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
				DMA_FROM_DEVICE);
	tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
	async_tx_ack(tx1);

	cookie = fsl_dma_tx_submit(tx1);
	fsl_dma_memcpy_issue_pending(chan);
	msleep(2);

	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(fsl_chan->dev, "selftest: Time out!\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* Test free and re-alloc channel resources */
	fsl_dma_free_chan_resources(chan);

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
		goto out;
	}

	/* TX 2 */
	dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
				test_size / 4, DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
				test_size / 4, DMA_FROM_DEVICE);
	tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
	async_tx_ack(tx2);

	/* TX 3 */
	dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
				test_size / 4, DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
				test_size / 4, DMA_FROM_DEVICE);
	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
	async_tx_ack(tx3);

	/* Test exchanging the prepared tx submit order */
	cookie = fsl_dma_tx_submit(tx3);
	cookie = fsl_dma_tx_submit(tx2);

#ifdef FSL_DMA_CALLBACKTEST
	if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
	    dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
		tx3->callback = fsl_dma_callback_test;
		tx3->callback_param = fsl_chan;
	}
#endif

	fsl_dma_memcpy_issue_pending(chan);
	msleep(2);

	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(fsl_chan->dev, "selftest: Time out!\n");
		err = -ENODEV;
		goto free_resources;
	}

	err = memcmp(src, dest, test_size);
	if (err) {
		for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
				i++);
		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
				"wrong! src 0x%x, dest 0x%x\n",
				i, (long)test_size, *(src + i), *(dest + i));
	}

free_resources:
	fsl_dma_free_chan_resources(chan);
out:
	kfree(src);
	return err;
}
#endif
static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	struct fsl_dma_device *fdev;
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	fdev = dev_get_drvdata(dev->dev.parent);
	BUG_ON(!fdev);

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(&dev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get the dma channel register base */
	err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		kfree(new_fsl_chan);
		return err;
	}

	new_fsl_chan->feature = *(u32 *)match->data;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from the feature
	 * of its channels, report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = &dev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(&dev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		iounmap(new_fsl_chan->reg_base);
		kfree(new_fsl_chan);
		return -EINVAL;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear the cdar register */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also get the loop-size hooks */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
				&fsl_dma_chan_do_interrupt, IRQF_SHARED,
				"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(&dev->dev, "DMA channel %s request_irq error "
				"with return %d\n", dev->node->full_name, err);
			goto err;
		}
	}

#ifdef CONFIG_FSL_DMA_SELFTEST
	err = fsl_dma_self_test(new_fsl_chan);
	if (err)
		goto err;
#endif

	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
				match->compatible, new_fsl_chan->irq);

	return 0;

err:
	dma_halt(new_fsl_chan);
	iounmap(new_fsl_chan->reg_base);
	free_irq(new_fsl_chan->irq, new_fsl_chan);
	list_del(&new_fsl_chan->common.device_node);
	kfree(new_fsl_chan);
	return err;
}
const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;
static struct of_device_id of_fsl_dma_chan_ids[] = {
	{
		.compatible = "fsl,mpc8540-dma-channel",
		.data = (void *)&mpc8540_dma_ip_feature,
	},
	{
		.compatible = "fsl,mpc8349-dma-channel",
		.data = (void *)&mpc8349_dma_ip_feature,
	},
	{}
};
static struct of_platform_driver of_fsl_dma_chan_driver = {
	.name = "of-fsl-dma-channel",
	.match_table = of_fsl_dma_chan_ids,
	.probe = of_fsl_dma_chan_probe,
};
static __init int of_fsl_dma_chan_init(void)
{
	return of_register_platform_driver(&of_fsl_dma_chan_driver);
}
static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	unsigned int irq;
	struct fsl_dma_device *fdev;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get the DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		kfree(fdev);
		return err;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at %p...\n",
			match->compatible, (void *)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_dependency_added = fsl_dma_dependency_added;
	fdev->common.dev = &dev->dev;

	irq = irq_of_parse_and_map(dev->node, 0);
	if (irq != NO_IRQ) {
		err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);
	of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
	kfree(fdev);
	return err;
}
static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,mpc8540-dma", },
	{ .compatible = "fsl,mpc8349-dma", },
	{}
};
static struct of_platform_driver of_fsl_dma_driver = {
	.name = "of-fsl-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
};
static __init int of_fsl_dma_init(void)
{
	return of_register_platform_driver(&of_fsl_dma_driver);
}
subsys_initcall(of_fsl_dma_chan_init);
subsys_initcall(of_fsl_dma_init);