/*
 * offload engine driver for the Intel Xscale series of i/o processors
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>

#include <asm/arch/adma.h>
#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
	container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
	container_of(tx, struct iop_adma_desc_slot, async_tx)
/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				struct iop_adma_desc_slot,
				slot_node);
	}
}
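/*
 * iop_adma_run_tx_complete_actions - run the callback of a completed
 * descriptor, unmap its buffers, and kick off any dependent operations;
 * returns the cookie of the most recently completed transaction.
 */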
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);
	spin_lock_bh(&desc->async_tx.lock);
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		desc->async_tx.cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct iop_adma_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&iop_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			u32 src_cnt = unmap->unmap_src_cnt;
			dma_addr_t addr = iop_desc_get_dest_addr(unmap,
				iop_chan);

			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
			while (src_cnt--) {
				addr = iop_desc_get_src_addr(unmap,
							iop_chan,
							src_cnt);
				dma_unmap_page(dev, addr, len,
					DMA_TO_DEVICE);
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	async_tx_run_dependencies(&desc->async_tx);
	spin_unlock_bh(&desc->async_tx.lock);

	return cookie;
}
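/*
 * iop_adma_clean_slot - recycle a completed descriptor.  The descriptor is
 * left in place if the client has not acked it yet, or if it is the chain
 * tail (kept so new operations can be appended); returns non-zero when the
 * end of the chain has been reached.
 */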
static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!desc->async_tx.ack)
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (desc->chain_node.next == &iop_chan->chain)
		return 1;

	dev_dbg(iop_chan->device->common.dev,
		"\tfree slot: %d slots_per_op: %d\n",
		desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	iop_adma_free_slots(desc);

	return 0;
}
static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
	int busy = iop_chan_is_busy(iop_chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		pr_debug("\tcookie: %d slot: %d busy: %d "
			"this_desc: %#x next_desc: %#x ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy,
			iter->async_tx.phys, iop_desc_get_next_desc(iter),
			iter->async_tx.ack);
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->async_tx.phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || iop_desc_get_next_desc(iter))
				break;
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			pr_debug("\tgroup++\n");
			if (!grp_start)
				grp_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;
			pr_debug("\tgroup end\n");

			/* collect the total results */
			if (grp_start->xor_check_result) {
				u32 zero_sum_result = 0;
				slot_cnt = grp_start->slot_cnt;
				grp_iter = grp_start;

				list_for_each_entry_from(grp_iter,
					&iop_chan->chain, chain_node) {
					zero_sum_result |=
					    iop_desc_get_zero_result(grp_iter);
					pr_debug("\titer%d result: %d\n",
						grp_iter->idx, zero_sum_result);
					slot_cnt -= slots_per_op;
					if (slot_cnt == 0)
						break;
				}
				pr_debug("\tgrp_start->xor_check_result: %p\n",
					grp_start->xor_check_result);
				*grp_start->xor_check_result = zero_sum_result;
			}

			/* clean up the group */
			slot_cnt = grp_start->slot_cnt;
			grp_iter = grp_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&iop_chan->chain, chain_node) {
				cookie = iop_adma_run_tx_complete_actions(
					grp_iter, iop_chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = iop_adma_clean_slot(grp_iter,
					iop_chan);

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			grp_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
				iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	BUG_ON(!seen_current);

	iop_chan_idle(busy, iop_chan);

	if (cookie > 0) {
		iop_chan->completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}
static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	spin_lock_bh(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock_bh(&iop_chan->lock);
}
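/* deferred work: reclaim completed descriptors outside of hard-irq context */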
static void iop_adma_tasklet(unsigned long data)
{
	struct iop_adma_chan *chan = (struct iop_adma_chan *) data;

	__iop_adma_slot_cleanup(chan);
}
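/*
 * Descriptor slots are carved out of a single coherent pool; an operation
 * that needs more than one slot must get them contiguous and correctly
 * aligned, which is what the search in iop_adma_alloc_slots() enforces.
 */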
static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
			int slots_per_op)
{
	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
	struct list_head chain = LIST_HEAD_INIT(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = iop_chan->last_used;
	else
		iter = list_entry(&iop_chan->all_slots,
			struct iop_adma_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++) {
			if (iop_desc_is_aligned(iter, slots_per_op))
				alloc_start = iter;
			else {
				slots_found = 0;
				continue;
			}
		}

		if (slots_found == num_slots) {
			struct iop_adma_desc_slot *alloc_tail = NULL;
			struct iop_adma_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				dev_dbg(iop_chan->device->common.dev,
					"allocated slot: %d "
					"(desc %p phys: %#x) slots_per_op %d\n",
					iter->idx, iter->hw_desc,
					iter->async_tx.phys, slots_per_op);

				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					iter->async_tx.ack = 1;
				else
					iter->async_tx.ack = 0;

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct iop_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			iop_chan->last_used = last_used;
			iop_desc_clear_next_desc(alloc_start);
			iop_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&iop_chan->irq_tasklet);

	return NULL;
}
static dma_cookie_t
iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
	struct iop_adma_desc_slot *desc)
{
	dma_cookie_t cookie = iop_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	iop_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}
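/*
 * Appending to the hardware chain is batched: work accumulates until
 * IOP_ADMA_THRESHOLD slots are pending and only then is handed to the
 * engine; iop_adma_issue_pending() below flushes unconditionally.
 */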
static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
		iop_chan->pending);

	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}
static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
	int slot_cnt;
	int slots_per_op;
	dma_cookie_t cookie;

	grp_start = sw_desc->group_head;
	slot_cnt = grp_start->slot_cnt;
	slots_per_op = grp_start->slots_per_op;

	spin_lock_bh(&iop_chan->lock);
	cookie = iop_desc_assign_cookie(iop_chan, sw_desc);

	old_chain_tail = list_entry(iop_chan->chain.prev,
		struct iop_adma_desc_slot, chain_node);
	list_splice_init(&sw_desc->async_tx.tx_list,
			 &old_chain_tail->chain_node);

	/* fix up the hardware chain */
	iop_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

	/* 1/ don't add pre-chained descriptors
	 * 2/ dummy read to flush next_desc write
	 */
	BUG_ON(iop_desc_get_next_desc(sw_desc));

	/* increment the pending count by the number of slots
	 * memcpy operations have a 1:1 (slot:operation) relation
	 * other operations are heavier and will pop the threshold
	 * faster
	 */
	iop_chan->pending += slot_cnt;
	iop_adma_check_threshold(iop_chan);
	spin_unlock_bh(&iop_chan->lock);

	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
		__FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);

	return cookie;
}
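/*
 * Clients normally reach this driver through the generic dmaengine/async_tx
 * interfaces rather than by calling the routines above directly.  A minimal
 * sketch (hypothetical client code; assumes @chan was obtained from the
 * dmaengine core and the buffers are already DMA-mapped):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest_dma, src_dma,
 *						   len, 0);
 *	tx->callback = my_done_callback;	// hypothetical callback
 *	tx->callback_param = my_ctx;		// hypothetical context
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 */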
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
/* returns the number of allocated descriptors */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *slot = NULL;
	int init = iop_chan->slots_allocated ? 0 : 1;
	struct iop_adma_platform_data *plat_data =
		iop_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;

	/* Allocate descriptor slots */
	do {
		idx = iop_chan->slots_allocated;
		if (idx == num_descs_in_pool)
			break;

		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "IOP ADMA Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = iop_adma_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->async_tx.tx_list);
		hw_desc = (char *) iop_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
		slot->idx = idx;

		spin_lock_bh(&iop_chan->lock);
		iop_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
		spin_unlock_bh(&iop_chan->lock);
	} while (iop_chan->slots_allocated < num_descs_in_pool);

	if (idx && !iop_chan->last_used)
		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
					struct iop_adma_desc_slot,
					slot_node);

	dev_dbg(iop_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		iop_chan->slots_allocated, iop_chan->last_used);

	/* initialize the channel and the chain with a null operation */
	if (init) {
		if (dma_has_cap(DMA_MEMCPY,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_memcpy(iop_chan);
		else if (dma_has_cap(DMA_XOR,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_xor(iop_chan);
		else
			BUG();
	}

	return (idx > 0) ? idx : -ENOMEM;
}
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_interrupt(grp_start, iop_chan);
		grp_start->unmap_len = 0;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
	dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__FUNCTION__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memcpy(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
	int value, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__FUNCTION__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memset(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_block_fill_val(grp_start, value);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
	dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
	unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %u flags: %lx\n",
		__FUNCTION__, src_cnt, len, flags);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_xor(grp_start, src_cnt, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			iop_desc_set_xor_src_addr(grp_start, src_cnt,
						  dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
	unsigned int src_cnt, size_t len, u32 *result,
	unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
		__FUNCTION__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
		iop_desc_set_zero_sum_byte_count(grp_start, len);
		grp_start->xor_check_result = result;
		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
			__FUNCTION__, grp_start->xor_check_result);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
						       dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static void iop_adma_dependency_added(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);

	tasklet_schedule(&iop_chan->irq_tasklet);
}
static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	iop_adma_slot_cleanup(iop_chan);

	spin_lock_bh(&iop_chan->lock);
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		iop_chan->slots_allocated--;
	}
	iop_chan->last_used = NULL;

	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
		__FUNCTION__, iop_chan->slots_allocated);
	spin_unlock_bh(&iop_chan->lock);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
			in_use_descs - 1);
}
/**
 * iop_adma_is_complete - poll the status of an ADMA transaction
 * @chan: ADMA channel handle
 * @cookie: ADMA transaction identifier
 */
static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	iop_adma_slot_cleanup(iop_chan);

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eot_status(chan);

	return IRQ_HANDLED;
}
static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eoc_status(chan);

	return IRQ_HANDLED;
}
static irqreturn_t iop_adma_err_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;
	unsigned long status = iop_chan_get_status(chan);

	dev_printk(KERN_ERR, chan->device->common.dev,
		"error ( %s%s%s%s%s%s%s)\n",
		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
		iop_is_err_split_tx(status, chan) ? "split_tx " : "");

	iop_adma_device_clear_err_status(chan);

	BUG();

	return IRQ_HANDLED;
}
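/* flush all pending work to the engine regardless of the threshold */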
static void iop_adma_issue_pending(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);

	if (iop_chan->pending) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}
/*
 * Perform a transaction to verify the HW works.
 */
#define IOP_ADMA_TEST_SIZE 2000

static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);

	src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	memset(dest, 0, IOP_ADMA_TEST_SIZE);

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	src_dma = dma_map_single(dma_chan->device->dev, src,
				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				      IOP_ADMA_TEST_SIZE, 1);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
			DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
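/*
 * Exercise the xor, zero-sum, and memset paths: xor known patterns into a
 * destination page, verify the result with a zero-sum, clear the page with
 * memset, then confirm that a non-zero parity sum is detected.
 */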
#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 zero_sum_result;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test xor failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_TO_DEVICE);

	/* skip zero sum if the capability is not present */
	if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
		goto free_resources;

	/* zero sum the sources with the destination page */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		zero_sum_srcs[i] = xor_srcs[i];
	zero_sum_srcs[i] = dest;

	zero_sum_result = 1;

	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
					&zero_sum_result, 1);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test memset */
	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test memset timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test memset failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	zero_sum_result = 0;
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
					&zero_sum_result, 1);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 1) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	src_idx = IOP_ADMA_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int __devexit iop_adma_remove(struct platform_device *dev)
{
	struct iop_adma_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct iop_adma_chan *iop_chan;
	int i;
	struct iop_adma_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	for (i = 0; i < 3; i++) {
		unsigned int irq;
		irq = platform_get_irq(dev, i);
		free_irq(irq, device);
	}

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			device->dma_desc_pool_virt, device->dma_desc_pool);

	do {
		struct resource *res;
		res = platform_get_resource(dev, IORESOURCE_MEM, 0);
		release_mem_region(res->start, res->end - res->start);
	} while (0);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				device_node) {
		iop_chan = to_iop_adma_chan(chan);
		list_del(&chan->device_node);
		kfree(iop_chan);
	}
	kfree(device);

	return 0;
}
static int __devinit iop_adma_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret = 0, i;
	struct iop_adma_device *adev;
	struct iop_adma_chan *iop_chan;
	struct dma_device *dma_dev;
	struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				res->end - res->start, pdev->name))
		return -EBUSY;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
					plat_data->pool_size,
					&adev->dma_desc_pool,
					GFP_KERNEL)) == NULL) {
		ret = -ENOMEM;
		goto err_free_adev;
	}

	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
		__FUNCTION__, adev->dma_desc_pool_virt,
		(void *) adev->dma_desc_pool);

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;

	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
	dma_dev->device_is_tx_complete = iop_adma_is_complete;
	dma_dev->device_issue_pending = iop_adma_issue_pending;
	dma_dev->device_dependency_added = iop_adma_dependency_added;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = iop_adma_get_max_xor();
		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
	}
	if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
		dma_dev->device_prep_dma_zero_sum =
			iop_adma_prep_dma_zero_sum;
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt =
			iop_adma_prep_dma_interrupt;

	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
	if (!iop_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	iop_chan->device = adev;

	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
					res->end - res->start);
	if (!iop_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_iop_chan;
	}
	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
		iop_chan);

	/* clear errors before enabling interrupts */
	iop_adma_device_clear_err_status(iop_chan);

	for (i = 0; i < 3; i++) {
		irq_handler_t handler[] = { iop_adma_eot_handler,
					iop_adma_eoc_handler,
					iop_adma_err_handler };
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = -ENXIO;
			goto err_free_iop_chan;
		} else {
			ret = devm_request_irq(&pdev->dev, irq,
					handler[i], 0, pdev->name, iop_chan);
			if (ret)
				goto err_free_iop_chan;
		}
	}

	spin_lock_init(&iop_chan->lock);
	init_timer(&iop_chan->cleanup_watchdog);
	iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan;
	iop_chan->cleanup_watchdog.function = iop_adma_tasklet;
	INIT_LIST_HEAD(&iop_chan->chain);
	INIT_LIST_HEAD(&iop_chan->all_slots);
	INIT_RCU_HEAD(&iop_chan->common.rcu);
	iop_chan->common.device = dma_dev;
	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = iop_adma_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
		dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		ret = iop_adma_xor_zero_sum_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
	  "( %s%s%s%s%s%s%s%s%s%s)\n",
	  dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
	  dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
	  dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",
	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
	  dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
	  dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",
	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
	  dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

 err_free_iop_chan:
	kfree(iop_chan);
 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			adev->dma_desc_pool_virt, adev->dma_desc_pool);
 err_free_adev:
	kfree(adev);
 out:
	return ret;
}
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;

		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
		sw_desc->async_tx.ack = 1;
		iop_desc_init_memcpy(grp_start, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_memcpy_src_addr(grp_start, 0);

		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
		sw_desc->async_tx.ack = 1;
		iop_desc_init_null_xor(grp_start, 2, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_xor_src_addr(grp_start, 0, 0);
		iop_desc_set_xor_src_addr(grp_start, 1, 0);

		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}
static struct platform_driver iop_adma_driver = {
	.probe		= iop_adma_probe,
	.remove		= iop_adma_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "iop-adma",
	},
};

static int __init iop_adma_init (void)
{
	return platform_driver_register(&iop_adma_driver);
}

/* it's currently unsafe to unload this module */
#if 0
static void __exit iop_adma_exit (void)
{
	platform_driver_unregister(&iop_adma_driver);
	return;
}
module_exit(iop_adma_exit);
#endif

module_init(iop_adma_init);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");