/*
 * arch/arm/include/asm/hardware/iop3xx-adma.h
 * (from linux-ginger.git, commit "Full support for Ginger Console",
 *  blob 1a8c7279a28b39eb8473d5e5ffb383cfbc5040ec)
 */
/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
18 #ifndef _ADMA_H
19 #define _ADMA_H
20 #include <linux/types.h>
21 #include <linux/io.h>
22 #include <mach/hardware.h>
23 #include <asm/hardware/iop_adma.h>
/* Memory copy units: per-channel memory-mapped register addresses.
 * @chan is a struct iop_adma_chan * whose mmr_base is the ioremapped
 * base of the channel's register block. */
#define DMA_CCR(chan)	(chan->mmr_base + 0x0)	/* channel control */
#define DMA_CSR(chan)	(chan->mmr_base + 0x4)	/* channel status */
#define DMA_DAR(chan)	(chan->mmr_base + 0xc)	/* current descriptor address */
#define DMA_NDAR(chan)	(chan->mmr_base + 0x10)	/* next descriptor address */
#define DMA_PADR(chan)	(chan->mmr_base + 0x14)	/* PCI address */
#define DMA_PUADR(chan)	(chan->mmr_base + 0x18)	/* upper PCI address */
#define DMA_LADR(chan)	(chan->mmr_base + 0x1c)	/* local address */
#define DMA_BCR(chan)	(chan->mmr_base + 0x20)	/* byte count */
#define DMA_DCR(chan)	(chan->mmr_base + 0x24)	/* descriptor control */

/* Application accelerator unit */
#define AAU_ACR(chan)	(chan->mmr_base + 0x0)	/* accelerator control */
#define AAU_ASR(chan)	(chan->mmr_base + 0x4)	/* accelerator status */
#define AAU_ADAR(chan)	(chan->mmr_base + 0x8)	/* current descriptor address */
#define AAU_ANDAR(chan)	(chan->mmr_base + 0xc)	/* next descriptor address */
#define AAU_SAR(src, chan)	(chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan)	(chan->mmr_base + 0x20)
#define AAU_ABCR(chan)	(chan->mmr_base + 0x24)
#define AAU_ADCR(chan)	(chan->mmr_base + 0x28)
/* NOTE(review): deliberately relies on a 'chan' variable being in scope at
 * the expansion site; 'chan' is not a macro parameter here. */
#define AAU_SAR_EDCR(src_edc)	(chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
/* indices of the extended descriptor control words within src_edc[] */
#define AAU_EDCR0_IDX	8
#define AAU_EDCR1_IDX	17
#define AAU_EDCR2_IDX	26

/* channel ids, matched against chan->device->id throughout this file */
#define DMA0_ID	0
#define DMA1_ID	1
#define AAU_ID	2
/* AAU descriptor control word (desc_ctrl), viewed as bitfields.  The
 * blk*_cmd_ctrl fields select a command per source block: this file uses
 * 0x2 (memory block fill) and 0x7 (direct fill); XOR command bits are set
 * bit-by-bit in iop3xx_desc_init_xor(). */
struct iop3xx_aau_desc_ctrl {
	unsigned int int_en:1;		/* interrupt on completion */
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int blk_ctrl:2;	/* 0x1 mini-desc, 0x2 EDCR0, 0x3 EDCR[2:0] */
	unsigned int dual_xor_en:1;
	unsigned int tx_complete:1;	/* polled in iop_desc_get_zero_result() */
	unsigned int zero_result_err:1;	/* zero-sum: result was nonzero */
	unsigned int zero_result_en:1;	/* enable the zero-sum check */
	unsigned int dest_write_en:1;	/* write the result to dest_addr */
};
/* AAU extended descriptor control word (EDCR): per-block commands for
 * source blocks beyond the first eight.  Written via the
 * src_edc[AAU_EDCR*_IDX].e_desc_ctrl slots. */
struct iop3xx_aau_e_desc_ctrl {
	unsigned int reserved:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int reserved2:7;
};
/* DMA channel descriptor control word (desc_ctrl), viewed as bitfields. */
struct iop3xx_dma_desc_ctrl {
	unsigned int pci_transaction:4;	/* 0xe = memory read block (see init_memcpy) */
	unsigned int int_en:1;		/* interrupt on completion */
	unsigned int dac_cycle_en:1;
	unsigned int mem_to_mem_en:1;	/* local memory-to-memory transfer */
	unsigned int crc_data_tx_en:1;
	unsigned int crc_gen_en:1;
	unsigned int crc_seed_dis:1;
	unsigned int reserved:21;
	unsigned int crc_tx_complete:1;
};
/* In-memory hardware descriptor for the DMA channels.  The anonymous
 * unions overlay the PCI and local-memory address views of the same
 * descriptor words. */
struct iop3xx_desc_dma {
	u32 next_desc;			/* physical address of next descriptor */
	union {
		u32 pci_src_addr;
		u32 pci_dest_addr;
		u32 src_addr;
	};
	union {
		u32 upper_pci_src_addr;
		u32 upper_pci_dest_addr;
	};
	union {
		u32 local_pci_src_addr;
		u32 local_pci_dest_addr;
		u32 dest_addr;
	};
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_dma_desc_ctrl desc_ctrl_field;
	};
	u32 crc_addr;
};
/* In-memory hardware descriptor for the AAU.  Sources 0-3 live in src[];
 * further sources and the extended control words (EDCR0-2, at indices
 * AAU_EDCR*_IDX) share the src_edc[] array — see __desc_idx(). */
struct iop3xx_desc_aau {
	u32 next_desc;			/* physical address of next descriptor */
	u32 src[4];
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
	} src_edc[31];
};
/* Galois-field multiplier coefficients, one byte per source block. */
struct iop3xx_aau_gfmr {
	unsigned int gfmr1:8;
	unsigned int gfmr2:8;
	unsigned int gfmr3:8;
	unsigned int gfmr4:8;
};
/* P+Q/XOR descriptor layout with GF multiplier words.  Unused on iop3xx
 * (the pq helpers below all BUG()); kept for layout completeness. */
struct iop3xx_desc_pq_xor {
	u32 next_desc;
	u32 src[3];
	union {
		u32 data_mult1;
		struct iop3xx_aau_gfmr data_mult1_field;
	};
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
		u32 data_multiplier;
		struct iop3xx_aau_gfmr data_mult_field;
		u32 reserved;
	} src_edc_gfmr[19];
};
/* Dual-XOR descriptor layout (see the dual_xor_en control bit). */
struct iop3xx_desc_dual_xor {
	u32 next_desc;
	u32 src0_addr;
	u32 src1_addr;
	u32 h_src_addr;
	u32 d_src_addr;
	u32 h_dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	u32 d_dest_addr;
};
/* Typed views of a raw hw_desc pointer; initialize via .ptr and read the
 * member matching the channel type (chan->device->id). */
union iop3xx_desc {
	struct iop3xx_desc_aau *aau;
	struct iop3xx_desc_dma *dma;
	struct iop3xx_desc_pq_xor *pq_xor;
	struct iop3xx_desc_dual_xor *dual_xor;
	void *ptr;
};
/* No support for p+q operations: these stubs satisfy the iop-adma core's
 * expected interface and trap (BUG) if the core ever calls them. */
static inline int
iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
	BUG();
	return 0;
}

static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
		 unsigned long flags)
{
	BUG();
}

static inline void
iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
{
	BUG();
}

static inline void
iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
			 dma_addr_t addr, unsigned char coef)
{
	BUG();
}

static inline int
iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
	BUG();
	return 0;
}

static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
			  unsigned long flags)
{
	BUG();
}

static inline void
iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
	BUG();
}

#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr

static inline void
iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
			      dma_addr_t *src)
{
	BUG();
}

/* the AAU handles up to 32 xor sources per operation */
static inline int iop_adma_get_max_xor(void)
{
	return 32;
}

static inline int iop_adma_get_max_pq(void)
{
	BUG();
	return 0;
}
258 static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
260 int id = chan->device->id;
262 switch (id) {
263 case DMA0_ID:
264 case DMA1_ID:
265 return __raw_readl(DMA_DAR(chan));
266 case AAU_ID:
267 return __raw_readl(AAU_ADAR(chan));
268 default:
269 BUG();
271 return 0;
274 static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
275 u32 next_desc_addr)
277 int id = chan->device->id;
279 switch (id) {
280 case DMA0_ID:
281 case DMA1_ID:
282 __raw_writel(next_desc_addr, DMA_NDAR(chan));
283 break;
284 case AAU_ID:
285 __raw_writel(next_desc_addr, AAU_ANDAR(chan));
286 break;
/* CSR bit 10 is set while the channel is active (see iop_chan_is_busy) */
#define IOP_ADMA_STATUS_BUSY (1 << 10)
/* per-operation hardware limits, consumed by the *_slot_count and
 * byte-count helpers below */
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
296 static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
298 u32 status = __raw_readl(DMA_CSR(chan));
299 return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
302 static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
303 int num_slots)
305 /* num_slots will only ever be 1, 2, 4, or 8 */
306 return (desc->idx & (num_slots - 1)) ? 0 : 1;
/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
	/* one hardware descriptor per memcpy operation */
	*slots_per_op = 1;
	return *slots_per_op;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
	/* one hardware descriptor per memset operation */
	*slots_per_op = 1;
	return *slots_per_op;
}
/* descriptor slots needed for an AAU xor of src_cnt sources (1..32);
 * also reports the per-operation slot count through *slots_per_op */
static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
					int *slots_per_op)
{
	static const char slot_count_table[] = {
		1, 1, 1, 1, /* 01 - 04 */
		2, 2, 2, 2, /* 05 - 08 */
		4, 4, 4, 4, /* 09 - 12 */
		4, 4, 4, 4, /* 13 - 16 */
		8, 8, 8, 8, /* 17 - 20 */
		8, 8, 8, 8, /* 21 - 24 */
		8, 8, 8, 8, /* 25 - 28 */
		8, 8, 8, 8, /* 29 - 32 */
	};
	int slots = slot_count_table[src_cnt - 1];

	*slots_per_op = slots;
	return slots;
}
340 static inline int
341 iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
343 switch (chan->device->id) {
344 case DMA0_ID:
345 case DMA1_ID:
346 return iop_chan_memcpy_slot_count(0, slots_per_op);
347 case AAU_ID:
348 return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
349 default:
350 BUG();
352 return 0;
355 static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
356 int *slots_per_op)
358 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
360 if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
361 return slot_cnt;
363 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
364 while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
365 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
366 slot_cnt += *slots_per_op;
369 if (len)
370 slot_cnt += *slots_per_op;
372 return slot_cnt;
375 /* zero sum on iop3xx is limited to 1k at a time so it requires multiple
376 * descriptors
378 static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
379 int *slots_per_op)
381 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
383 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
384 return slot_cnt;
386 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
387 while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
388 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
389 slot_cnt += *slots_per_op;
392 if (len)
393 slot_cnt += *slots_per_op;
395 return slot_cnt;
/* iop3xx never produces pq descriptors (pq is unsupported here) */
static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
	return 0;
}
403 static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
404 struct iop_adma_chan *chan)
406 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
408 switch (chan->device->id) {
409 case DMA0_ID:
410 case DMA1_ID:
411 return hw_desc.dma->dest_addr;
412 case AAU_ID:
413 return hw_desc.aau->dest_addr;
414 default:
415 BUG();
417 return 0;
/* no q destination on iop3xx: pq operations are unsupported, so this
 * traps if ever reached */
static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan)
{
	BUG();
	return 0;
}
428 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
429 struct iop_adma_chan *chan)
431 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
433 switch (chan->device->id) {
434 case DMA0_ID:
435 case DMA1_ID:
436 return hw_desc.dma->byte_count;
437 case AAU_ID:
438 return hw_desc.aau->byte_count;
439 default:
440 BUG();
442 return 0;
/* translate the src_idx to a descriptor word index; the gaps in the
 * table (values 4, 13 and 22 are skipped) leave room for the extended
 * control words interleaved in src_edc[] */
static inline int __desc_idx(int src_idx)
{
	static const int desc_idx_table[] = {
		 0,  0,  0,  0,
		 0,  1,  2,  3,
		 5,  6,  7,  8,
		 9, 10, 11, 12,
		14, 15, 16, 17,
		18, 19, 20, 21,
		23, 24, 25, 26,
		27, 28, 29, 30,
	};

	return desc_idx_table[src_idx];
}
461 static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
462 struct iop_adma_chan *chan,
463 int src_idx)
465 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
467 switch (chan->device->id) {
468 case DMA0_ID:
469 case DMA1_ID:
470 return hw_desc.dma->src_addr;
471 case AAU_ID:
472 break;
473 default:
474 BUG();
477 if (src_idx < 4)
478 return hw_desc.aau->src[src_idx];
479 else
480 return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
483 static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
484 int src_idx, dma_addr_t addr)
486 if (src_idx < 4)
487 hw_desc->src[src_idx] = addr;
488 else
489 hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
492 static inline void
493 iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
495 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
496 union {
497 u32 value;
498 struct iop3xx_dma_desc_ctrl field;
499 } u_desc_ctrl;
501 u_desc_ctrl.value = 0;
502 u_desc_ctrl.field.mem_to_mem_en = 1;
503 u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
504 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
505 hw_desc->desc_ctrl = u_desc_ctrl.value;
506 hw_desc->upper_pci_src_addr = 0;
507 hw_desc->crc_addr = 0;
510 static inline void
511 iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
513 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
514 union {
515 u32 value;
516 struct iop3xx_aau_desc_ctrl field;
517 } u_desc_ctrl;
519 u_desc_ctrl.value = 0;
520 u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
521 u_desc_ctrl.field.dest_write_en = 1;
522 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
523 hw_desc->desc_ctrl = u_desc_ctrl.value;
/* Build the AAU control word (and extended control words) for an xor of
 * src_cnt sources.  Each source block gets a 3-bit command; the switch
 * intentionally falls through from the highest range downward, peeling
 * off sources 25-32, then 17-24, then 9-16 into the EDCR words before
 * handling the first eight in desc_ctrl itself.  Returns the assembled
 * desc_ctrl value so zero-sum init can post-process it. */
static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
			unsigned long flags)
{
	int i, shift;
	u32 edcr;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		edcr = 0;
		shift = 1;
		/* one xor-enable bit per source block in EDCR2 */
		for (i = 24; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
		src_cnt = 24;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			/* entered directly: EDCR2 was never written above */
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		edcr = 0;
		shift = 1;
		for (i = 16; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
		src_cnt = 16;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		edcr = 0;
		shift = 1;
		for (i = 8; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
		src_cnt = 8;
		/* fall through */
	case 2 ... 8:
		shift = 1;
		/* xor-enable bits for the first eight blocks live in
		 * desc_ctrl itself */
		for (i = 0; i < src_cnt; i++) {
			u_desc_ctrl.value |= (1 << shift);
			shift += 3;
		}

		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;

	return u_desc_ctrl.value;
}
/* thin wrapper: initialize an xor descriptor in place */
static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
			unsigned long flags)
{
	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}
/* return the number of operations */
/* Initialize a chain of zero-sum operations: each 1K chunk reuses the
 * xor control word but disables the destination write and enables the
 * zero-result check.  Descriptors are linked through next_desc using the
 * slot's physical base plus the 32-byte slot stride (i << 5). */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
			unsigned long flags)
{
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;
	int i, j;

	hw_desc = desc->hw_desc;

	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, j++) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
		u_desc_ctrl.field.dest_write_en = 0;
		u_desc_ctrl.field.zero_result_en = 1;
		u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
		iter->desc_ctrl = u_desc_ctrl.value;

		/* for the subsequent descriptors preserve the store queue
		 * and chain them together
		 */
		if (i) {
			prev_hw_desc =
				iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
			prev_hw_desc->next_desc =
				(u32) (desc->async_tx.phys + (i << 5));
		}
	}

	return j;
}
/* Initialize a "null xor" descriptor: same EDCR block selection as a
 * real xor (fall-through switch, highest range first) but with all
 * extended control words zeroed and the destination write disabled, so
 * the operation has no memory effect.  Used for interrupt descriptors. */
static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
			unsigned long flags)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 1 ... 8:
		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 0;
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}
678 static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
679 struct iop_adma_chan *chan,
680 u32 byte_count)
682 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
684 switch (chan->device->id) {
685 case DMA0_ID:
686 case DMA1_ID:
687 hw_desc.dma->byte_count = byte_count;
688 break;
689 case AAU_ID:
690 hw_desc.aau->byte_count = byte_count;
691 break;
692 default:
693 BUG();
697 static inline void
698 iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
699 struct iop_adma_chan *chan)
701 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
703 switch (chan->device->id) {
704 case DMA0_ID:
705 case DMA1_ID:
706 iop_desc_init_memcpy(desc, 1);
707 hw_desc.dma->byte_count = 0;
708 hw_desc.dma->dest_addr = 0;
709 hw_desc.dma->src_addr = 0;
710 break;
711 case AAU_ID:
712 iop_desc_init_null_xor(desc, 2, 1);
713 hw_desc.aau->byte_count = 0;
714 hw_desc.aau->dest_addr = 0;
715 hw_desc.aau->src[0] = 0;
716 hw_desc.aau->src[1] = 0;
717 break;
718 default:
719 BUG();
723 static inline void
724 iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
726 int slots_per_op = desc->slots_per_op;
727 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
728 int i = 0;
730 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
731 hw_desc->byte_count = len;
732 } else {
733 do {
734 iter = iop_hw_desc_slot_idx(hw_desc, i);
735 iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
736 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
737 i += slots_per_op;
738 } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
740 if (len) {
741 iter = iop_hw_desc_slot_idx(hw_desc, i);
742 iter->byte_count = len;
747 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
748 struct iop_adma_chan *chan,
749 dma_addr_t addr)
751 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
753 switch (chan->device->id) {
754 case DMA0_ID:
755 case DMA1_ID:
756 hw_desc.dma->dest_addr = addr;
757 break;
758 case AAU_ID:
759 hw_desc.aau->dest_addr = addr;
760 break;
761 default:
762 BUG();
766 static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
767 dma_addr_t addr)
769 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
770 hw_desc->src_addr = addr;
773 static inline void
774 iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
775 dma_addr_t addr)
778 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
779 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
780 int i;
782 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
783 i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
784 iter = iop_hw_desc_slot_idx(hw_desc, i);
785 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
789 static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
790 int src_idx, dma_addr_t addr)
793 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
794 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
795 int i;
797 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
798 i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
799 iter = iop_hw_desc_slot_idx(hw_desc, i);
800 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
					u32 next_desc_addr)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	/* catch accidental overwrite of an already-linked descriptor */
	iop_paranoia(hw_desc.dma->next_desc);
	hw_desc.dma->next_desc = next_desc_addr;
}

static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	return hw_desc.dma->next_desc;
}

static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	hw_desc.dma->next_desc = 0;
}

/* program the fill value for an AAU memset (block fill reads src[0]) */
static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
					u32 val)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	hw_desc->src[0] = val;
}
/* Read back the zero-sum result: returns the P-check flag set iff the
 * hardware saw a nonzero result.  Only meaningful once tx_complete is
 * set on a descriptor that had zero_result_en — iop_paranoia verifies. */
static inline enum sum_check_flags
iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

	iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
	return desc_ctrl.zero_result_err << SUM_CHECK_P;
}
/* set CCR bit 1 so the channel picks up newly appended descriptors
 * (NOTE(review): resume/append semantics per the IOP3xx CCR — confirm
 * against the hardware manual) */
static inline void iop_chan_append(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl;

	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl |= 0x2;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

/* raw channel status register read */
static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
	return __raw_readl(DMA_CSR(chan));
}

/* clear CCR bit 0 to stop the channel */
static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl &= ~1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

/* set CCR bit 0 to start the channel */
static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));

	dma_chan_ctrl |= 1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

/* isolate status bit 9 and write it back (write-to-clear, per the
 * function's "clear end-of-transfer" role) */
static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 9);
	__raw_writel(status, DMA_CSR(chan));
}

/* isolate status bit 8 and write it back (write-to-clear, per the
 * function's "clear end-of-chain" role) */
static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 8);
	__raw_writel(status, DMA_CSR(chan));
}
888 static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
890 u32 status = __raw_readl(DMA_CSR(chan));
892 switch (chan->device->id) {
893 case DMA0_ID:
894 case DMA1_ID:
895 status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
896 break;
897 case AAU_ID:
898 status &= (1 << 5);
899 break;
900 default:
901 BUG();
904 __raw_writel(status, DMA_CSR(chan));
/* error classification predicates for the saved status word; the first
 * three error classes are never reported by iop3xx channels */
static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

/* status bit 5: internal master abort (reported by DMA and AAU alike) */
static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	return test_bit(5, &status);
}

/* status bit 2: PCI target abort — DMA channels only */
static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(2, &status);
	default:
		return 0;
	}
}

/* status bit 3: PCI master abort — DMA channels only */
static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(3, &status);
	default:
		return 0;
	}
}

/* status bit 1: split transaction error — DMA channels only */
static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(1, &status);
	default:
		return 0;
	}
}
966 #endif /* _ADMA_H */