/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support routines for v3+ hardware
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)

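/*
 * Illustrative example: the hardware src_cnt field is biased by two, so a
 * three-source software xor is programmed as src_cnt_to_hw(3) == 1, and a
 * descriptor read back with ctl_f.src_cnt == 1 maps to src_cnt_to_sw(1) == 3.
 */
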
/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc __read_mostly = 0xe0;
static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc __read_mostly = 0xf8;
static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };

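/*
 * Illustrative example of the lookup: xor_idx_to_desc is a per-source-index
 * bitmap selecting the base (bit clear) or extended (bit set) descriptor.
 * With 0xe0 == 0b11100000, source index 5 has its bit set, so it is stored
 * in the extended descriptor at raw field xor_idx_to_field[5] == 0 (the
 * first extended source slot), while source index 0 stays in the base
 * descriptor at field 1.  The pq tables work the same way, with a
 * three-source base descriptor (0xf8).
 */
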
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
			ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_PQ_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
	}
}

static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

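/*
 * Illustrative: an 8-source xor reports src_cnt_to_sw() == 8 > 5, so the
 * operation occupies a base plus an extended descriptor slot; a pq operation
 * needs the extra slot once it has more than 3 sources.  __cleanup() below
 * uses this helper to step over that extra slot while walking the ring.
 */
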
/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		tx = &desc->txd;
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			ioat3_dma_unmap(ioat, desc, ioat->tail + i);
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}
	}
	ioat->tail += i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	active = ioat2_ring_active(ioat);
	if (active == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * active), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}

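/*
 * Illustrative: with 10 descriptors still pending after a cleanup pass, the
 * INTRDELAY write at the end of __cleanup() programs
 * min(5 * 10, IOAT_INTRDELAY_MASK) == 50, i.e. a 50 microsecond interrupt
 * coalescing delay, while an idle ring programs 0 and interrupts immediately.
 */
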
/* try to cleanup, but yield (via spin_trylock) to incoming submissions
 * with the expectation that we will immediately poll again shortly
 */
static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

/* run cleanup now because we already delayed the interrupt via INTRDELAY */
static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	spin_lock_bh(&chan->cleanup_lock);
	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}
	spin_lock_bh(&ioat->ring_lock);

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat3_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat3_cleanup_sync(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
				__func__, chanerr);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat3_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order - 1);
		spin_unlock_bh(&ioat->ring_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat3_cleanup_poll(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

static struct dma_async_tx_descriptor *
ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_fill_descriptor *fill;
	int num_descs;
	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	i = 0;
	do {
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		fill = desc->fill;

		fill->size = xfer_size;
		fill->src_data = src_data;
		fill->dst_addr = dest;
		fill->ctl = 0;
		fill->ctl_f.op = IOAT_OP_FILL;

		len -= xfer_size;
		dest += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	fill->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	int num_descs;
	int with_ext;
	int i;
	int s;
	u16 idx;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
		/* pass */;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

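/*
 * Illustrative descriptor accounting for the routine above (numbers assume a
 * hypothetical channel with a 32KB transfer cap): a 64KB xor of 8 sources
 * needs ioat2_xferlen_to_descs() == 2 ring slots, doubled to 4 because more
 * than 5 sources require an extended descriptor per segment, plus the one
 * trailing null descriptor that carries the completion write -- 5 slots are
 * requested from ioat2_alloc_and_lock().
 */
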
static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}

static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	int num_descs;
	int with_ext;
	int i;
	int s;
	u16 idx;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case.
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
		/* pass */;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor pq_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

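/*
 * Illustrative continuation accounting for the routine above: with
 * DMA_PREP_CONTINUE and P enabled, the previous results are re-presented as
 * three extra sources ({00}*P, {01}*Q, {00}*Q, per the dma_maxpq() comment in
 * include/linux/dmaengine.h); the q-only continuation needs just one extra
 * source ({01}*Q).  That is why the descriptor math above compares against 3
 * sources and may double num_descs to reach the extended descriptor.
 */
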
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					    single_source_coef, len, flags);
	} else
		return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
					    len, flags);
}

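/*
 * Illustrative: the raid6 recovery path can request a Q-only multiply of a
 * single source (DMA_PREP_PQ_DISABLE_P with src_cnt == 1).  Since the engine
 * insists on two sources, the routine above duplicates the source with a zero
 * coefficient, e.g. a hypothetical source A with scf {02} becomes the pair
 * {A, A} with coefficients {02, 00}, which still yields Q = {02}*A.
 */
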
struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				    flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}

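/*
 * Illustrative: the pq engine doubles as a plain xor engine here -- Q is
 * disabled via DMA_PREP_PQ_DISABLE_Q, the coefficients are all zero (they
 * only affect the disabled Q result), and dst serves both as the P (xor)
 * result and as the required placeholder Q address.
 */
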
struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
				    len, flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	u16 idx;

	if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, idx);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static void __devinit ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

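	/*
	 * Illustrative: with IOAT_NUM_SRC_TEST == 6 each source page is
	 * filled with (1 << src_idx), so the expected xor of all sources is
	 * cmp_byte == 0x3f and cmp_word == 0x3f3f3f3f, which the destination
	 * page is checked against word-by-word below.
	 */
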
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* skip memset if the capability is not present */
	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
		goto free_resources;

	/* test memset */
	dma_addr = dma_map_page(dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test memset prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test memset setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test memset timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i]) {
			dev_err(dev, "Self-test memset failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}

static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3, and clear any
	 * pending errors
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
	err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
	if (err) {
		dev_err(&pdev->dev, "channel error register unreachable\n");
		return err;
	}
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	bool is_raid_device = false;
	int err;
	u32 cap;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat3_reset_hw;
	device->self_test = ioat3_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;

	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	/* dca is incompatible with raid operations */
	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (cap & IOAT_CAP_XOR) {
		is_raid_device = true;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}
	if (cap & IOAT_CAP_PQ) {
		is_raid_device = true;
		dma_set_maxpq(dma, 8, 0);

		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma->device_prep_dma_pq = ioat3_prep_pq;

		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;

		if (!(cap & IOAT_CAP_XOR)) {
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma->device_prep_dma_xor = ioat3_prep_pqxor;

			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
		}
	}
	if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
		dma_cap_set(DMA_MEMSET, dma->cap_mask);
		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
	}

	if (is_raid_device) {
		dma->device_is_tx_complete = ioat3_is_complete;
		device->cleanup_fn = ioat3_cleanup_event;
		device->timer_fn = ioat3_timer_event;
	} else {
		dma->device_is_tx_complete = ioat_is_dma_complete;
		device->cleanup_fn = ioat2_cleanup_event;
		device->timer_fn = ioat2_timer_event;
	}

	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
	dma->device_prep_dma_pq_val = NULL;
	#endif

	#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
	dma->device_prep_dma_xor_val = NULL;
	#endif

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return 0;
}