/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

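/*
 * Version 2+ channels manage descriptors as a power-of-2 ring.  The driver
 * tracks three indices per channel: 'head' (next free slot), 'issued' (last
 * slot handed to hardware) and 'tail' (next slot to be cleaned up).  Work is
 * handed to the engine by writing the cumulative descriptor count to the
 * channel's DMACOUNT register.
 */
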
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

static int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");

static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	void * __iomem reg_base = ioat->base.reg_base;

	ioat->pending = 0;
	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	/* make descriptor updates globally visible before notifying channel */
	wmb();
	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

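/*
 * ioat2_issue_pending - dmaengine ->device_issue_pending hook: flush any
 * descriptors that were submitted but not yet handed to hardware.
 */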
static void ioat2_issue_pending(struct dma_chan *chan)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);

	spin_lock_bh(&ioat->ring_lock);
	if (ioat->pending == 1)
		__ioat2_issue_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Set pending to '1' unless pending is already set to '2'; pending == 2
 * indicates that submission is temporarily blocked due to an in-flight
 * reset.  If we are already above the ioat_pending_level threshold then
 * just issue pending.
 *
 * Called with ring_lock held.
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (unlikely(ioat->pending == 2))
		return;
	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
	else
		ioat->pending = 1;
}

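/*
 * __ioat2_start_null_desc - program a NULL descriptor to (re)start the
 * channel: its address is written to CHAINADDR and then issued, letting the
 * engine latch a new chain without transferring real data.  Caller must
 * hold ioat->ring_lock.
 */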
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	int idx;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	idx = ioat2_desc_alloc(ioat, 1);
	desc = ioat2_get_ring_ent(ioat, idx);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
	       reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->txd.phys) >> 32,
	       reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
	dump_desc_dbg(ioat, desc);
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->ring_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

static void ioat2_cleanup(struct ioat2_dma_chan *ioat);

/**
 * ioat2_reset_part2 - reinit the channel after a reset
 */
static void ioat2_reset_part2(struct work_struct *work)
{
	struct ioat_chan_common *chan;
	struct ioat2_dma_chan *ioat;

	chan = container_of(work, struct ioat_chan_common, work.work);
	ioat = container_of(chan, struct ioat2_dma_chan, base);

	/* ensure that ->tail points to the stalled descriptor
	 * (ioat->pending is set to 2 at this point so no new
	 * descriptors will be issued while we perform this cleanup)
	 */
	ioat2_cleanup(ioat);

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->ring_lock);

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;

	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	dev_info(to_dev(chan),
		 "chan%d reset - %d descs waiting, %d total desc\n",
		 chan_num(chan), ioat->dmacount, 1 << ioat->alloc_order);
}

/**
 * ioat2_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat2_reset_channel(struct ioat2_dma_chan *ioat)
{
	u32 chansts, chanerr;
	struct ioat_chan_common *chan = &ioat->base;
	u16 active;

	spin_lock_bh(&ioat->ring_lock);
	active = ioat2_ring_active(ioat);
	spin_unlock_bh(&ioat->ring_lock);
	if (!active)
		return;

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	chansts = (chan->completion_virt->low
		   & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	spin_lock_bh(&ioat->ring_lock);
	ioat->pending = 2;
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base
	       + IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->ring_lock);
	schedule_delayed_work(&chan->work, RESET_DELAY);
}

/**
 * ioat2_chan_watchdog - watch for stuck channels
 */
static void ioat2_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat2_dma_chan *ioat;
	struct ioat_chan_common *chan;
	u16 active;
	int i;

	dev_dbg(&device->pdev->dev, "%s\n", __func__);

	for (i = 0; i < device->common.chancnt; i++) {
		chan = ioat_chan_by_index(device, i);
		ioat = container_of(chan, struct ioat2_dma_chan, base);

		/*
		 * For version 2.0: if there are descriptors yet to be
		 * processed and the last completed descriptor hasn't changed
		 * since the last watchdog pass, then
		 *	if they haven't hit the pending level,
		 *		issue the pending descriptors to push them
		 *		through;
		 *	else
		 *		try resetting the channel.
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		spin_unlock_bh(&ioat->ring_lock);

		if (active &&
		    chan->last_completion &&
		    chan->last_completion == chan->watchdog_completion) {

			if (ioat->pending == 1)
				ioat2_issue_pending(&chan->common);
			else {
				ioat2_reset_channel(ioat);
				chan->watchdog_completion = 0;
			}
		} else {
			chan->last_compl_desc_addr_hw = 0;
			chan->watchdog_completion = chan->last_completion;
		}
		chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie;
	}
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;
	struct dma_async_tx_descriptor *tx;

	prefetch(chan->completion_virt);

	spin_lock_bh(&chan->cleanup_lock);
	phys_complete = ioat_get_current_completion(chan);
	if (phys_complete == chan->last_completion) {
		spin_unlock_bh(&chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after WATCHDOG_DELAY seconds
		 */
		if (chan->device->version < IOAT_VER_3_0) {
			unsigned long tmo;

			tmo = chan->last_completion_time + HZ*WATCHDOG_DELAY;
			if (time_after(jiffies, tmo)) {
				ioat2_chan_watchdog(&(chan->device->work.work));
				chan->last_completion_time = jiffies;
			}
		}
		return;
	}
	chan->last_completion_time = jiffies;

	spin_lock_bh(&ioat->ring_lock);

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = phys_complete;

	spin_unlock_bh(&chan->cleanup_lock);
}

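/*
 * Channel interrupt bottom half: reclaim any descriptors the hardware has
 * completed since the last pass, then write CHANCTRL for the next interrupt.
 */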
static void ioat2_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat2_reset_part2,
				  ioat2_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->ring_lock);
	}
	dma->chancnt = i;
	return i;
}

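/*
 * ioat2_tx_submit_unlock - dmaengine ->tx_submit hook.  Assigns the next
 * cookie and drops the ring_lock that was taken by the prep routine, so
 * descriptors reach the ring in submission order.
 */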
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);

	return cookie;
}

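/*
 * Allocate one software ring entry: a hardware descriptor from the PCI pool
 * plus its ioat_ring_ent wrapper, with the async_tx descriptor initialized
 * to submit through ioat2_tx_submit_unlock.
 */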
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, GFP_KERNEL, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kfree(desc);
}

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
static int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u16 chanctrl;
	u32 chanerr;
	int descs;
	int i;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN | IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		   IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion_virt = pci_pool_alloc(chan->device->completion_pool,
					       GFP_KERNEL,
					       &chan->completion_addr);
	if (!chan->completion_virt)
		return -ENOMEM;

	memset(chan->completion_virt, 0,
	       sizeof(*chan->completion_virt));
	writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_addr) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat->alloc_order = ioat_get_alloc_order();
	descs = 1 << ioat->alloc_order;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return -ENOMEM;
		}
		set_desc_id(ring[i], i);
	}

	/* link the descriptors into a ring */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	spin_lock_bh(&ioat->ring_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->pending = 0;
	spin_unlock_bh(&ioat->ring_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	return descs;
}

/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&ioat->ring_lock);
	if (unlikely(ioat2_ring_space(ioat) < num_descs)) {
		if (printk_ratelimit())
			dev_dbg(to_dev(chan),
				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
				__func__, num_descs, ioat->head, ioat->tail,
				ioat->issued);
		spin_unlock_bh(&ioat->ring_lock);

		/* do direct reclaim in the allocation failure case */
		ioat2_cleanup(ioat);

		return -ENOMEM;
	}

	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	*idx = ioat2_desc_alloc(ioat, num_descs);
	return 0;  /* with ioat->ring_lock held */
}

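/*
 * ioat2_dma_prep_memcpy_lock - dmaengine ->device_prep_dma_memcpy hook.
 * Splits 'len' across as many descriptors as the channel's transfer cap
 * requires and returns with ring_lock held; the lock is released in
 * ioat2_tx_submit_unlock.  A typical dmaengine client sequence (sketch,
 * not taken from this file) looks like:
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 */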
static struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs;
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i++) {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	}

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	ioat2_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->ring_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion_virt,
		      chan->completion_addr);
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = 0;
	chan->completion_addr = 0;
	ioat->pending = 0;
	ioat->dmacount = 0;
	chan->watchdog_completion = 0;
	chan->last_compl_desc_addr_hw = 0;
	chan->watchdog_tcp_cookie = 0;
	chan->watchdog_last_tcp_cookie = 0;
}

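/*
 * ioat2_is_complete - dmaengine ->device_is_tx_complete hook.  If the cookie
 * hasn't completed yet, run cleanup once and check again.
 */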
static enum dma_status
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat2_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

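/*
 * ioat2_dma_probe - set up an I/OAT v2 device: install the ioat2 dmaengine
 * callbacks, probe and register the channels, optionally initialize DCA,
 * and start the channel watchdog.
 */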
int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat2_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);

	return err;
}

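/*
 * ioat3_dma_probe - set up an I/OAT v3 device.  Reuses the v2 channel
 * operations but applies v3-specific PCI config workarounds and uses the
 * v3 DCA init path.
 */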
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;
	u16 dev_id;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return err;
}