/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * memcpy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &chan->state))
			tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	if (test_bit(IOAT_RUN, &chan->state))
		tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}
/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	dma_cookie_init(&chan->common);
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}
/**
 * ioat1_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
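	/*
	 * The XFERCAP register holds a power-of-two exponent: a scale of 0 is
	 * treated as an effectively unlimited per-descriptor transfer size,
	 * otherwise each descriptor may move at most 2^xfercap_scale bytes.
	 */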
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		device->common.chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
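	/* ring the doorbell: APPEND tells the channel to re-fetch the chain
	 * and process any newly linked descriptors */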
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}
/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}
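
/*
 * ioat1_tx_submit - assign a cookie and splice a prepared descriptor chain
 * onto used_desc; the APPEND doorbell is deferred until ioat_pending_level
 * descriptors have accumulated (see ioat1_dma_memcpy_issue_pending).
 */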
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
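	/* tx_cnt is the number of hardware descriptors consumed by this
	 * transaction (a long copy is split to honor ioat->xfercap) */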
	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	set_bit(IOAT_RUN, &chan->state);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}
void ioat_stop(struct ioat_chan_common *chan)
{
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	int chan_id = chan_num(chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &chan->state);

	/* flush inflight interrupts */
	switch (device->irq_mode) {
	case IOAT_MSIX:
		msix = &device->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	device->cleanup_fn((unsigned long) &chan->common);
}
/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	ioat_stop(chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}
/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));

	return new;
}
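
/*
 * ioat1_dma_prep_memcpy - build a chain of hardware descriptors for one
 * memcpy request, splitting it into xfercap-sized pieces; only the last
 * descriptor carries the caller's flags and the completion write.
 */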
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}
static void ioat1_cleanup_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat1_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}
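
/*
 * The channel periodically writes the bus address of the last completed
 * descriptor into the completion writeback area; cleanup walks used_desc
 * up to that address.
 */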
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
		__func__, (unsigned long long) phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			dma_cookie_complete(tx);
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}
/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
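
/*
 * ioat_dma_tx_status - if the cookie has not completed yet, kick the
 * cleanup path and then re-read the cookie state.
 */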
enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	device->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);
	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

unmap_dma:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = IOAT_MSIX;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	device->irq_mode = IOAT_MSIX_SINGLE;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	device->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

	device->irq_mode = IOAT_INTX;
done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	device->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}
int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}
/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}
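
/* per-channel sysfs attributes, exported under each channel's "quickdata"
 * kobject (see ioat_kobject_add()) */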
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};
static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}
const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}
void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}
int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	device->timer_fn = ioat1_timer_event;
	device->cleanup_fn = ioat1_cleanup_event;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}