drivers/dma/ioat/dma.c
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}
static void ioat1_cleanup_tasklet(unsigned long data);

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       void (*timer_fn)(unsigned long),
		       void (*tasklet)(unsigned long),
		       unsigned long ioat)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = timer_fn;
	chan->timer.data = ioat;
	tasklet_init(&chan->cleanup_task, tasklet, ioat);
	tasklet_disable(&chan->cleanup_task);
}
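
/*
 * The cleanup tasklet starts out disabled on purpose: it must not run
 * before ioat1_dma_alloc_chan_resources() has set up the descriptor lists
 * and completion writeback area, which is where tasklet_enable() is
 * finally called.
 */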
static void ioat1_timer_event(unsigned long data);

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_timer_event,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}
/**
 * ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                  descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}
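
/*
 * Note that issue_pending flushes unconditionally whenever anything is
 * queued; the ioat_pending_level watermark only governs the opportunistic
 * flush done from ioat1_tx_submit().
 */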
/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}
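
/*
 * The reset completes asynchronously: ioat1_timer_event() sees
 * IOAT_RESET_PENDING once RESET_DELAY has elapsed, re-seeds the chain
 * address from the last used descriptor, and restarts the channel.
 */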
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
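
/*
 * Submission below the watermark never touches the APPEND register; the
 * engine picks up new work by following the freshly written ->next link
 * of the old chain tail the next time it is kicked.  The wmb() above
 * ensures the descriptor contents are visible to the device before the
 * link that exposes them is written.
 */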
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @c: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}
/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}
/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}
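
/*
 * A copy larger than ioat->xfercap is split across several hardware
 * descriptors here, but only the last one carries the cookie and the
 * completion write.  The canonical client sequence for this prep routine
 * is the one ioat_dma_self_test() uses below (a sketch, not new API):
 *
 *	tx = dma->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	tx->callback = ...;				(optional)
 *	cookie = tx->tx_submit(tx);
 *	dma->device_issue_pending(chan);
 */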
static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;

	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   unsigned long *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
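
/*
 * Returns false when the completion address has not moved since the last
 * pass, i.e. the channel made no visible progress.  A true return also
 * rearms the completion watchdog and clears the "already acknowledged"
 * bit that ioat1_timer_event() uses to decide when a stall warrants a
 * full channel reset.
 */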
static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		__func__, phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}
/**
 * ioat1_cleanup - clean up finished descriptors
 * @ioat: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
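
/*
 * Watchdog summary: the timer distinguishes three cases.  A pending reset
 * is finished by restarting the chain; visible progress is harvested via
 * __cleanup(); a stalled channel gets one COMPLETION_TIMEOUT of grace
 * (marked by IOAT_COMPLETION_ACK) before ioat1_reset_channel() is invoked.
 */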
static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}
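
/*
 * The null descriptor never leaves used_desc: it is the permanent chain
 * anchor that ioat1_tx_submit() links new work after, which is why
 * __cleanup() deliberately keeps the final used descriptor and why
 * free_chan_resources tolerates exactly one "in use" descriptor.
 */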
/*
 * Perform an I/OAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
/**
 * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
 * @device: device to be tested
 */
int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}
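
/*
 * The goto chain above is a graceful-degradation path: full per-channel
 * MSI-X, then a single shared MSI-X vector, then MSI, then legacy INTx,
 * with each failed step falling through to the next-best mode before
 * giving up entirely.
 */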
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}
int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}
/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}
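
/*
 * This mirrors pdev->msi_enabled into a device-private config-space bit;
 * presumably first-generation I/OAT hardware will not raise MSI unless
 * IOAT_PCI_DMACTRL_MSI_EN agrees with the standard PCI MSI capability.
 */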
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};
static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}
int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}
void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}