/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static struct pci_device_id ioat_pci_tbl[] = {
	/* I/OAT v3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

	/* I/OAT v3.2 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);

static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;

static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_bdx_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
		return true;
	default:
		return false;
	}
}

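/*
 * Convenience check grouping all of the Xeon (JSF/SNB/IVB/HSW/BDX) parts
 * listed above; ioat3_dma_probe() uses it to drop the XOR/PQ/RAID16SS
 * capability bits on these platforms.
 */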
static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
}

bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

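/*
 * BWD2/BWD3 and the BDX-DE parts carry no RAID (XOR/PQ) support; like
 * is_xeon_cb32() this feeds the capability fixup in ioat3_dma_probe().
 */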
static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

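/*
 * Interrupt setup honours the ioat_interrupt_style module parameter and
 * falls back in order: MSI-X (one vector per channel), then a single MSI,
 * then legacy INTx.
 */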
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (is_bwd_ioat(pdev))
		ioat_intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

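/*
 * Common probe-time setup shared by all supported parts: create the
 * completion writeback pool, enumerate the channels, hook up interrupts and
 * run the self tests before the device is registered.
 */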
static int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_out;
	}

	ioat_enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat3_dma_self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	dma_pool_destroy(ioat_dma->completion_pool);
err_out:
	return err;
}

static int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		dma_pool_destroy(ioat_dma->completion_pool);
	}

	return err;
}

static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);

	dma_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);
	ioat_reset_hw(ioat_chan);

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	for (i = 0; i < ioat_chan->desc_chunks; i++) {
		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
				  ioat_chan->descs[i].virt,
				  ioat_chan->descs[i].hw);
		ioat_chan->descs[i].virt = NULL;
		ioat_chan->descs[i].hw = 0;
	}
	ioat_chan->desc_chunks = 0;

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}

/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @chan: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
			       GFP_KERNEL, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = IOAT_MAX_ORDER;
	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}

/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->ioat_dma = ioat_dma;
	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	ioat_dma->idx[idx] = ioat_chan;
	init_timer(&ioat_chan->timer);
	ioat_chan->timer.function = ioat_timer_event;
	ioat_chan->timer.data = data;
	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}

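/*
 * XOR/XOR_VAL self test: XOR IOAT_NUM_SRC_TEST source pages into a zeroed
 * destination page and verify the result, then run two XOR_VAL operations,
 * one that must report a zero parity sum and one (against a zeroed
 * destination) that must report SUM_CHECK_P_RESULT.
 */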
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dest_dma))
		goto dma_unmap;

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		if (dest_dma != DMA_ERROR_CODE)
			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int rc;

	rc = ioat_dma_self_test(ioat_dma);
	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(ioat_dma);

	return rc;
}

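/*
 * BWD/BDX-DE quirk, called from ioat_dma_setup_interrupts(): when the
 * hardware reports errors through descriptor write back status (DWBES),
 * mask the per-channel XOR P/CRC and XOR Q error interrupts.
 */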
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	u32 errmask;

	dma = &ioat_dma->dma_dev;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (ioat_dma->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			ioat_chan = to_ioat_chan(c);
			errmask = readl(ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
		}
	}
}

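/*
 * Wire up the dmaengine callbacks according to the capability register:
 * memcpy and interrupt ops are always offered, XOR/PQ only when the
 * hardware advertises them (and DCA is not enabled), with SED descriptor
 * pools added for the 16-source RAID mode on CB3.3+ parts.
 */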
static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	bool is_raid_device = false;
	int err;

	dma = &ioat_dma->dma_dev;
	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat_issue_pending;
	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
	dma->device_free_chan_resources = ioat_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		ioat_dma->cap &=
			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (ioat_dma->cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
	}

	if (ioat_dma->cap & IOAT_CAP_PQ) {
		is_raid_device = true;

		dma->device_prep_dma_pq = ioat_prep_pq;
		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
			dma_set_maxpq(dma, 16, 0);
		else
			dma_set_maxpq(dma, 8, 0);

		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
				dma->max_xor = 16;
			else
				dma->max_xor = 8;
		}
	}

	dma->device_tx_status = ioat_tx_status;

	/* starting with CB3.3 super extended descriptors are supported */
	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!ioat_dma->sed_hw_pool[i])
				return -ENOMEM;
		}
	}

	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
		dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	err = ioat_probe(ioat_dma);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(ioat_dma);
	if (err)
		return err;

	ioat_kobject_add(ioat_dma, &ioat_ktype);

	if (dca)
		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

	return 0;
}

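/*
 * Quiesce every channel (mark it down, kill its timer, reset the hardware)
 * and turn off interrupt generation; also used as the first step of AER
 * error handling via ioat_pcie_error_detected().
 */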
static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
	struct ioatdma_chan *ioat_chan;
	int i;

	if (!ioat_dma)
		return;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		del_timer_sync(&ioat_chan->timer);
		spin_unlock_bh(&ioat_chan->prep_lock);
		/* this should quiesce then reset */
		ioat_reset_hw(ioat_chan);
	}

	ioat_disable_interrupts(ioat_dma);
}

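/*
 * Bring the channels back after an AER slot reset: clear IOAT_CHAN_DOWN and
 * acknowledge any latched channel errors; the preceding shutdown already
 * reset the hardware.
 */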
void ioat_resume(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	u32 chanerr;
	int i;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		/* no need to reset as shutdown already did that */
	}
}

#define DRV_NAME "ioatdma"

static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
						 enum pci_channel_state error)
{
	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);

	/* quiesce and block I/O */
	ioat_shutdown(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int err;

	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);

	if (pci_enable_device_mem(pdev) < 0) {
		dev_err(&pdev->dev,
			"Failed to enable PCIe device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"AER uncorrect error status clear failed: %#x\n", err);
	}

	return result;
}

static void ioat_pcie_error_resume(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);

	/* initialize and bring everything back */
	ioat_resume(ioat_dma);
}

static const struct pci_error_handlers ioat_err_handler = {
	.error_detected = ioat_pcie_error_detected,
	.slot_reset = ioat_pcie_error_slot_reset,
	.resume = ioat_pcie_error_resume,
};

static struct pci_driver ioat_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= ioat_pci_tbl,
	.probe		= ioat_pci_probe,
	.remove		= ioat_remove,
	.shutdown	= ioat_shutdown,
	.err_handler	= &ioat_err_handler,
};

static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
	struct device *dev = &pdev->dev;
	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->pdev = pdev;
	d->reg_base = iobase;
	return d;
}

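/*
 * PCI probe: map the MMIO BAR, set 64-bit (falling back to 32-bit) DMA
 * masks and hand off to ioat3_dma_probe(); anything older than I/OAT v3.0
 * is rejected by this driver.
 */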
static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_master(pdev);
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version >= IOAT_VER_3_0) {
		err = ioat3_dma_probe(device, ioat_dca_enabled);

		if (device->version >= IOAT_VER_3_3)
			pci_enable_pcie_error_reporting(pdev);
	} else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		pci_disable_pcie_error_reporting(pdev);
		return -ENODEV;
	}

	return 0;
}

static void ioat_remove(struct pci_dev *pdev)
{
	struct ioatdma_device *device = pci_get_drvdata(pdev);

	if (!device)
		return;

	dev_err(&pdev->dev, "Removing dma and dca services\n");
	if (device->dca) {
		unregister_dca_provider(device->dca, &pdev->dev);
		free_dca_provider(device->dca);
		device->dca = NULL;
	}

	pci_disable_pcie_error_reporting(pdev);
	ioat_dma_remove(device);
}

static int __init ioat_init_module(void)
{
	int err = -ENOMEM;

	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
		DRV_NAME, IOAT_DMA_VERSION);

	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ioat_cache)
		return -ENOMEM;

	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
	if (!ioat_sed_cache)
		goto err_ioat_cache;

	err = pci_register_driver(&ioat_pci_driver);
	if (err)
		goto err_ioat3_cache;

	return 0;

err_ioat3_cache:
	kmem_cache_destroy(ioat_sed_cache);

err_ioat_cache:
	kmem_cache_destroy(ioat_cache);

	return err;
}
module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
	kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);