// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

bool support_enqcmd;

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },

	/* IAX ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
	"iax"
};

const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}
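
/*
 * Enable MSI-X and request threaded interrupt handlers: vector 0 handles
 * error and administrative events (idxd_misc_thread), while each remaining
 * vector services one work queue completion list (idxd_wq_thread). The
 * MSI-X permission table is then programmed with the device PASID (when
 * PASID is enabled) for the per-WQ vectors.
 */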
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;
	union msix_perm mperm;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		goto err_no_irq;
	}

	idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
			msixcnt, GFP_KERNEL);
	if (!idxd->msix_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++)
		idxd->msix_entries[i].entry = i;

	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
	if (rc) {
		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
		goto err_no_irq;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
					 sizeof(struct idxd_irq_entry),
					 GFP_KERNEL);
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
		spin_lock_init(&idxd->irq_entries[i].list_lock);
	}

	msix = &idxd->msix_entries[0];
	irq_entry = &idxd->irq_entries[0];
	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
				       idxd_misc_thread, 0, "idxd-misc",
				       irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_no_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
		msix->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		msix = &idxd->msix_entries[i];
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = devm_request_threaded_irq(dev, msix->vector,
					       idxd_irq_handler,
					       idxd_wq_thread, 0,
					       "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n",
				msix->vector);
			goto err_no_irq;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
			i, msix->vector);
	}

	idxd_unmask_error_interrupts(idxd);

	/* Setup MSIX permission table */
	mperm.bits = 0;
	mperm.pasid = idxd->pasid;
	mperm.pasid_en = device_pasid_enabled(idxd);
	for (i = 1; i < msixcnt; i++)
		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);

	return 0;

err_no_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	pci_disable_msix(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}
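
/*
 * Allocate the per-device software state sized from the capability
 * registers: groups, work queues with their per-WQ WQCFG buffers, engines,
 * and the kernel workqueue used for deferred device work.
 */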
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	init_waitqueue_head(&idxd->cmd_waitq);
	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
				 GFP_KERNEL);
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->id = i;
		wq->idxd = idxd;
		mutex_init(&wq->wq_lock);
		wq->idxd_cdev.minor = -1;
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
		wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
		if (!wq->wqcfg)
			return -ENOMEM;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq)
		return -ENOMEM;

	return 0;
}
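
/*
 * Cache the MMIO offsets of the group config, WQ config, MSI-X permission
 * and perfmon tables from the two 64-bit IDXD_TABLE_OFFSET registers.
 */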
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
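
/*
 * Snapshot the general, group, engine, work queue and operation capability
 * registers and derive the driver limits: max transfer and batch size,
 * number of groups/engines/WQs, token counts and WQCFG size.
 */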
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}
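
/*
 * Allocate and minimally initialize the device context; the allocation is
 * device-managed and is released automatically when the driver detaches.
 */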
static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;

	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	spin_lock_init(&idxd->dev_lock);

	return idxd;
}
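
/*
 * Bind the device to the kernel's address space in supervisor mode and
 * retrieve the resulting system PASID; kernel-owned work queues use this
 * PASID for Shared Virtual Addressing.
 */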
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}
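
/*
 * Common probe path: reset the device, optionally enable a system PASID,
 * read capabilities and table offsets, set up internal structures and
 * interrupts, and register the device in the per-type IDR.
 */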
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	idxd_device_init_reset(idxd);
	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
		rc = idxd_enable_system_pasid(idxd);
		if (rc < 0)
			dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
		else
			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err_setup;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_setup;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	mutex_lock(&idxd_idr_lock);
	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
	mutex_unlock(&idxd_idr_lock);
	if (idxd->id < 0) {
		rc = -ENOMEM;
		goto err_idr_fail;
	}

	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

err_idr_fail:
	idxd_mask_error_interrupts(idxd);
	idxd_mask_msix_vectors(idxd);
err_setup:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	return rc;
}

static void idxd_type_init(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		idxd->compl_size = sizeof(struct dsa_completion_record);
	else if (idxd->type == IDXD_TYPE_IAX)
		idxd->compl_size = sizeof(struct iax_completion_record);
}
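
/*
 * PCI probe entry point: enable the device, map the MMIO BAR, set the DMA
 * masks, determine the device type (DSA vs IAX), run the common probe, and
 * expose the sysfs interface.
 */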
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev);
	if (!idxd)
		return -ENOMEM;

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base)
		return -ENOMEM;

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	idxd_set_type(idxd);

	idxd_type_init(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		return -ENODEV;
	}

	rc = idxd_setup_sysfs(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		return -ENODEV;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
}
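
/*
 * Abort and free any descriptors still queued on an interrupt entry's
 * pending llist or work list; used when the device is quiesced on shutdown.
 */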
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}
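
/*
 * Quiesce the device on shutdown or removal: disable it, mask interrupts,
 * wait for in-flight interrupt handlers, abort any outstanding descriptors,
 * and tear down the driver's workqueue.
 */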
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(idxd->msix_entries[i].vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	destroy_workqueue(idxd->wq);
}
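
/*
 * PCI remove path: tear down sysfs, quiesce the device, release the system
 * PASID if one was obtained, and drop the device from the per-type IDR.
 */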
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_cleanup_sysfs(idxd);
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	mutex_lock(&idxd_idr_lock);
	idr_remove(&idxd_idrs[idxd->type], idxd->id);
	mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};
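
/*
 * Module init: require MOVDIR64B, note ENQCMD(S) availability, then register
 * the idxd bus type, device driver, cdev interface and PCI driver in order,
 * unwinding in reverse on failure.
 */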
static int __init idxd_init_module(void)
{
	int err, i;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!boot_cpu_has(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);