// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.dev_type = &dsa_device_type,
		.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
		.cr_status_off = offsetof(struct dsa_completion_record, status),
		.cr_result_off = offsetof(struct dsa_completion_record, result),
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.dev_type = &iax_device_type,
		.evl_cr_off = offsetof(struct iax_evl_entry, cr),
		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
		.cr_status_off = offsetof(struct iax_completion_record, status),
		.cr_result_off = offsetof(struct iax_completion_record, error_code),
		.load_device_defaults = idxd_load_iaa_device_defaults,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
	/* DSA on GNR-D platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) },
	/* DSA on DMR platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	/* IAA on DMR platforms */
	{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

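/*
 * MSI-X entry 0 is used for the misc/error interrupt serviced by
 * idxd_misc_thread; each work queue gets its own entry starting at
 * index 1, with per-entry pending lists for completion handling.
 */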
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = IOMMU_PASID_INVALID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

 err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}

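/*
 * Allocate one idxd_wq per supported work queue and set each up as a
 * child conf device on the dsa bus. The error path unwinds with
 * put_device() on the conf devices that were already initialized.
 */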
static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wq_enable_map) {
		kfree(idxd->wqs);
		return -ENOMEM;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}

		if (idxd->hw.wq_cap.op_config) {
			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
			if (!wq->opcap_bmap) {
				put_device(conf_dev);
				rc = -ENOMEM;
				goto err;
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		mutex_init(&wq->uc_lock);
		xa_init(&wq->upasid_xa);
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}

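/*
 * Groups are set up the same way as wqs and engines. Traffic class
 * defaults are applied for device version 2 and earlier unless the
 * tc_override module parameter is set, and rdbufs_allowed defaults to
 * the total read buffers reported in GRPCAP.
 */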
static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
	}

	return 0;

 err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}

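/*
 * The event log (EVL) is optional; devices that do not advertise
 * gen_cap.evl_support skip this setup entirely.
 */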
static int idxd_init_evl(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	unsigned int evl_cache_size;
	struct idxd_evl *evl;
	const char *idxd_name;

	if (idxd->hw.gen_cap.evl_support == 0)
		return 0;

	evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
	if (!evl)
		return -ENOMEM;

	mutex_init(&evl->lock);
	evl->size = IDXD_EVL_SIZE_MIN;

	idxd_name = dev_name(idxd_confdev(idxd));
	evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
	/*
	 * Since completion record in evl_cache will be copied to user
	 * when handling completion record page fault, need to create
	 * the cache suitable for user copy.
	 */
	idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
						     0, 0, 0, evl_cache_size,
						     NULL);
	if (!idxd->evl_cache) {
		kfree(evl);
		return -ENOMEM;
	}

	idxd->evl = evl;
	return 0;
}

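/*
 * Set up the software state in dependency order: wqs, engines, groups,
 * the device workqueue, then the event log. Each error label unwinds
 * everything that was set up before the failure point.
 */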
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	rc = idxd_init_evl(idxd);
	if (rc < 0)
		goto err_evl;

	return 0;

 err_evl:
	destroy_workqueue(idxd->wq);
 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
 err_wqs:
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

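/*
 * Expand an array of u64 values into a bitmap, e.g. the four OPCAP
 * registers into idxd->opcap_bmap, with bit 0 of val[0] becoming bit 0
 * of the bitmap.
 */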
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

	for (i = 0, nr = 0; i < count; i++) {
		for (j = 0; j < BITS_PER_LONG_LONG; j++) {
			if (val[i] & BIT(j))
				set_bit(nr, bmap);
			nr++;
		}
	}
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
	idxd->nr_rdbufs = idxd->max_rdbufs;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
	multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);

	/* read iaa cap */
	if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}

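/*
 * Allocate and minimally initialize an idxd_device context: conf device
 * on the dsa bus, device id from idxd_ida, and the operation capability
 * bitmap. Returns NULL on any failure.
 */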
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		return NULL;

	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->opcap_bmap) {
		ida_free(&idxd_ida, idxd->id);
		return NULL;
	}

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}

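/*
 * Kernel (system) PASID support: allocate a global PASID and attach it
 * to the default DMA domain so that kernel work submitted with ENQCMDS
 * carries a valid PASID.
 */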
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;
	ioasid_t pasid;
	int ret;

	/*
	 * Attach a global PASID to the DMA domain so that we can use ENQCMDS
	 * to submit work on buffers mapped by DMA API.
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return -EPERM;

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	/*
	 * DMA domain is owned by the driver, it should support all valid
	 * types such as DMA-FQ, identity, etc.
	 */
	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
	if (ret) {
		dev_err(dev, "failed to attach device pasid %d, domain type %d",
			pasid, domain->type);
		iommu_free_global_pasid(pasid);
		return ret;
	}

	/* Since we set user privilege for kernel DMA, enable completion IRQ */
	idxd_set_user_intr(idxd, 1);
	idxd->pasid = pasid;

	return ret;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return;

	iommu_detach_device_pasid(domain, dev, idxd->pasid);
	iommu_free_global_pasid(idxd->pasid);

	idxd_set_user_intr(idxd, 0);
	idxd->pasid = IOMMU_PASID_INVALID;
}

static int idxd_enable_sva(struct pci_dev *pdev)
{
	int ret;

	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);

	return ret;
}

static void idxd_disable_sva(struct pci_dev *pdev)
{
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
}

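/*
 * Device-level probe: reset the device, optionally enable SVA and the
 * system PASID, read capabilities and table offsets, build the software
 * state and interrupts, then set up the cdev major and perfmon.
 */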
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		if (idxd_enable_sva(pdev)) {
			dev_warn(dev, "Unable to turn on user SVA feature.\n");
		} else {
			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

			rc = idxd_enable_system_pasid(idxd);
			if (rc)
				dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
			else
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_config:
	idxd_cleanup_internals(idxd);
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		idxd_disable_sva(pdev);
	return rc;
}

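/* Undo everything idxd_probe() set up, in reverse order. */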
static void idxd_cleanup(struct idxd_device *idxd)
{
	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		idxd_disable_sva(idxd->pdev);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	if (data->load_device_defaults) {
		rc = data->load_device_defaults(idxd);
		if (rc)
			dev_warn(dev, "IDXD loading device defaults failed\n");
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	rc = idxd_device_init_debugfs(idxd);
	if (rc)
		dev_warn(dev, "IDXD debugfs failed to setup\n");

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	idxd->user_submission_safe = data->user_submission_safe;

	return 0;

 err_dev_register:
	idxd_cleanup(idxd);
 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	put_device(idxd_confdev(idxd));
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

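/* Quiesce all enabled kernel-type work queues. */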
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbound the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_device_remove_debugfs(idxd);

	irq_entry = idxd_get_ie(idxd, 0);
	free_irq(irq_entry->vector, irq_entry);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	if (device_user_pasid_enabled(idxd))
		idxd_disable_sva(pdev);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	put_device(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

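/*
 * Module init: the driver is useless without MOVDIR64B, and ENQCMD(S)
 * availability is recorded in support_enqcmd. The sub-drivers, cdev
 * region, debugfs, and PCI driver are registered in order, with a
 * matching unwind chain on failure.
 */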
static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = idxd_init_debugfs();
	if (err)
		goto err_debugfs;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_remove_debugfs();
err_debugfs:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_remove_debugfs();
}
module_exit(idxd_exit_module);