/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <linux/iommu.h>
#include <linux/crypto.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#define IDXD_DRIVER_VERSION "1.00"

extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;
struct idxd_dev {
	struct device conf_dev;
	enum idxd_dev_type type;
};
#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
	IDXD_TYPE_UNKNOWN = -1,
#define IDXD_NAME_SIZE 128
#define IDXD_PMU_EVENT_MAX 64

#define IDXD_ENQCMDS_RETRIES 32
#define IDXD_ENQCMDS_MAX_RETRIES 64
enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_DEV_FAIL,
};
struct idxd_device_driver {
	enum idxd_dev_type *type;
	int (*probe)(struct idxd_dev *idxd_dev);
	void (*remove)(struct idxd_dev *idxd_dev);
	void (*desc_complete)(struct idxd_desc *desc,
			      enum idxd_complete_type comp_type,
			      bool free_desc,
			      void *ctx, u32 *status);
	struct device_driver drv;
};
extern struct idxd_device_driver dsa_drv;
extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;

#define INVALID_INT_HANDLE -1
struct idxd_irq_entry {
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between the irq thread processing
	 * descriptors and the irq thread processing error descriptors.
	 */
	struct idxd_dev idxd_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	bool use_rdbuf_limit;
	int desc_progress_limit;
	int batch_progress_limit;

	struct idxd_device *idxd;
	struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
	DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);
	char name[IDXD_NAME_SIZE];
	int n_event_categories;
	bool per_counter_caps_supported;
	unsigned long supported_event_categories;
	unsigned long supported_filters;
#define IDXD_MAX_PRIORITY 0xf
	IDXD_WQ_DISABLED = 0,

	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,

	struct idxd_dev idxd_dev;

#define DRIVER_NAME_SIZE 128

#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10

#define WQ_DEFAULT_QUEUE_DEPTH 16
#define WQ_DEFAULT_MAX_XFER SZ_2M
#define WQ_DEFAULT_MAX_BATCH 32

	IDXD_OP_NONBLOCK = 1,
struct idxd_dma_chan {
	struct dma_chan chan;
};
	void __iomem *portal;
	unsigned int enqcmds_retries;
	struct percpu_ref wq_active;
	struct completion wq_dead;
	struct completion wq_resurrect;
	struct idxd_dev idxd_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct workqueue_struct *wq;
	struct idxd_device *idxd;
	struct idxd_irq_entry ie;
	enum idxd_wq_type type;
	struct idxd_group *group;
	struct mutex wq_lock;	/* mutex for workqueue */
	enum idxd_wq_state state;
	unsigned long *opcap_bmap;
	struct dsa_hw_desc **hw_descs;
	struct dsa_completion_record *compls;
	struct iax_completion_record *iax_compls;
	dma_addr_t compls_addr;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	/* Lock to protect upasid_xa access. */
	struct mutex uc_lock;
	struct xarray upasid_xa;
	char driver_name[DRIVER_NAME_SIZE + 1];
	struct idxd_dev idxd_dev;
	struct idxd_group *group;
	struct idxd_device *idxd;

	/* shadow registers */
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	union iaa_cap_reg iaa_cap;
enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
	IDXD_FLAG_USER_PASID_ENABLED,
};
struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};
typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd);
struct idxd_driver_data {
	const char *name_prefix;
	const struct device_type *dev_type;
	bool user_submission_safe;
	load_device_defaults_fn_t load_device_defaults;
};
	/* Lock to protect event log access. */
	/* Total size of event log = number of entries * entry size. */
	unsigned int log_size;
	/* The number of entries in the event log. */
	bool batch_fail[IDXD_MAX_BATCH_IDENT];
struct idxd_evl_fault {
	struct work_struct work;
	/* must always be the last member */
	struct __evl_entry entry[];
};
	struct idxd_dev idxd_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	enum idxd_device_state state;
	struct idxd_irq_entry ie;	/* misc irq, msix 0 */
	struct pci_dev *pdev;
	void __iomem *reg_base;
	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;
	struct iommu_sva *sva;
	bool request_int_handles;
	u32 msix_perm_offset;
	int nr_rdbufs;	/* non-reserved read buffers */
	unsigned int wqcfg_size;
	unsigned long *wq_enable_map;
	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;
	struct idxd_pmu *idxd_pmu;
	unsigned long *opcap_bmap;
	struct idxd_evl *evl;
	struct kmem_cache *evl_cache;
	struct dentry *dbgfs_dir;
	struct dentry *dbgfs_evl_file;
	bool user_submission_safe;
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
{
	return idxd->hw.gen_cap.evl_support ?
	       (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0;
}

static inline unsigned int evl_size(struct idxd_device *idxd)
{
	return idxd->evl->size * evl_ent_size(idxd);
}
	struct acomp_req *req;
	struct crypto_tfm *tfm;
/* IDXD software descriptor */
	struct dsa_hw_desc *hw;
	struct iax_hw_desc *iax_hw;
	struct dsa_completion_record *completion;
	struct iax_completion_record *iax_completion;
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct crypto_ctx crypto;
	struct llist_node llnode;
	struct list_head list;
/*
 * This is a software-defined error for the completion status. We overload
 * an error code that will never appear in the completion status and shows
 * up only in the SWERR register.
 */
enum idxd_completion_status {
	IDXD_COMP_DESC_ABORT = 0xff,
};
#define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev
#define wq_confdev(wq) &wq->idxd_dev.conf_dev
#define engine_confdev(engine) &engine->idxd_dev.conf_dev
#define group_confdev(group) &group->idxd_dev.conf_dev
#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev
#define user_ctx_dev(ctx) (&(ctx)->idxd_dev.conf_dev)

#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq)
{
	struct device *dev = wq_confdev(wq);
	struct idxd_device_driver *idxd_drv =
		container_of(dev->driver, struct idxd_device_driver, drv);

	return idxd_drv;
}
static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_idxd(idxd_dev);
}

static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_wq(idxd_dev);
}

static inline struct idxd_engine *confdev_to_engine(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_engine, idxd_dev);
}

static inline struct idxd_group *confdev_to_group(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_group, idxd_dev);
}

static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
}
static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
{
	if (type >= IDXD_DEV_MAX_TYPE) {
		idev->type = IDXD_DEV_NONE;
		return;
	}

	idev->type = type;
}
static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
{
	return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
}
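
/*
 * Illustrative mapping for the helper above: idxd_get_ie(idxd, 0) returns the
 * device's misc interrupt entry (&idxd->ie), while idxd_get_ie(idxd, 3)
 * returns &idxd->wqs[2]->ie, i.e. MSI-X index N (N > 0) belongs to wq N - 1.
 */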
static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_wq, ie);
}

static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
{
	return container_of(ie, struct idxd_device, ie);
}
static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
{
	union gencfg_reg reg;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	reg.user_int_en = enable;
	iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}
extern const struct bus_type dsa_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern const struct device_type dsa_device_type;
extern const struct device_type iax_device_type;
extern const struct device_type idxd_wq_device_type;
extern const struct device_type idxd_engine_device_type;
extern const struct device_type idxd_group_device_type;
static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_DSA;
}

static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_IAX;
}

static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
	return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}

static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_WQ;
}
static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}
static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL;
}

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
}

static inline bool wq_pasid_enabled(struct idxd_wq *wq)
{
	return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
	       (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
}

static inline bool wq_shared_supported(struct idxd_wq *wq)
{
	return (support_enqcmd && wq_pasid_enabled(wq));
}
enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,

enum idxd_interrupt_type {
static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}

#define IDXD_PORTAL_MASK (PAGE_SIZE - 1)
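
/*
 * Worked example for idxd_get_wq_portal_full_offset(), assuming 4 KiB pages
 * (PAGE_SHIFT == 12): each wq owns a four-page portal region, so wq_id 2
 * starts at (2 * 4) << 12 = 0x8000, and IDXD_PORTAL_UNLIMITED (0) adds no
 * further offset within that region.
 */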
/*
 * Even though this function can be accessed by multiple threads, it is safe to use.
 * At worst the address gets used more than once before it gets incremented. We don't
 * hit a threshold until the iops reach many millions per second. So the occasional
 * reuse of the same address is tolerable compared to using an atomic variable. This is
 * safe on a system that has atomic load/store for 32bit integers. Given that this is an
 * Intel iEP device, that should not be a problem.
 */
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{
	int ofs = wq->portal_offset;

	wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
	return wq->portal + ofs;
}
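
/*
 * Rotation sketch for idxd_wq_portal_addr(), assuming 4 KiB pages and a
 * 64-byte struct dsa_raw_desc: successive calls hand out portal + 0x0,
 * portal + 0x40, portal + 0x80, ... and wrap back to portal + 0x0 once the
 * offset reaches PAGE_SIZE, because IDXD_PORTAL_MASK is (PAGE_SIZE - 1).
 */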
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}
static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private)
{
	dev_set_drvdata(wq_confdev(wq), private);
}

static inline void *idxd_wq_get_private(struct idxd_wq *wq)
{
	return dev_get_drvdata(wq_confdev(wq));
}
/*
 * Intel IAA does not support batch processing.
 * The max batch size of the device, the max batch size of the wq, and the
 * max batch shift of the wqcfg should always be 0 on IAA.
 */
static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
					   u32 max_batch_size)
{
	if (idxd_type == IDXD_TYPE_IAX)
		idxd->max_batch_size = 0;
	else
		idxd->max_batch_size = max_batch_size;
}

static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
					      u32 max_batch_size)
{
	if (idxd_type == IDXD_TYPE_IAX)
		wq->max_batch_size = 0;
	else
		wq->max_batch_size = max_batch_size;
}

static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
						  u32 max_batch_shift)
{
	if (idxd_type == IDXD_TYPE_IAX)
		wqcfg->max_batch_shift = 0;
	else
		wqcfg->max_batch_shift = max_batch_shift;
}
static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev)
{
	return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0);
}
#define MODULE_ALIAS_IDXD_DEVICE(type) MODULE_ALIAS("idxd:t" __stringify(type) "*")
#define IDXD_DEVICES_MODALIAS_FMT "idxd:t%d"

int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
					struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
	__idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)

void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);

#define module_idxd_driver(__idxd_driver) \
	module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
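
/*
 * Hedged usage sketch (not part of this header): a minimal sub-driver built on
 * the hooks declared above. The my_* names are hypothetical; the in-tree
 * examples are dsa_drv, idxd_drv, idxd_dmaengine_drv and idxd_user_drv.
 *
 *	static enum idxd_dev_type my_dev_types[] = { IDXD_DEV_WQ, IDXD_DEV_NONE };
 *
 *	static struct idxd_device_driver my_idxd_driver = {
 *		.type = my_dev_types,
 *		.probe = my_idxd_probe,
 *		.remove = my_idxd_remove,
 *	};
 *
 *	module_idxd_driver(my_idxd_driver);
 */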
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc, void *ctx, u32 *status);
static inline void idxd_desc_complete(struct idxd_desc *desc,
				      enum idxd_complete_type comp_type,
				      bool free_desc)
{
	struct idxd_device_driver *drv;
	u32 status;

	drv = wq_to_idxd_drv(desc->wq);
	if (drv->desc_complete)
		drv->desc_complete(desc, comp_type, free_desc,
				   &desc->txd, &status);
}
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
int idxd_load_iaa_device_defaults(struct idxd_device *idxd);
/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int idxd_drv_enable_wq(struct idxd_wq *wq);
void idxd_drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);
/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void __idxd_wq_quiesce(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
void idxd_wq_free_irq(struct idxd_wq *wq);
int idxd_wq_request_irq(struct idxd_wq *wq);
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
		 void *buf, int len);
void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
#endif
int idxd_device_init_debugfs(struct idxd_device *idxd);
void idxd_device_remove_debugfs(struct idxd_device *idxd);
int idxd_init_debugfs(void);
void idxd_remove_debugfs(void);