module: Convert symbol namespace to string literal
[linux.git] / drivers / vfio / pci / qat / main.c
blob845ed15b67718c5d7c2b7cd98b091f50cd17c411
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2024 Intel Corporation */
4 #include <linux/anon_inodes.h>
5 #include <linux/container_of.h>
6 #include <linux/device.h>
7 #include <linux/file.h>
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/mutex.h>
12 #include <linux/pci.h>
13 #include <linux/sizes.h>
14 #include <linux/types.h>
15 #include <linux/uaccess.h>
16 #include <linux/vfio_pci_core.h>
17 #include <linux/qat/qat_mig_dev.h>
/*
 * The migration data of each Intel QAT VF device is encapsulated into a
 * 4096-byte block. The data consists of two parts.
 * The first is a pre-configured set of attributes of the VF being migrated,
 * which are only set when it is created. This can be migrated during the
 * pre-copy stage and used for a device compatibility check.
 * The second is the VF state. This includes the required MMIO regions and
 * the shadow states maintained by the QAT PF driver. This part can only be
 * saved when the VF is fully quiesced, and is migrated during the stop-copy
 * stage. Both parts of the data are saved in hierarchical structures
 * consisting of a preamble section and several raw state sections.
 * When the pre-configured part of the migration data has been fully
 * retrieved from user space, the preamble section is used to validate the
 * correctness of the data blocks and to check version compatibility. The
 * raw state sections are then used to perform a device compatibility check.
 * When the device transitions out of the RESUMING state, the VF states are
 * extracted from the raw state sections of the VF state part of the
 * migration data and then loaded into the device.
 */
39 struct qat_vf_migration_file {
40 struct file *filp;
41 /* protects migration region context */
42 struct mutex lock;
43 bool disabled;
44 struct qat_vf_core_device *qat_vdev;
45 ssize_t filled_size;
48 struct qat_vf_core_device {
49 struct vfio_pci_core_device core_device;
50 struct qat_mig_dev *mdev;
51 /* protects migration state */
52 struct mutex state_mutex;
53 enum vfio_device_mig_state mig_state;
54 struct qat_vf_migration_file *resuming_migf;
55 struct qat_vf_migration_file *saving_migf;
58 static int qat_vf_pci_open_device(struct vfio_device *core_vdev)
60 struct qat_vf_core_device *qat_vdev =
61 container_of(core_vdev, struct qat_vf_core_device,
62 core_device.vdev);
63 struct vfio_pci_core_device *vdev = &qat_vdev->core_device;
64 int ret;
66 ret = vfio_pci_core_enable(vdev);
67 if (ret)
68 return ret;
70 ret = qat_vfmig_open(qat_vdev->mdev);
71 if (ret) {
72 vfio_pci_core_disable(vdev);
73 return ret;
75 qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
77 vfio_pci_core_finish_enable(vdev);
79 return 0;
82 static void qat_vf_disable_fd(struct qat_vf_migration_file *migf)
84 mutex_lock(&migf->lock);
85 migf->disabled = true;
86 migf->filp->f_pos = 0;
87 migf->filled_size = 0;
88 mutex_unlock(&migf->lock);
91 static void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev)
93 if (qat_vdev->resuming_migf) {
94 qat_vf_disable_fd(qat_vdev->resuming_migf);
95 fput(qat_vdev->resuming_migf->filp);
96 qat_vdev->resuming_migf = NULL;
99 if (qat_vdev->saving_migf) {
100 qat_vf_disable_fd(qat_vdev->saving_migf);
101 fput(qat_vdev->saving_migf->filp);
102 qat_vdev->saving_migf = NULL;
106 static void qat_vf_pci_close_device(struct vfio_device *core_vdev)
108 struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
109 struct qat_vf_core_device, core_device.vdev);
111 qat_vfmig_close(qat_vdev->mdev);
112 qat_vf_disable_fds(qat_vdev);
113 vfio_pci_core_close_device(core_vdev);
116 static long qat_vf_precopy_ioctl(struct file *filp, unsigned int cmd,
117 unsigned long arg)
119 struct qat_vf_migration_file *migf = filp->private_data;
120 struct qat_vf_core_device *qat_vdev = migf->qat_vdev;
121 struct qat_mig_dev *mig_dev = qat_vdev->mdev;
122 struct vfio_precopy_info info;
123 loff_t *pos = &filp->f_pos;
124 unsigned long minsz;
125 int ret = 0;
127 if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
128 return -ENOTTY;
130 minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);
132 if (copy_from_user(&info, (void __user *)arg, minsz))
133 return -EFAULT;
134 if (info.argsz < minsz)
135 return -EINVAL;
137 mutex_lock(&qat_vdev->state_mutex);
138 if (qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY &&
139 qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) {
140 mutex_unlock(&qat_vdev->state_mutex);
141 return -EINVAL;
144 mutex_lock(&migf->lock);
145 if (migf->disabled) {
146 ret = -ENODEV;
147 goto out;
150 if (*pos > mig_dev->setup_size) {
151 ret = -EINVAL;
152 goto out;
155 info.dirty_bytes = 0;
156 info.initial_bytes = mig_dev->setup_size - *pos;
158 out:
159 mutex_unlock(&migf->lock);
160 mutex_unlock(&qat_vdev->state_mutex);
161 if (ret)
162 return ret;
163 return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
166 static ssize_t qat_vf_save_read(struct file *filp, char __user *buf,
167 size_t len, loff_t *pos)
169 struct qat_vf_migration_file *migf = filp->private_data;
170 struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
171 ssize_t done = 0;
172 loff_t *offs;
173 int ret;
175 if (pos)
176 return -ESPIPE;
177 offs = &filp->f_pos;
179 mutex_lock(&migf->lock);
180 if (*offs > migf->filled_size || *offs < 0) {
181 done = -EINVAL;
182 goto out_unlock;
185 if (migf->disabled) {
186 done = -ENODEV;
187 goto out_unlock;
190 len = min_t(size_t, migf->filled_size - *offs, len);
191 if (len) {
192 ret = copy_to_user(buf, mig_dev->state + *offs, len);
193 if (ret) {
194 done = -EFAULT;
195 goto out_unlock;
197 *offs += len;
198 done = len;
201 out_unlock:
202 mutex_unlock(&migf->lock);
203 return done;
206 static int qat_vf_release_file(struct inode *inode, struct file *filp)
208 struct qat_vf_migration_file *migf = filp->private_data;
210 qat_vf_disable_fd(migf);
211 mutex_destroy(&migf->lock);
212 kfree(migf);
214 return 0;
217 static const struct file_operations qat_vf_save_fops = {
218 .owner = THIS_MODULE,
219 .read = qat_vf_save_read,
220 .unlocked_ioctl = qat_vf_precopy_ioctl,
221 .compat_ioctl = compat_ptr_ioctl,
222 .release = qat_vf_release_file,
225 static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev,
226 struct qat_vf_migration_file *migf)
228 int ret;
230 ret = qat_vfmig_save_state(qat_vdev->mdev);
231 if (ret)
232 return ret;
233 migf->filled_size = qat_vdev->mdev->state_size;
235 return 0;
238 static int qat_vf_save_setup(struct qat_vf_core_device *qat_vdev,
239 struct qat_vf_migration_file *migf)
241 int ret;
243 ret = qat_vfmig_save_setup(qat_vdev->mdev);
244 if (ret)
245 return ret;
246 migf->filled_size = qat_vdev->mdev->setup_size;
248 return 0;
252 * Allocate a file handler for user space and then save the migration data for
253 * the device being migrated. If this is called in the pre-copy stage, save the
254 * pre-configured device data. Otherwise, if this is called in the stop-copy
255 * stage, save the device state. In both cases, update the data size which can
256 * then be read from user space.
258 static struct qat_vf_migration_file *
259 qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev, bool pre_copy)
261 struct qat_vf_migration_file *migf;
262 int ret;
264 migf = kzalloc(sizeof(*migf), GFP_KERNEL);
265 if (!migf)
266 return ERR_PTR(-ENOMEM);
268 migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops,
269 migf, O_RDONLY);
270 ret = PTR_ERR_OR_ZERO(migf->filp);
271 if (ret) {
272 kfree(migf);
273 return ERR_PTR(ret);
276 stream_open(migf->filp->f_inode, migf->filp);
277 mutex_init(&migf->lock);
279 if (pre_copy)
280 ret = qat_vf_save_setup(qat_vdev, migf);
281 else
282 ret = qat_vf_save_state(qat_vdev, migf);
283 if (ret) {
284 fput(migf->filp);
285 return ERR_PTR(ret);
288 migf->qat_vdev = qat_vdev;
290 return migf;
293 static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf,
294 size_t len, loff_t *pos)
296 struct qat_vf_migration_file *migf = filp->private_data;
297 struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev;
298 loff_t end, *offs;
299 ssize_t done = 0;
300 int ret;
302 if (pos)
303 return -ESPIPE;
304 offs = &filp->f_pos;
306 if (*offs < 0 ||
307 check_add_overflow(len, *offs, &end))
308 return -EOVERFLOW;
310 if (end > mig_dev->state_size)
311 return -ENOMEM;
313 mutex_lock(&migf->lock);
314 if (migf->disabled) {
315 done = -ENODEV;
316 goto out_unlock;
319 ret = copy_from_user(mig_dev->state + *offs, buf, len);
320 if (ret) {
321 done = -EFAULT;
322 goto out_unlock;
324 *offs += len;
325 migf->filled_size += len;
328 * Load the pre-configured device data first to check if the target
329 * device is compatible with the source device.
331 ret = qat_vfmig_load_setup(mig_dev, migf->filled_size);
332 if (ret && ret != -EAGAIN) {
333 done = ret;
334 goto out_unlock;
336 done = len;
338 out_unlock:
339 mutex_unlock(&migf->lock);
340 return done;
343 static const struct file_operations qat_vf_resume_fops = {
344 .owner = THIS_MODULE,
345 .write = qat_vf_resume_write,
346 .release = qat_vf_release_file,
349 static struct qat_vf_migration_file *
350 qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev)
352 struct qat_vf_migration_file *migf;
353 int ret;
355 migf = kzalloc(sizeof(*migf), GFP_KERNEL);
356 if (!migf)
357 return ERR_PTR(-ENOMEM);
359 migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops, migf, O_WRONLY);
360 ret = PTR_ERR_OR_ZERO(migf->filp);
361 if (ret) {
362 kfree(migf);
363 return ERR_PTR(ret);
366 migf->qat_vdev = qat_vdev;
367 migf->filled_size = 0;
368 stream_open(migf->filp->f_inode, migf->filp);
369 mutex_init(&migf->lock);
371 return migf;
374 static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev)
376 return qat_vfmig_load_state(qat_vdev->mdev);
379 static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new)
381 u32 cur = qat_vdev->mig_state;
382 int ret;
385 * As the device is not capable of just stopping P2P DMAs, suspend the
386 * device completely once any of the P2P states are reached.
387 * When it is suspended, all its MMIO registers can still be operated
388 * correctly, jobs submitted through ring are queued while no jobs are
389 * processed by the device. The MMIO states can be safely migrated to
390 * the target VF during stop-copy stage and restored correctly in the
391 * target VF. All queued jobs can be resumed then.
393 if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
394 (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
395 ret = qat_vfmig_suspend(qat_vdev->mdev);
396 if (ret)
397 return ERR_PTR(ret);
398 return NULL;
401 if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) ||
402 (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) {
403 qat_vfmig_resume(qat_vdev->mdev);
404 return NULL;
407 if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
408 (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
409 return NULL;
411 if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
412 struct qat_vf_migration_file *migf;
414 migf = qat_vf_save_device_data(qat_vdev, false);
415 if (IS_ERR(migf))
416 return ERR_CAST(migf);
417 get_file(migf->filp);
418 qat_vdev->saving_migf = migf;
419 return migf->filp;
422 if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
423 struct qat_vf_migration_file *migf;
425 migf = qat_vf_resume_device_data(qat_vdev);
426 if (IS_ERR(migf))
427 return ERR_CAST(migf);
428 get_file(migf->filp);
429 qat_vdev->resuming_migf = migf;
430 return migf->filp;
433 if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) ||
434 (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
435 (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) {
436 qat_vf_disable_fds(qat_vdev);
437 return NULL;
440 if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) ||
441 (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
442 struct qat_vf_migration_file *migf;
444 migf = qat_vf_save_device_data(qat_vdev, true);
445 if (IS_ERR(migf))
446 return ERR_CAST(migf);
447 get_file(migf->filp);
448 qat_vdev->saving_migf = migf;
449 return migf->filp;
452 if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) {
453 struct qat_vf_migration_file *migf = qat_vdev->saving_migf;
455 if (!migf)
456 return ERR_PTR(-EINVAL);
457 ret = qat_vf_save_state(qat_vdev, migf);
458 if (ret)
459 return ERR_PTR(ret);
460 return NULL;
463 if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
464 ret = qat_vf_load_device_data(qat_vdev);
465 if (ret)
466 return ERR_PTR(ret);
468 qat_vf_disable_fds(qat_vdev);
469 return NULL;
472 /* vfio_mig_get_next_state() does not use arcs other than the above */
473 WARN_ON(true);
474 return ERR_PTR(-EINVAL);
477 static void qat_vf_reset_done(struct qat_vf_core_device *qat_vdev)
479 qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
480 qat_vfmig_reset(qat_vdev->mdev);
481 qat_vf_disable_fds(qat_vdev);
484 static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev,
485 enum vfio_device_mig_state new_state)
487 struct qat_vf_core_device *qat_vdev = container_of(vdev,
488 struct qat_vf_core_device, core_device.vdev);
489 enum vfio_device_mig_state next_state;
490 struct file *res = NULL;
491 int ret;
493 mutex_lock(&qat_vdev->state_mutex);
494 while (new_state != qat_vdev->mig_state) {
495 ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state,
496 new_state, &next_state);
497 if (ret) {
498 res = ERR_PTR(ret);
499 break;
501 res = qat_vf_pci_step_device_state(qat_vdev, next_state);
502 if (IS_ERR(res))
503 break;
504 qat_vdev->mig_state = next_state;
505 if (WARN_ON(res && new_state != qat_vdev->mig_state)) {
506 fput(res);
507 res = ERR_PTR(-EINVAL);
508 break;
511 mutex_unlock(&qat_vdev->state_mutex);
513 return res;
516 static int qat_vf_pci_get_device_state(struct vfio_device *vdev,
517 enum vfio_device_mig_state *curr_state)
519 struct qat_vf_core_device *qat_vdev = container_of(vdev,
520 struct qat_vf_core_device, core_device.vdev);
522 mutex_lock(&qat_vdev->state_mutex);
523 *curr_state = qat_vdev->mig_state;
524 mutex_unlock(&qat_vdev->state_mutex);
526 return 0;
529 static int qat_vf_pci_get_data_size(struct vfio_device *vdev,
530 unsigned long *stop_copy_length)
532 struct qat_vf_core_device *qat_vdev = container_of(vdev,
533 struct qat_vf_core_device, core_device.vdev);
535 mutex_lock(&qat_vdev->state_mutex);
536 *stop_copy_length = qat_vdev->mdev->state_size;
537 mutex_unlock(&qat_vdev->state_mutex);
539 return 0;
542 static const struct vfio_migration_ops qat_vf_pci_mig_ops = {
543 .migration_set_state = qat_vf_pci_set_device_state,
544 .migration_get_state = qat_vf_pci_get_device_state,
545 .migration_get_data_size = qat_vf_pci_get_data_size,
548 static void qat_vf_pci_release_dev(struct vfio_device *core_vdev)
550 struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
551 struct qat_vf_core_device, core_device.vdev);
553 qat_vfmig_cleanup(qat_vdev->mdev);
554 qat_vfmig_destroy(qat_vdev->mdev);
555 mutex_destroy(&qat_vdev->state_mutex);
556 vfio_pci_core_release_dev(core_vdev);
559 static int qat_vf_pci_init_dev(struct vfio_device *core_vdev)
561 struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
562 struct qat_vf_core_device, core_device.vdev);
563 struct qat_mig_dev *mdev;
564 struct pci_dev *parent;
565 int ret, vf_id;
567 core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P |
568 VFIO_MIGRATION_PRE_COPY;
569 core_vdev->mig_ops = &qat_vf_pci_mig_ops;
571 ret = vfio_pci_core_init_dev(core_vdev);
572 if (ret)
573 return ret;
575 mutex_init(&qat_vdev->state_mutex);
577 parent = pci_physfn(qat_vdev->core_device.pdev);
578 vf_id = pci_iov_vf_id(qat_vdev->core_device.pdev);
579 if (vf_id < 0) {
580 ret = -ENODEV;
581 goto err_rel;
584 mdev = qat_vfmig_create(parent, vf_id);
585 if (IS_ERR(mdev)) {
586 ret = PTR_ERR(mdev);
587 goto err_rel;
590 ret = qat_vfmig_init(mdev);
591 if (ret)
592 goto err_destroy;
594 qat_vdev->mdev = mdev;
596 return 0;
598 err_destroy:
599 qat_vfmig_destroy(mdev);
600 err_rel:
601 vfio_pci_core_release_dev(core_vdev);
602 return ret;
605 static const struct vfio_device_ops qat_vf_pci_ops = {
606 .name = "qat-vf-vfio-pci",
607 .init = qat_vf_pci_init_dev,
608 .release = qat_vf_pci_release_dev,
609 .open_device = qat_vf_pci_open_device,
610 .close_device = qat_vf_pci_close_device,
611 .ioctl = vfio_pci_core_ioctl,
612 .read = vfio_pci_core_read,
613 .write = vfio_pci_core_write,
614 .mmap = vfio_pci_core_mmap,
615 .request = vfio_pci_core_request,
616 .match = vfio_pci_core_match,
617 .bind_iommufd = vfio_iommufd_physical_bind,
618 .unbind_iommufd = vfio_iommufd_physical_unbind,
619 .attach_ioas = vfio_iommufd_physical_attach_ioas,
620 .detach_ioas = vfio_iommufd_physical_detach_ioas,
623 static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev)
625 struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev);
627 return container_of(core_device, struct qat_vf_core_device, core_device);
630 static void qat_vf_pci_aer_reset_done(struct pci_dev *pdev)
632 struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);
634 if (!qat_vdev->mdev)
635 return;
637 mutex_lock(&qat_vdev->state_mutex);
638 qat_vf_reset_done(qat_vdev);
639 mutex_unlock(&qat_vdev->state_mutex);
642 static int
643 qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
645 struct device *dev = &pdev->dev;
646 struct qat_vf_core_device *qat_vdev;
647 int ret;
649 qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops);
650 if (IS_ERR(qat_vdev))
651 return PTR_ERR(qat_vdev);
653 pci_set_drvdata(pdev, &qat_vdev->core_device);
654 ret = vfio_pci_core_register_device(&qat_vdev->core_device);
655 if (ret)
656 goto out_put_device;
658 return 0;
660 out_put_device:
661 vfio_put_device(&qat_vdev->core_device.vdev);
662 return ret;
665 static void qat_vf_vfio_pci_remove(struct pci_dev *pdev)
667 struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);
669 vfio_pci_core_unregister_device(&qat_vdev->core_device);
670 vfio_put_device(&qat_vdev->core_device.vdev);
673 static const struct pci_device_id qat_vf_vfio_pci_table[] = {
674 /* Intel QAT GEN4 4xxx VF device */
675 { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },
676 { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) },
677 { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) },
680 MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);
682 static const struct pci_error_handlers qat_vf_err_handlers = {
683 .reset_done = qat_vf_pci_aer_reset_done,
684 .error_detected = vfio_pci_core_aer_err_detected,
687 static struct pci_driver qat_vf_vfio_pci_driver = {
688 .name = "qat_vfio_pci",
689 .id_table = qat_vf_vfio_pci_table,
690 .probe = qat_vf_vfio_pci_probe,
691 .remove = qat_vf_vfio_pci_remove,
692 .err_handler = &qat_vf_err_handlers,
693 .driver_managed_dma = true,
695 module_pci_driver(qat_vf_vfio_pci_driver);
697 MODULE_LICENSE("GPL");
698 MODULE_AUTHOR("Xin Zeng <xin.zeng@intel.com>");
699 MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family");
700 MODULE_IMPORT_NS("CRYPTO_QAT");