// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, HiSilicon Ltd.
 */

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/hisi_acc_qm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/anon_inodes.h>

#include "hisi_acc_vfio_pci.h"

/* Return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
					  val, !(val & 0x1), MB_POLL_PERIOD_US,
					  MB_POLL_TIMEOUT_US);
}

/*
 * Each state Reg is checked 100 times,
 * with a delay of 100 microseconds after each check
 */
static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
{
	int check_times = 0;
	u32 state;

	state = readl(qm->io_base + regs);
	while (state && check_times < ERROR_CHECK_TIMEOUT) {
		udelay(CHECK_DELAY_TIME);
		state = readl(qm->io_base + regs);
		check_times++;
	}

	return state;
}

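/*
 * Bulk read/write helpers for the VF QM control registers. Each register is
 * 32 bits wide and consecutive registers are QM_REG_ADDR_OFFSET apart; the
 * register count is bounded by QM_REGS_MAX_LEN so accesses stay inside the
 * expected register window.
 */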
static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
			u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++) {
		data[i] = readl(qm->io_base + reg_addr);
		reg_addr += QM_REG_ADDR_OFFSET;
	}

	return 0;
}

static int qm_write_regs(struct hisi_qm *qm, u32 reg,
			 u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++)
		writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);

	return 0;
}

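/*
 * Query the SQC VFT through the QM mailbox and return the number of queue
 * pairs assigned to this function; the queue base is returned through @base.
 */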
static int qm_get_vft(struct hisi_qm *qm, u32 *base)
{
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		   QM_XQC_ADDR_OFFSET);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		 QM_XQC_ADDR_OFFSET);

	return 0;
}

static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		 QM_XQC_ADDR_OFFSET);

	return 0;
}

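/*
 * qm_get_regs()/qm_set_regs() snapshot and restore the VF QM control
 * registers (interrupt masks, IFC registers, page size and the EQC/AEQC
 * context words) to/from the acc_vf_data migration blob.
 */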
static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
			   &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}

static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
		return -EBUSY;
	}

	ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
			    &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}

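/*
 * Ring a QM doorbell for the VF. SQ/CQ doorbells and EQ/AEQ doorbells use
 * different base offsets; the 64-bit doorbell word encodes the queue number,
 * command, index and priority.
 */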
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
		  u16 index, u8 priority)
{
	u64 doorbell;
	u64 dbase;
	u16 randata = 0;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, qm->io_base + dbase);
}

static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
{
	unsigned int val;
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
	writel(vf_id, qm->io_base + QM_VFT_CFG);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
		  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
		   QM_XQC_ADDR_OFFSET);
	*rbase = QM_SQC_VFT_BASE_MASK_V2 &
		 (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static void qm_dev_cmd_init(struct hisi_qm *qm)
{
	/* Clear VF communication status registers. */
	writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);

	/* Enable pf and vf communication. */
	writel(0x0, qm->io_base + QM_IFC_INT_MASK);
}

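/*
 * Trigger a writeback of the VF QM internal cache and wait for completion,
 * so the queue context in memory is consistent before it is saved for
 * migration.
 */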
static int vf_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;

	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), MB_POLL_PERIOD_US,
				       MB_POLL_TIMEOUT_US)) {
		dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
		return -EINVAL;
	}

	return 0;
}

static void vf_qm_fun_reset(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
}

static int vf_qm_func_stop(struct hisi_qm *qm)
{
	return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}

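/*
 * Validate the match data received from the migration source against the
 * destination VF: magic, device id, queue pair count and queue isolation
 * state must all agree before a restore is allowed.
 */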
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	u32 que_iso_state;
	int ret;

	if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
		return 0;

	if (vf_data->acc_magic != ACC_DEV_MAGIC) {
		dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
		return -EINVAL;
	}

	if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
		dev_err(dev, "failed to match VF devices\n");
		return -EINVAL;
	}

	/* VF qp num check */
	ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums\n");
		return -EINVAL;
	}

	if (ret != vf_data->qp_num) {
		dev_err(dev, "failed to match VF qp num\n");
		return -EINVAL;
	}

	vf_qm->qp_num = ret;

	/* VF isolation state check */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	if (vf_data->que_iso_cfg != que_iso_state) {
		dev_err(dev, "failed to match isolation state\n");
		return -EINVAL;
	}

	ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_STATE\n");
		return ret;
	}

	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
	hisi_acc_vdev->match_done = true;
	return 0;
}

static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
				struct acc_vf_data *vf_data)
{
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &pf_qm->pdev->dev;
	int vf_id = hisi_acc_vdev->vf_id;
	int ret;

	vf_data->acc_magic = ACC_DEV_MAGIC;
	vf_data->dev_id = hisi_acc_vdev->vf_dev->device;

	/* VF qp num save from PF */
	ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums!\n");
		return -EINVAL;
	}

	vf_data->qp_num = ret;

	/* VF isolation state save from PF */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
		return ret;
	}

	return 0;
}

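/*
 * Restore the VF QM state from the received migration blob: queue DMA
 * addresses, queue base/number, the saved control registers and the SQC/CQC
 * base tables.
 */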
static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			   struct hisi_acc_vf_migration_file *migf)
{
	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &qm->pdev->dev;
	struct acc_vf_data *vf_data = &migf->vf_data;
	int ret;

	/* Return if only match data was transferred */
	if (migf->total_length == QM_MATCH_SIZE)
		return 0;

	if (migf->total_length < sizeof(struct acc_vf_data))
		return -EINVAL;

	qm->eqe_dma = vf_data->eqe_dma;
	qm->aeqe_dma = vf_data->aeqe_dma;
	qm->sqc_dma = vf_data->sqc_dma;
	qm->cqc_dma = vf_data->cqc_dma;

	qm->qp_base = vf_data->qp_base;
	qm->qp_num = vf_data->qp_num;

	ret = qm_set_regs(qm, vf_data);
	if (ret) {
		dev_err(dev, "set VF regs failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set sqc failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set cqc failed\n");
		return ret;
	}

	qm_dev_cmd_init(qm);
	return 0;
}

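/*
 * Save the VF QM state into the migration blob. If the device never became
 * ready only the match data is transferred; otherwise the cache is written
 * back and the control registers and queue context addresses are captured.
 */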
static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			    struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	int ret;

	if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
		/* Update state and return with match data */
		vf_data->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
		migf->total_length = QM_MATCH_SIZE;
		return 0;
	}

	vf_data->vf_qm_state = QM_READY;
	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;

	ret = vf_qm_cache_wb(vf_qm);
	if (ret) {
		dev_err(dev, "failed to writeback QM Cache!\n");
		return ret;
	}

	ret = qm_get_regs(vf_qm, vf_data);
	if (ret)
		return -EINVAL;

	/* Every reg is 32 bit, the dma address is 64 bit. */
	vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];

	/* Through SQC_BT/CQC_BT to get sqc and cqc address */
	ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
	if (ret) {
		dev_err(dev, "failed to read SQC addr!\n");
		return -EINVAL;
	}

	ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
	if (ret) {
		dev_err(dev, "failed to read CQC addr!\n");
		return -EINVAL;
	}

	migf->total_length = sizeof(struct acc_vf_data);
	return 0;
}

static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

	return container_of(core_device, struct hisi_acc_vf_core_device,
			    core_device);
}

/* Check the PF's RAS state and Function INT state */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
	struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
	struct device *dev = &qm->pdev->dev;
	u32 state;

	/* Check RAS state */
	state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM RAS state!\n");
		return -EBUSY;
	}

	/* Check Function Communication state between PF and VF */
	state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT state!\n");
		return -EBUSY;
	}
	state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT SET state!\n");
		return -EBUSY;
	}

	/* Check submodule task state */
	switch (vf_pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM SEC Core INT state!\n");
			return -EBUSY;
		}
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
			return -EBUSY;
		}
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM ZIP Core INT state!\n");
			return -EBUSY;
		}
		break;
	default:
		dev_err(dev, "failed to detect acc module type!\n");
		return -EINVAL;
	}

	return 0;
}

static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->total_length = 0;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}

static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	if (hisi_acc_vdev->resuming_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
		fput(hisi_acc_vdev->resuming_migf->filp);
		hisi_acc_vdev->resuming_migf = NULL;
	}

	if (hisi_acc_vdev->saving_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
		fput(hisi_acc_vdev->saving_migf->filp);
		hisi_acc_vdev->saving_migf = NULL;
	}
}

static void hisi_acc_vf_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
	hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	hisi_acc_vf_disable_fds(hisi_acc_vdev);
}

static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	if (hisi_acc_vdev->vf_qm_state != QM_READY)
		return;

	/* Make sure the device is enabled */
	qm_dev_cmd_init(vf_qm);

	vf_qm_fun_reset(vf_qm);
}

static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
	int ret;

	/* Recover data to VF */
	ret = vf_qm_load_data(hisi_acc_vdev, migf);
	if (ret) {
		dev_err(dev, "failed to recover the VF!\n");
		return ret;
	}

	return 0;
}

static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;

	hisi_acc_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);
	return 0;
}

static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
					size_t len, loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	u8 *vf_data = (u8 *)&migf->vf_data;
	loff_t requested_length;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;

	if (requested_length > sizeof(struct acc_vf_data))
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	ret = copy_from_user(vf_data + *pos, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*pos += len;
	done = len;
	migf->total_length += len;

	ret = vf_qm_check_match(migf->hisi_acc_vdev, migf);
	if (ret)
		done = -EFAULT;
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = hisi_acc_vf_resume_write,
	.release = hisi_acc_vf_release_file,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
					O_WRONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->hisi_acc_vdev = hisi_acc_vdev;
	return migf;
}

static long hisi_acc_vf_precopy_ioctl(struct file *filp,
				      unsigned int cmd, unsigned long arg)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = migf->hisi_acc_vdev;
	loff_t *pos = &filp->f_pos;
	struct vfio_precopy_info info;
	unsigned long minsz;
	int ret;

	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
		return -ENOTTY;

	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	if (hisi_acc_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY) {
		mutex_unlock(&hisi_acc_vdev->state_mutex);
		return -EINVAL;
	}

	mutex_lock(&migf->lock);

	if (migf->disabled) {
		ret = -ENODEV;
		goto out;
	}

	if (*pos > migf->total_length) {
		ret = -EINVAL;
		goto out;
	}

	info.dirty_bytes = 0;
	info.initial_bytes = migf->total_length - *pos;
	mutex_unlock(&migf->lock);
	mutex_unlock(&hisi_acc_vdev->state_mutex);

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
out:
	mutex_unlock(&migf->lock);
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return ret;
}

static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
				     loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*pos > migf->total_length) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	len = min_t(size_t, migf->total_length - *pos, len);
	if (len) {
		u8 *vf_data = (u8 *)&migf->vf_data;

		ret = copy_to_user(buf, vf_data + *pos, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += len;
		done = len;
	}

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = hisi_acc_vf_save_read,
	.unlocked_ioctl = hisi_acc_vf_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = hisi_acc_vf_release_file,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_open_saving_migf(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
					O_RDONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->hisi_acc_vdev = hisi_acc_vdev;

	ret = vf_qm_get_match_data(hisi_acc_vdev, &migf->vf_data);
	if (ret) {
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	return migf;
}

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pre_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
	if (IS_ERR(migf))
		return migf;

	migf->total_length = QM_MATCH_SIZE;
	return migf;
}

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, bool open)
{
	int ret;
	struct hisi_acc_vf_migration_file *migf = NULL;

	if (open) {
		/*
		 * Userspace didn't use PRECOPY support. Hence saving_migf
		 * is not opened yet.
		 */
		migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
		if (IS_ERR(migf))
			return migf;
	} else {
		migf = hisi_acc_vdev->saving_migf;
	}

	ret = vf_qm_state_save(hisi_acc_vdev, migf);
	if (ret)
		return ERR_PTR(ret);

	return open ? migf : NULL;
}

static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	int ret;

	ret = vf_qm_func_stop(vf_qm);
	if (ret) {
		dev_err(dev, "failed to stop QM VF function!\n");
		return ret;
	}

	ret = hisi_acc_check_int_state(hisi_acc_vdev);
	if (ret) {
		dev_err(dev, "failed to check QM INT state!\n");
		return ret;
	}
	return 0;
}

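/*
 * Handle a single migration state transition (arc). Only the arcs used by
 * vfio_mig_get_next_state() are implemented; any other transition is treated
 * as a driver bug and fails with -EINVAL.
 */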
static struct file *
hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     u32 new)
{
	u32 cur = hisi_acc_vdev->mig_state;
	int ret;

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pre_copy(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);

		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, false);
		if (IS_ERR(migf))
			return ERR_CAST(migf);

		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, true);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->resuming_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_load_state(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_start_device(hisi_acc_vdev);
		return NULL;
	}

	/*
	 * vfio_mig_get_next_state() does not use arcs other than the above
	 */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}

static struct file *
hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state new_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	while (new_state != hisi_acc_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev,
					      hisi_acc_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(-EINVAL);
			break;
		}

		res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
		if (IS_ERR(res))
			break;
		hisi_acc_vdev->mig_state = next_state;
		if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return res;
}

static int
hisi_acc_vfio_pci_get_data_size(struct vfio_device *vdev,
				unsigned long *stop_copy_length)
{
	*stop_copy_length = sizeof(struct acc_vf_data);
	return 0;
}

static int
hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state *curr_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);

	mutex_lock(&hisi_acc_vdev->state_mutex);
	*curr_state = hisi_acc_vdev->mig_state;
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return 0;
}

static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	if (hisi_acc_vdev->core_device.vdev.migration_flags !=
				VFIO_MIGRATION_STOP_COPY)
		return;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	hisi_acc_vf_reset(hisi_acc_vdev);
	mutex_unlock(&hisi_acc_vdev->state_mutex);
}

static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct pci_dev *vf_dev = vdev->pdev;

	/*
	 * ACC VF dev BAR2 region consists of both functional register space
	 * and migration control register space. For migration to work, we
	 * need access to both. Hence, we map the entire BAR2 region here.
	 * But unnecessarily exposing the migration BAR region to the Guest
	 * has the potential to prevent/corrupt the Guest migration. Hence,
	 * we restrict access to the migration control space from the
	 * Guest (please see the mmap/ioctl/read/write override functions).
	 *
	 * Please note that it is OK to expose the entire VF BAR if migration
	 * is not supported or required, as this cannot affect the ACC PF
	 * configurations.
	 *
	 * Also the HiSilicon ACC VF devices supported by this driver on
	 * HiSilicon hardware platforms are integrated end point devices
	 * and the platform lacks the capability to perform any PCIe P2P
	 * between these devices.
	 */

	vf_qm->io_base =
		ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
			pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
	if (!vf_qm->io_base)
		return -EIO;

	vf_qm->fun_type = QM_HW_VF;
	vf_qm->pdev = vf_dev;
	mutex_init(&vf_qm->mailbox_lock);

	return 0;
}

static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm;
	struct pci_driver *pf_driver;

	if (!pdev->is_virtfn)
		return NULL;

	switch (pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		pf_driver = hisi_sec_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		pf_driver = hisi_hpre_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		pf_driver = hisi_zip_get_pf_driver();
		break;
	default:
		return NULL;
	}

	if (!pf_driver)
		return NULL;

	pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);

	return !IS_ERR(pf_qm) ? pf_qm : NULL;
}

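/*
 * BAR2 is split in half: the lower half holds the functional registers
 * exposed to the guest, the upper half holds the migration control
 * registers. Clamp read/write accesses so they never reach the migration
 * half.
 */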
static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
					size_t count, loff_t *ppos,
					size_t *new_count)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		/* Check if access is for migration control region */
		if (pos >= end)
			return -EINVAL;

		*new_count = min(count, (size_t)(end - pos));
	}

	return 0;
}

static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
				  struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		u64 req_len, pgoff, req_start;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		req_len = vma->vm_end - vma->vm_start;
		pgoff = vma->vm_pgoff &
			((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
		req_start = pgoff << PAGE_SHIFT;

		if (req_start + req_len > end)
			return -EINVAL;
	}

	return vfio_pci_core_mmap(core_vdev, vma);
}

static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
				       const char __user *buf, size_t count,
				       loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
}

static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
				      char __user *buf, size_t count,
				      loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
}

static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
				    unsigned long arg)
{
	if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_pci_core_device *vdev =
			container_of(core_vdev, struct vfio_pci_core_device, vdev);
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			/*
			 * ACC VF dev BAR2 region consists of both functional
			 * register space and migration control register space.
			 * Report only the functional region to Guest.
			 */
			info.size = pci_resource_len(pdev, info.index) / 2;

			info.flags = VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE |
					VFIO_REGION_INFO_FLAG_MMAP;

			return copy_to_user((void __user *)arg, &info, minsz) ?
					    -EFAULT : 0;
		}
	}
	return vfio_pci_core_ioctl(core_vdev, cmd, arg);
}

static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	if (core_vdev->mig_ops) {
		ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
		if (ret) {
			vfio_pci_core_disable(vdev);
			return ret;
		}
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	}

	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	iounmap(vf_qm->io_base);
	vfio_pci_core_close_device(core_vdev);
}

static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
	.migration_set_state = hisi_acc_vfio_pci_set_device_state,
	.migration_get_state = hisi_acc_vfio_pci_get_device_state,
	.migration_get_data_size = hisi_acc_vfio_pci_get_data_size,
};

static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
	struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);

	hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
	hisi_acc_vdev->pf_qm = pf_qm;
	hisi_acc_vdev->vf_dev = pdev;
	mutex_init(&hisi_acc_vdev->state_mutex);

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY;
	core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;

	return vfio_pci_core_init_dev(core_vdev);
}

static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
	.name = "hisi-acc-vfio-pci-migration",
	.init = hisi_acc_vfio_pci_migrn_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = hisi_acc_vfio_pci_close_device,
	.ioctl = hisi_acc_vfio_pci_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = hisi_acc_vfio_pci_read,
	.write = hisi_acc_vfio_pci_write,
	.mmap = hisi_acc_vfio_pci_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
	.name = "hisi-acc-vfio-pci",
	.init = vfio_pci_core_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = vfio_pci_core_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev;
	const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
	struct hisi_qm *pf_qm;
	int vf_id;
	int ret;

	pf_qm = hisi_acc_get_pf_qm(pdev);
	if (pf_qm && pf_qm->ver >= QM_HW_V3) {
		vf_id = pci_iov_vf_id(pdev);
		if (vf_id >= 0)
			ops = &hisi_acc_vfio_pci_migrn_ops;
		else
			pci_warn(pdev, "migration support failed, continue with generic interface\n");
	}

	hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
					  core_device.vdev, &pdev->dev, ops);
	if (IS_ERR(hisi_acc_vdev))
		return PTR_ERR(hisi_acc_vdev);

	dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
	ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
	if (ret)
		goto out_put_vdev;
	return 0;

out_put_vdev:
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
	return ret;
}

*pdev
)
1410 struct hisi_acc_vf_core_device
*hisi_acc_vdev
= hisi_acc_drvdata(pdev
);
1412 vfio_pci_core_unregister_device(&hisi_acc_vdev
->core_device
);
1413 vfio_put_device(&hisi_acc_vdev
->core_device
.vdev
);
static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ }
};

MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);

static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
	.reset_done = hisi_acc_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver hisi_acc_vfio_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = hisi_acc_vfio_pci_table,
	.probe = hisi_acc_vfio_pci_probe,
	.remove = hisi_acc_vfio_pci_remove,
	.err_handler = &hisi_acc_vf_err_handlers,
	.driver_managed_dma = true,
};

module_pci_driver(hisi_acc_vfio_pci_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");