// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, HiSilicon Ltd.
 */

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/hisi_acc_qm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/anon_inodes.h>

#include "hisi_acc_vfio_pci.h"

/* Return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
					  val, !(val & 0x1), MB_POLL_PERIOD_US,
					  MB_POLL_TIMEOUT_US);
}

/*
 * Each state Reg is checked 100 times,
 * with a delay of 100 microseconds after each check
 */
static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
{
	int check_times = 0;
	u32 state;

	state = readl(qm->io_base + regs);
	while (state && check_times < ERROR_CHECK_TIMEOUT) {
		udelay(CHECK_DELAY_TIME);
		state = readl(qm->io_base + regs);
		check_times++;
	}

	return state;
}

static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
			u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++) {
		data[i] = readl(qm->io_base + reg_addr);
		reg_addr += QM_REG_ADDR_OFFSET;
	}

	return 0;
}

static int qm_write_regs(struct hisi_qm *qm, u32 reg,
			 u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++)
		writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);

	return 0;
}

static int qm_get_vft(struct hisi_qm *qm, u32 *base)
{
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		  QM_XQC_ADDR_OFFSET);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		  QM_XQC_ADDR_OFFSET);

	return 0;
}

static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		  QM_XQC_ADDR_OFFSET);

	return 0;
}

static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
			   &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}

static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
		return -EBUSY;
	}

	ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
			    &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
		  u16 index, u8 priority)
{
	u64 doorbell;
	u64 dbase;
	u16 randata = 0;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, qm->io_base + dbase);
}

static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
{
	unsigned int val;
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
	writel(vf_id, qm->io_base + QM_VFT_CFG);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
		  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
		  QM_XQC_ADDR_OFFSET);
	*rbase = QM_SQC_VFT_BASE_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static void qm_dev_cmd_init(struct hisi_qm *qm)
{
	/* Clear VF communication status registers. */
	writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);

	/* Enable pf and vf communication. */
	writel(0x0, qm->io_base + QM_IFC_INT_MASK);
}

static int vf_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;

	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), MB_POLL_PERIOD_US,
				       MB_POLL_TIMEOUT_US)) {
		dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
		return -EINVAL;
	}

	return 0;
}

static void vf_qm_fun_reset(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
}

static int vf_qm_func_stop(struct hisi_qm *qm)
{
	return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}

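/*
 * Validate the incoming match data against the destination VF: magic,
 * device ID, queue pair count and isolation state must all agree before
 * the migration state can be written back to the VF.
 */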
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	u32 que_iso_state;
	int ret;

	if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
		return 0;

	if (vf_data->acc_magic != ACC_DEV_MAGIC) {
		dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
		return -EINVAL;
	}

	if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
		dev_err(dev, "failed to match VF devices\n");
		return -EINVAL;
	}

	/* VF qp num check */
	ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums\n");
		return -EINVAL;
	}

	if (ret != vf_data->qp_num) {
		dev_err(dev, "failed to match VF qp num\n");
		return -EINVAL;
	}

	vf_qm->qp_num = ret;

	/* VF isolation state check */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	if (vf_data->que_iso_cfg != que_iso_state) {
		dev_err(dev, "failed to match isolation state\n");
		return -EINVAL;
	}

	ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_STATE\n");
		return ret;
	}

	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
	hisi_acc_vdev->match_done = true;
	return 0;
}

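/*
 * Collect the compatibility data (magic, device ID, queue pair range and
 * isolation state) from the PF so the destination can verify it matches.
 */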
static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
				struct acc_vf_data *vf_data)
{
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &pf_qm->pdev->dev;
	int vf_id = hisi_acc_vdev->vf_id;
	int ret;

	vf_data->acc_magic = ACC_DEV_MAGIC;
	vf_data->dev_id = hisi_acc_vdev->vf_dev->device;

	/* VF qp num save from PF */
	ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums!\n");
		return -EINVAL;
	}

	vf_data->qp_num = ret;

	/* VF isolation state save from PF */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
		return ret;
	}

	return 0;
}

static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			   struct hisi_acc_vf_migration_file *migf)
{
	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &qm->pdev->dev;
	struct acc_vf_data *vf_data = &migf->vf_data;
	int ret;

	/* Return if only match data was transferred */
	if (migf->total_length == QM_MATCH_SIZE)
		return 0;

	if (migf->total_length < sizeof(struct acc_vf_data))
		return -EINVAL;

	qm->eqe_dma = vf_data->eqe_dma;
	qm->aeqe_dma = vf_data->aeqe_dma;
	qm->sqc_dma = vf_data->sqc_dma;
	qm->cqc_dma = vf_data->cqc_dma;

	qm->qp_base = vf_data->qp_base;
	qm->qp_num = vf_data->qp_num;

	ret = qm_set_regs(qm, vf_data);
	if (ret) {
		dev_err(dev, "set VF regs failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set sqc failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set cqc failed\n");
		return ret;
	}

	qm_dev_cmd_init(qm);
	return 0;
}

static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &vf_qm->pdev->dev;
	int ret;

	ret = qm_get_regs(vf_qm, vf_data);
	if (ret)
		return -EINVAL;

	/* Every reg is 32 bit, the dma address is 64 bit. */
	vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];

	/* Through SQC_BT/CQC_BT to get sqc and cqc address */
	ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
	if (ret) {
		dev_err(dev, "failed to read SQC addr!\n");
		return -EINVAL;
	}

	ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
	if (ret) {
		dev_err(dev, "failed to read CQC addr!\n");
		return -EINVAL;
	}

	return 0;
}

static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			    struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	int ret;

	if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
		/* Update state and return with match data */
		vf_data->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
		migf->total_length = QM_MATCH_SIZE;
		return 0;
	}

	vf_data->vf_qm_state = QM_READY;
	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;

	ret = vf_qm_cache_wb(vf_qm);
	if (ret) {
		dev_err(dev, "failed to writeback QM Cache!\n");
		return ret;
	}

	ret = vf_qm_read_data(vf_qm, vf_data);
	if (ret)
		return -EINVAL;

	migf->total_length = sizeof(struct acc_vf_data);
	return 0;
}

static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

	return container_of(core_device, struct hisi_acc_vf_core_device,
			    core_device);
}

/* Check the PF's RAS state and Function INT state */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
	struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
	struct device *dev = &qm->pdev->dev;
	u32 state;

	/* Check RAS state */
	state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM RAS state!\n");
		return -EBUSY;
	}

	/* Check Function Communication state between PF and VF */
	state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT state!\n");
		return -EBUSY;
	}
	state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT SET state!\n");
		return -EBUSY;
	}

	/* Check submodule task state */
	switch (vf_pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM SEC Core INT state!\n");
			return -EBUSY;
		}
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
			return -EBUSY;
		}
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM ZIP Core INT state!\n");
			return -EBUSY;
		}
		break;
	default:
		dev_err(dev, "failed to detect acc module type!\n");
		return -EINVAL;
	}

	return 0;
}

static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->total_length = 0;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}

static void
hisi_acc_debug_migf_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			 struct hisi_acc_vf_migration_file *src_migf)
{
	struct hisi_acc_vf_migration_file *dst_migf = hisi_acc_vdev->debug_migf;

	if (!dst_migf)
		return;

	dst_migf->total_length = src_migf->total_length;
	memcpy(&dst_migf->vf_data, &src_migf->vf_data,
	       sizeof(struct acc_vf_data));
}

static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	if (hisi_acc_vdev->resuming_migf) {
		hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->resuming_migf);
		hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
		fput(hisi_acc_vdev->resuming_migf->filp);
		hisi_acc_vdev->resuming_migf = NULL;
	}

	if (hisi_acc_vdev->saving_migf) {
		hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->saving_migf);
		hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
		fput(hisi_acc_vdev->saving_migf->filp);
		hisi_acc_vdev->saving_migf = NULL;
	}
}

static struct hisi_acc_vf_core_device *hisi_acc_get_vf_dev(struct vfio_device *vdev)
{
	return container_of(vdev, struct hisi_acc_vf_core_device,
			    core_device.vdev);
}

static void hisi_acc_vf_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
	hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	hisi_acc_vf_disable_fds(hisi_acc_vdev);
}

static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	if (hisi_acc_vdev->vf_qm_state != QM_READY)
		return;

	/* Make sure the device is enabled */
	qm_dev_cmd_init(vf_qm);

	vf_qm_fun_reset(vf_qm);
}

static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
	int ret;

	/* Recover data to VF */
	ret = vf_qm_load_data(hisi_acc_vdev, migf);
	if (ret) {
		dev_err(dev, "failed to recover the VF!\n");
		return ret;
	}

	return 0;
}

static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;

	hisi_acc_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);
	return 0;
}

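/*
 * Userspace writes the saved device state into the resuming migration file.
 * Once enough data has arrived, the match data is validated against the
 * destination VF.
 */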
static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
					size_t len, loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	u8 *vf_data = (u8 *)&migf->vf_data;
	loff_t requested_length;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;

	if (requested_length > sizeof(struct acc_vf_data))
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	ret = copy_from_user(vf_data + *pos, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*pos += len;
	done = len;
	migf->total_length += len;

	ret = vf_qm_check_match(migf->hisi_acc_vdev, migf);
	if (ret)
		done = -EFAULT;
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = hisi_acc_vf_resume_write,
	.release = hisi_acc_vf_release_file,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
					O_WRONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->hisi_acc_vdev = hisi_acc_vdev;
	return migf;
}

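/*
 * VFIO_MIG_GET_PRECOPY_INFO: report how much of the saving migration file
 * is still left to read while the device is in the PRE_COPY state.
 */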
static long hisi_acc_vf_precopy_ioctl(struct file *filp,
				      unsigned int cmd, unsigned long arg)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = migf->hisi_acc_vdev;
	loff_t *pos = &filp->f_pos;
	struct vfio_precopy_info info;
	unsigned long minsz;
	int ret;

	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
		return -ENOTTY;

	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	if (hisi_acc_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY) {
		mutex_unlock(&hisi_acc_vdev->state_mutex);
		return -EINVAL;
	}

	mutex_lock(&migf->lock);

	if (migf->disabled) {
		ret = -ENODEV;
		goto out;
	}

	if (*pos > migf->total_length) {
		ret = -EINVAL;
		goto out;
	}

	info.dirty_bytes = 0;
	info.initial_bytes = migf->total_length - *pos;
	mutex_unlock(&migf->lock);
	mutex_unlock(&hisi_acc_vdev->state_mutex);

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
out:
	mutex_unlock(&migf->lock);
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return ret;
}

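/* Userspace reads the saved device state from the saving migration file. */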
static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
				     loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*pos > migf->total_length) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	len = min_t(size_t, migf->total_length - *pos, len);
	if (len) {
		u8 *vf_data = (u8 *)&migf->vf_data;

		ret = copy_to_user(buf, vf_data + *pos, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += len;
		done = len;
	}

out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = hisi_acc_vf_save_read,
	.unlocked_ioctl = hisi_acc_vf_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = hisi_acc_vf_release_file,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_open_saving_migf(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
					O_RDONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->hisi_acc_vdev = hisi_acc_vdev;

	ret = vf_qm_get_match_data(hisi_acc_vdev, &migf->vf_data);
	if (ret) {
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	return migf;
}

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pre_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
	if (IS_ERR(migf))
		return migf;

	migf->total_length = QM_MATCH_SIZE;
	return migf;
}

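/*
 * Enter STOP_COPY: save the full device state. @open indicates whether a
 * saving migration file still needs to be created (i.e. PRE_COPY was not
 * used by userspace).
 */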
static struct hisi_acc_vf_migration_file *
hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, bool open)
{
	int ret;
	struct hisi_acc_vf_migration_file *migf = NULL;

	if (open) {
		/*
		 * Userspace didn't use PRECOPY support. Hence saving_migf
		 * is not opened yet.
		 */
		migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
		if (IS_ERR(migf))
			return migf;
	} else {
		migf = hisi_acc_vdev->saving_migf;
	}

	ret = vf_qm_state_save(hisi_acc_vdev, migf);
	if (ret)
		return ERR_PTR(ret);

	return open ? migf : NULL;
}

static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	int ret;

	ret = vf_qm_func_stop(vf_qm);
	if (ret) {
		dev_err(dev, "failed to stop QM VF function!\n");
		return ret;
	}

	ret = hisi_acc_check_int_state(hisi_acc_vdev);
	if (ret) {
		dev_err(dev, "failed to check QM INT state!\n");
		return ret;
	}
	return 0;
}

static struct file *
hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     u32 new)
{
	u32 cur = hisi_acc_vdev->mig_state;
	int ret;

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pre_copy(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);

		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, false);
		if (IS_ERR(migf))
			return ERR_CAST(migf);

		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, true);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->resuming_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_load_state(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_start_device(hisi_acc_vdev);
		return NULL;
	}

	/*
	 * vfio_mig_get_next_state() does not use arcs other than the above
	 */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}

static struct file *
hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state new_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	while (new_state != hisi_acc_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev,
					      hisi_acc_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(-EINVAL);
			break;
		}

		res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
		if (IS_ERR(res))
			break;
		hisi_acc_vdev->mig_state = next_state;
		if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return res;
}

static int
hisi_acc_vfio_pci_get_data_size(struct vfio_device *vdev,
				unsigned long *stop_copy_length)
{
	*stop_copy_length = sizeof(struct acc_vf_data);
	return 0;
}

static int
hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state *curr_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);

	mutex_lock(&hisi_acc_vdev->state_mutex);
	*curr_state = hisi_acc_vdev->mig_state;
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return 0;
}

static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	if (hisi_acc_vdev->core_device.vdev.migration_flags !=
				VFIO_MIGRATION_STOP_COPY)
		return;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	hisi_acc_vf_reset(hisi_acc_vdev);
	mutex_unlock(&hisi_acc_vdev->state_mutex);
}

static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct pci_dev *vf_dev = vdev->pdev;

	/*
	 * ACC VF dev BAR2 region consists of both functional register space
	 * and migration control register space. For migration to work, we
	 * need access to both. Hence, we map the entire BAR2 region here.
	 * But unnecessarily exposing the migration BAR region to the Guest
	 * has the potential to prevent/corrupt the Guest migration. Hence,
	 * we restrict access to the migration control space from
	 * Guest(Please see mmap/ioctl/read/write override functions).
	 *
	 * Please note that it is OK to expose the entire VF BAR if migration
	 * is not supported or required as this cannot affect the ACC PF
	 * configurations.
	 *
	 * Also the HiSilicon ACC VF devices supported by this driver on
	 * HiSilicon hardware platforms are integrated end point devices
	 * and the platform lacks the capability to perform any PCIe P2P
	 * between these devices.
	 */
	vf_qm->io_base =
		ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
			pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
	if (!vf_qm->io_base)
		return -EIO;

	vf_qm->fun_type = QM_HW_VF;
	vf_qm->pdev = vf_dev;
	mutex_init(&vf_qm->mailbox_lock);

	return 0;
}

static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm;
	struct pci_driver *pf_driver;

	if (!pdev->is_virtfn)
		return NULL;

	switch (pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		pf_driver = hisi_sec_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		pf_driver = hisi_hpre_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		pf_driver = hisi_zip_get_pf_driver();
		break;
	default:
		return NULL;
	}

	if (!pf_driver)
		return NULL;

	pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);

	return !IS_ERR(pf_qm) ? pf_qm : NULL;
}

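/*
 * The lower half of BAR2 is the functional register space exposed to the
 * guest; the upper half holds the migration control registers. Clamp BAR2
 * accesses so they cannot reach the migration control region.
 */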
static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
					size_t count, loff_t *ppos,
					size_t *new_count)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		/* Check if access is for migration control region */
		if (pos >= end)
			return -EINVAL;

		*new_count = min(count, (size_t)(end - pos));
	}

	return 0;
}

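/* Reject BAR2 mappings that would expose the migration control region. */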
static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
				  struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		u64 req_len, pgoff, req_start;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		req_len = vma->vm_end - vma->vm_start;
		pgoff = vma->vm_pgoff &
			((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
		req_start = pgoff << PAGE_SHIFT;

		if (req_start + req_len > end)
			return -EINVAL;
	}

	return vfio_pci_core_mmap(core_vdev, vma);
}

static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
				       const char __user *buf, size_t count,
				       loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
}

static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
				      char __user *buf, size_t count,
				      loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
}

static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
				    unsigned long arg)
{
	if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_pci_core_device *vdev =
			container_of(core_vdev, struct vfio_pci_core_device, vdev);
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			/*
			 * ACC VF dev BAR2 region consists of both functional
			 * register space and migration control register space.
			 * Report only the functional region to Guest.
			 */
			info.size = pci_resource_len(pdev, info.index) / 2;

			info.flags = VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE |
					VFIO_REGION_INFO_FLAG_MMAP;

			return copy_to_user((void __user *)arg, &info, minsz) ?
					    -EFAULT : 0;
		}
	}
	return vfio_pci_core_ioctl(core_vdev, cmd, arg);
}

static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	int ret;

	lockdep_assert_held(&hisi_acc_vdev->open_mutex);
	/*
	 * When the device is not opened, the io_base is not mapped.
	 * The driver cannot perform device read and write operations.
	 */
	if (!hisi_acc_vdev->dev_opened) {
		seq_puts(seq, "device not opened!\n");
		return -EINVAL;
	}

	ret = qm_wait_dev_not_ready(vf_qm);
	if (ret) {
		seq_puts(seq, "VF device not ready!\n");
		return -EBUSY;
	}

	return 0;
}

static int hisi_acc_vf_debug_cmd(struct seq_file *seq, void *data)
{
	struct device *vf_dev = seq->private;
	struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev);
	struct vfio_device *vdev = &core_device->vdev;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	u64 value;
	int ret;

	mutex_lock(&hisi_acc_vdev->open_mutex);
	ret = hisi_acc_vf_debug_check(seq, vdev);
	if (ret) {
		mutex_unlock(&hisi_acc_vdev->open_mutex);
		return ret;
	}

	value = readl(vf_qm->io_base + QM_MB_CMD_SEND_BASE);
	if (value == QM_MB_CMD_NOT_READY) {
		mutex_unlock(&hisi_acc_vdev->open_mutex);
		seq_puts(seq, "mailbox cmd channel not ready!\n");
		return -EINVAL;
	}
	mutex_unlock(&hisi_acc_vdev->open_mutex);
	seq_puts(seq, "mailbox cmd channel ready!\n");

	return 0;
}

static int hisi_acc_vf_dev_read(struct seq_file *seq, void *data)
{
	struct device *vf_dev = seq->private;
	struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev);
	struct vfio_device *vdev = &core_device->vdev;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	size_t vf_data_sz = offsetofend(struct acc_vf_data, padding);
	struct acc_vf_data *vf_data;
	int ret;

	mutex_lock(&hisi_acc_vdev->open_mutex);
	ret = hisi_acc_vf_debug_check(seq, vdev);
	if (ret) {
		mutex_unlock(&hisi_acc_vdev->open_mutex);
		return ret;
	}

	mutex_lock(&hisi_acc_vdev->state_mutex);
	vf_data = kzalloc(sizeof(*vf_data), GFP_KERNEL);
	if (!vf_data) {
		ret = -ENOMEM;
		goto mutex_release;
	}

	vf_data->vf_qm_state = hisi_acc_vdev->vf_qm_state;
	ret = vf_qm_read_data(&hisi_acc_vdev->vf_qm, vf_data);
	if (ret)
		goto migf_err;

	seq_hex_dump(seq, "Dev Data:", DUMP_PREFIX_OFFSET, 16, 1,
		     (const void *)vf_data, vf_data_sz, false);

	seq_printf(seq,
		   "guest driver load: %u\n"
		   "data size: %lu\n",
		   hisi_acc_vdev->vf_qm_state,
		   sizeof(struct acc_vf_data));

migf_err:
	kfree(vf_data);
mutex_release:
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	mutex_unlock(&hisi_acc_vdev->open_mutex);

	return ret;
}

static int hisi_acc_vf_migf_read(struct seq_file *seq, void *data)
{
	struct device *vf_dev = seq->private;
	struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev);
	struct vfio_device *vdev = &core_device->vdev;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	size_t vf_data_sz = offsetofend(struct acc_vf_data, padding);
	struct hisi_acc_vf_migration_file *debug_migf = hisi_acc_vdev->debug_migf;

	/* Check whether the live migration operation has been performed */
	if (debug_migf->total_length < QM_MATCH_SIZE) {
		seq_puts(seq, "device not migrated!\n");
		return -EAGAIN;
	}

	seq_hex_dump(seq, "Mig Data:", DUMP_PREFIX_OFFSET, 16, 1,
		     (const void *)&debug_migf->vf_data, vf_data_sz, false);
	seq_printf(seq, "migrate data length: %lu\n", debug_migf->total_length);

	return 0;
}

static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	if (core_vdev->mig_ops) {
		mutex_lock(&hisi_acc_vdev->open_mutex);
		ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
		if (ret) {
			mutex_unlock(&hisi_acc_vdev->open_mutex);
			vfio_pci_core_disable(vdev);
			return ret;
		}
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
		hisi_acc_vdev->dev_opened = true;
		mutex_unlock(&hisi_acc_vdev->open_mutex);
	}

	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	mutex_lock(&hisi_acc_vdev->open_mutex);
	hisi_acc_vdev->dev_opened = false;
	iounmap(vf_qm->io_base);
	mutex_unlock(&hisi_acc_vdev->open_mutex);
	vfio_pci_core_close_device(core_vdev);
}

static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
	.migration_set_state = hisi_acc_vfio_pci_set_device_state,
	.migration_get_state = hisi_acc_vfio_pci_get_device_state,
	.migration_get_data_size = hisi_acc_vfio_pci_get_data_size,
};

static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
	struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
	struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);

	hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
	hisi_acc_vdev->pf_qm = pf_qm;
	hisi_acc_vdev->vf_dev = pdev;
	mutex_init(&hisi_acc_vdev->state_mutex);
	mutex_init(&hisi_acc_vdev->open_mutex);

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY;
	core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;

	return vfio_pci_core_init_dev(core_vdev);
}

static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
	.name = "hisi-acc-vfio-pci-migration",
	.init = hisi_acc_vfio_pci_migrn_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = hisi_acc_vfio_pci_close_device,
	.ioctl = hisi_acc_vfio_pci_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = hisi_acc_vfio_pci_read,
	.write = hisi_acc_vfio_pci_write,
	.mmap = hisi_acc_vfio_pci_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
	.name = "hisi-acc-vfio-pci",
	.init = vfio_pci_core_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = vfio_pci_core_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

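/*
 * Create the hisi_acc debugfs entries (dev_data, migf_data, cmd_state) under
 * the device's vfio migration debugfs directory.
 */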
static void hisi_acc_vfio_debug_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct vfio_device *vdev = &hisi_acc_vdev->core_device.vdev;
	struct hisi_acc_vf_migration_file *migf;
	struct dentry *vfio_dev_migration;
	struct dentry *vfio_hisi_acc;
	struct device *dev = vdev->dev;

	if (!debugfs_initialized() ||
	    !IS_ENABLED(CONFIG_VFIO_DEBUGFS))
		return;

	if (vdev->ops != &hisi_acc_vfio_pci_migrn_ops)
		return;

	vfio_dev_migration = debugfs_lookup("migration", vdev->debug_root);
	if (!vfio_dev_migration) {
		dev_err(dev, "failed to lookup migration debugfs file!\n");
		return;
	}

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return;
	hisi_acc_vdev->debug_migf = migf;

	vfio_hisi_acc = debugfs_create_dir("hisi_acc", vfio_dev_migration);
	debugfs_create_devm_seqfile(dev, "dev_data", vfio_hisi_acc,
				    hisi_acc_vf_dev_read);
	debugfs_create_devm_seqfile(dev, "migf_data", vfio_hisi_acc,
				    hisi_acc_vf_migf_read);
	debugfs_create_devm_seqfile(dev, "cmd_state", vfio_hisi_acc,
				    hisi_acc_vf_debug_cmd);
}

static void hisi_acc_vf_debugfs_exit(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	kfree(hisi_acc_vdev->debug_migf);
	hisi_acc_vdev->debug_migf = NULL;
}

static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev;
	const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
	struct hisi_qm *pf_qm;
	int vf_id;
	int ret;

	pf_qm = hisi_acc_get_pf_qm(pdev);
	if (pf_qm && pf_qm->ver >= QM_HW_V3) {
		vf_id = pci_iov_vf_id(pdev);
		if (vf_id >= 0)
			ops = &hisi_acc_vfio_pci_migrn_ops;
		else
			pci_warn(pdev, "migration support failed, continue with generic interface\n");
	}

	hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
					  core_device.vdev, &pdev->dev, ops);
	if (IS_ERR(hisi_acc_vdev))
		return PTR_ERR(hisi_acc_vdev);

	dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
	ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
	if (ret)
		goto out_put_vdev;

	hisi_acc_vfio_debug_init(hisi_acc_vdev);
	return 0;

out_put_vdev:
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
	return ret;
}

static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
	hisi_acc_vf_debugfs_exit(hisi_acc_vdev);
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
}

static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ }
};

MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);

static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
	.reset_done = hisi_acc_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver hisi_acc_vfio_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = hisi_acc_vfio_pci_table,
	.probe = hisi_acc_vfio_pci_probe,
	.remove = hisi_acc_vfio_pci_remove,
	.err_handler = &hisi_acc_vf_err_handlers,
	.driver_managed_dma = true,
};

module_pci_driver(hisi_acc_vfio_pci_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");