// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, HiSilicon Ltd.
 */

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/hisi_acc_qm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/anon_inodes.h>

#include "hisi_acc_vfio_pci.h"

/* Return 0 when the VM's acc device is ready, -ETIMEDOUT on hardware timeout */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
					  val, !(val & 0x1), MB_POLL_PERIOD_US,
					  MB_POLL_TIMEOUT_US);
}

/*
 * Each state register is checked 100 times,
 * with a delay of 100 microseconds after each check.
 */
static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
{
	int check_times = 0;
	u32 state;

	state = readl(qm->io_base + regs);
	while (state && check_times < ERROR_CHECK_TIMEOUT) {
		udelay(CHECK_DELAY_TIME);
		state = readl(qm->io_base + regs);
		check_times++;
	}

	return state;
}

static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
			u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++) {
		data[i] = readl(qm->io_base + reg_addr);
		reg_addr += QM_REG_ADDR_OFFSET;
	}

	return 0;
}

static int qm_write_regs(struct hisi_qm *qm, u32 reg,
			 u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++)
		writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);

	return 0;
}
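
/*
 * Read the VF's SQC VFT entry via a mailbox command and return the number
 * of queue pairs assigned to it; *base is set to the first queue index.
 */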
static int qm_get_vft(struct hisi_qm *qm, u32 *base)
{
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		   QM_XQC_ADDR_OFFSET);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		 QM_XQC_ADDR_OFFSET);

	return 0;
}

static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		 QM_XQC_ADDR_OFFSET);

	return 0;
}
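
/*
 * Snapshot the VF's QM interrupt/IFC registers, page size and EQC/AEQC
 * context into vf_data so they can be restored on the destination.
 */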
static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
			   &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}

static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	/* Check VF state */
	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
		return -EBUSY;
	}

	ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
			    &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}
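
/*
 * Ring a QM doorbell for queue @qn: the 64-bit doorbell value encodes the
 * queue number, command, index and priority, and is written to the SQ/CQ
 * or EQ/AEQ doorbell base depending on the command.
 */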
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
		  u16 index, u8 priority)
{
	u64 doorbell;
	u64 dbase;
	u16 randata = 0;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, qm->io_base + dbase);
}
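
/*
 * Query the SQC VFT configuration of VF @vf_id through the PF and return
 * the number of queue pairs assigned to that VF; *rbase is set to the
 * first queue index.
 */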
static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
{
	unsigned int val;
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
	/* 0 means SQC VFT */
	writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
	writel(vf_id, qm->io_base + QM_VFT_CFG);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
		  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
		   QM_XQC_ADDR_OFFSET);
	*rbase = QM_SQC_VFT_BASE_MASK_V2 &
		 (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static void qm_dev_cmd_init(struct hisi_qm *qm)
{
	/* Clear VF communication status registers. */
	writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);

	/* Enable PF and VF communication. */
	writel(0x0, qm->io_base + QM_IFC_INT_MASK);
}

static int vf_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;

	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), MB_POLL_PERIOD_US,
				       MB_POLL_TIMEOUT_US)) {
		dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
		return -EINVAL;
	}

	return 0;
}

static void vf_qm_fun_reset(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
}

static int vf_qm_func_stop(struct hisi_qm *qm)
{
	return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}
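
/*
 * Validate the match data received from the migration source against this
 * destination VF: magic, device ID, queue pair count and isolation state
 * must all match before the rest of the device state is accepted.
 */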
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	u32 que_iso_state;
	int ret;

	if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
		return 0;

	if (vf_data->acc_magic != ACC_DEV_MAGIC) {
		dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
		return -EINVAL;
	}

	if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
		dev_err(dev, "failed to match VF devices\n");
		return -EINVAL;
	}

	/* VF qp num check */
	ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums\n");
		return -EINVAL;
	}

	if (ret != vf_data->qp_num) {
		dev_err(dev, "failed to match VF qp num\n");
		return -EINVAL;
	}

	vf_qm->qp_num = ret;

	/* VF isolation state check */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	if (vf_data->que_iso_cfg != que_iso_state) {
		dev_err(dev, "failed to match isolation state\n");
		return -EINVAL;
	}

	ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_STATE\n");
		return ret;
	}

	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
	hisi_acc_vdev->match_done = true;
	return 0;
}

static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
				struct acc_vf_data *vf_data)
{
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &pf_qm->pdev->dev;
	int vf_id = hisi_acc_vdev->vf_id;
	int ret;

	vf_data->acc_magic = ACC_DEV_MAGIC;
	/* Save device id */
	vf_data->dev_id = hisi_acc_vdev->vf_dev->device;

	/* Save the VF qp num obtained from the PF */
	ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums!\n");
		return -EINVAL;
	}

	vf_data->qp_num = ret;

	/* Save the VF isolation state obtained from the PF */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
		return ret;
	}

	return 0;
}
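
/*
 * Restore the saved QM state on the destination VF: DMA addresses and queue
 * configuration are taken from the migration data, the registers are written
 * back, and the SQC/CQC base tables are reprogrammed via mailbox commands.
 */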
static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			   struct hisi_acc_vf_migration_file *migf)
{
	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &qm->pdev->dev;
	struct acc_vf_data *vf_data = &migf->vf_data;
	int ret;

	/* Return if only match data was transferred */
	if (migf->total_length == QM_MATCH_SIZE)
		return 0;

	if (migf->total_length < sizeof(struct acc_vf_data))
		return -EINVAL;

	qm->eqe_dma = vf_data->eqe_dma;
	qm->aeqe_dma = vf_data->aeqe_dma;
	qm->sqc_dma = vf_data->sqc_dma;
	qm->cqc_dma = vf_data->cqc_dma;

	qm->qp_base = vf_data->qp_base;
	qm->qp_num = vf_data->qp_num;

	ret = qm_set_regs(qm, vf_data);
	if (ret) {
		dev_err(dev, "set VF regs failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set sqc failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "set cqc failed\n");
		return ret;
	}

	qm_dev_cmd_init(qm);
	return 0;
}

static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &vf_qm->pdev->dev;
	int ret;

	ret = qm_get_regs(vf_qm, vf_data);
	if (ret)
		return -EINVAL;

	/* Every reg is 32 bit, the dma address is 64 bit. */
	vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];

	/* Get the SQC and CQC base addresses through SQC_BT/CQC_BT */
	ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
	if (ret) {
		dev_err(dev, "failed to read SQC addr!\n");
		return -EINVAL;
	}

	ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
	if (ret) {
		dev_err(dev, "failed to read CQC addr!\n");
		return -EINVAL;
	}

	return 0;
}
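
/*
 * Save the VF's QM state for the stop-copy phase. If the VF is not ready
 * (guest driver not running), only the match data is reported; otherwise
 * the QM cache is written back and the full register state is captured.
 */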
static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			    struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	int ret;

	if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
		/* Update state and return with match data */
		vf_data->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
		migf->total_length = QM_MATCH_SIZE;
		return 0;
	}

	vf_data->vf_qm_state = QM_READY;
	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;

	ret = vf_qm_cache_wb(vf_qm);
	if (ret) {
		dev_err(dev, "failed to writeback QM Cache!\n");
		return ret;
	}

	ret = vf_qm_read_data(vf_qm, vf_data);
	if (ret)
		return -EINVAL;

	migf->total_length = sizeof(struct acc_vf_data);
	return 0;
}

static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

	return container_of(core_device, struct hisi_acc_vf_core_device,
			    core_device);
}

/* Check the PF's RAS state and Function INT state */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
	struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
	struct device *dev = &qm->pdev->dev;
	u32 state;

	/* Check RAS state */
	state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM RAS state!\n");
		return -EBUSY;
	}

	/* Check Function Communication state between PF and VF */
	state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT state!\n");
		return -EBUSY;
	}
	state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT SET state!\n");
		return -EBUSY;
	}

	/* Check submodule task state */
	switch (vf_pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM SEC Core INT state!\n");
			return -EBUSY;
		}
		return 0;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
			return -EBUSY;
		}
		return 0;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM ZIP Core INT state!\n");
			return -EBUSY;
		}
		return 0;
	default:
		dev_err(dev, "failed to detect acc module type!\n");
		return -EINVAL;
	}
}

static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->total_length = 0;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}

static void
hisi_acc_debug_migf_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			 struct hisi_acc_vf_migration_file *src_migf)
{
	struct hisi_acc_vf_migration_file *dst_migf = hisi_acc_vdev->debug_migf;

	if (!dst_migf)
		return;

	dst_migf->total_length = src_migf->total_length;
	memcpy(&dst_migf->vf_data, &src_migf->vf_data,
	       sizeof(struct acc_vf_data));
}

static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	if (hisi_acc_vdev->resuming_migf) {
		hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->resuming_migf);
		hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
		fput(hisi_acc_vdev->resuming_migf->filp);
		hisi_acc_vdev->resuming_migf = NULL;
	}

	if (hisi_acc_vdev->saving_migf) {
		hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->saving_migf);
		hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
		fput(hisi_acc_vdev->saving_migf->filp);
		hisi_acc_vdev->saving_migf = NULL;
	}
}

static struct hisi_acc_vf_core_device *hisi_acc_get_vf_dev(struct vfio_device *vdev)
{
	return container_of(vdev, struct hisi_acc_vf_core_device,
			    core_device.vdev);
}

static void hisi_acc_vf_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
	hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	hisi_acc_vf_disable_fds(hisi_acc_vdev);
}

static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	if (hisi_acc_vdev->vf_qm_state != QM_READY)
		return;

	/* Make sure the device is enabled */
	qm_dev_cmd_init(vf_qm);

	vf_qm_fun_reset(vf_qm);
}

static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
	int ret;

	/* Recover data to VF */
	ret = vf_qm_load_data(hisi_acc_vdev, migf);
	if (ret) {
		dev_err(dev, "failed to recover the VF!\n");
		return ret;
	}

	return 0;
}

static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;

	hisi_acc_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);
	return 0;
}
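
/*
 * write() handler for the resuming migration fd: userspace streams the saved
 * device state into migf->vf_data, and the match data is validated once
 * enough bytes have been received.
 */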
static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
					size_t len, loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	u8 *vf_data = (u8 *)&migf->vf_data;
	loff_t requested_length;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;

	if (requested_length > sizeof(struct acc_vf_data))
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	ret = copy_from_user(vf_data + *pos, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*pos += len;
	done = len;
	migf->total_length += len;

	ret = vf_qm_check_match(migf->hisi_acc_vdev, migf);
	if (ret)
		done = -EFAULT;
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = hisi_acc_vf_resume_write,
	.release = hisi_acc_vf_release_file,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
					O_WRONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->hisi_acc_vdev = hisi_acc_vdev;
	return migf;
}
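
/*
 * VFIO_MIG_GET_PRECOPY_INFO handler: reports how many bytes of the initial
 * (match) data remain to be read while the device is in PRE_COPY.
 */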
static long hisi_acc_vf_precopy_ioctl(struct file *filp,
				      unsigned int cmd, unsigned long arg)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = migf->hisi_acc_vdev;
	loff_t *pos = &filp->f_pos;
	struct vfio_precopy_info info;
	unsigned long minsz;
	int ret;

	if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
		return -ENOTTY;

	minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	if (hisi_acc_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY) {
		mutex_unlock(&hisi_acc_vdev->state_mutex);
		return -EINVAL;
	}

	mutex_lock(&migf->lock);

	if (migf->disabled) {
		ret = -ENODEV;
		goto out;
	}

	if (*pos > migf->total_length) {
		ret = -EINVAL;
		goto out;
	}

	info.dirty_bytes = 0;
	info.initial_bytes = migf->total_length - *pos;
	mutex_unlock(&migf->lock);
	mutex_unlock(&hisi_acc_vdev->state_mutex);

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
out:
	mutex_unlock(&migf->lock);
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return ret;
}

static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
				     loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*pos > migf->total_length) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	len = min_t(size_t, migf->total_length - *pos, len);
	if (len) {
		u8 *vf_data = (u8 *)&migf->vf_data;

		ret = copy_to_user(buf, vf_data + *pos, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += len;
		done = len;
	}
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = hisi_acc_vf_save_read,
	.unlocked_ioctl = hisi_acc_vf_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = hisi_acc_vf_release_file,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_open_saving_migf(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
					O_RDONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->hisi_acc_vdev = hisi_acc_vdev;

	ret = vf_qm_get_match_data(hisi_acc_vdev, &migf->vf_data);
	if (ret) {
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	return migf;
}

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pre_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
	if (IS_ERR(migf))
		return migf;

	migf->total_length = QM_MATCH_SIZE;
	return migf;
}
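
/*
 * Enter the stop-copy phase. When @open is true userspace skipped PRE_COPY,
 * so a saving fd is created here and returned; otherwise the already open
 * saving fd is reused and only the device state is captured.
 */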
static struct hisi_acc_vf_migration_file *
hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, bool open)
{
	int ret;
	struct hisi_acc_vf_migration_file *migf = NULL;

	if (open) {
		/*
		 * Userspace didn't use PRECOPY support. Hence saving_migf
		 * is not opened yet.
		 */
		migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
		if (IS_ERR(migf))
			return migf;
	} else {
		migf = hisi_acc_vdev->saving_migf;
	}

	ret = vf_qm_state_save(hisi_acc_vdev, migf);
	if (ret)
		return ERR_PTR(ret);

	return open ? migf : NULL;
}

static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	int ret;

	ret = vf_qm_func_stop(vf_qm);
	if (ret) {
		dev_err(dev, "failed to stop QM VF function!\n");
		return ret;
	}

	ret = hisi_acc_check_int_state(hisi_acc_vdev);
	if (ret) {
		dev_err(dev, "failed to check QM INT state!\n");
		return ret;
	}
	return 0;
}
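
/*
 * Handle one arc of the VFIO device migration state machine. Each supported
 * transition either returns a migration file (for the saving/resuming data
 * windows), NULL when no data transfer is involved, or an ERR_PTR on failure.
 */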
static struct file *
hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     u32 new)
{
	u32 cur = hisi_acc_vdev->mig_state;
	int ret;

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pre_copy(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);

		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, false);
		if (IS_ERR(migf))
			return ERR_CAST(migf);

		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, true);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->resuming_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_load_state(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_start_device(hisi_acc_vdev);
		return NULL;
	}

	/*
	 * vfio_mig_get_next_state() does not use arcs other than the above
	 */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}

static struct file *
hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state new_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	while (new_state != hisi_acc_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev,
					      hisi_acc_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(-EINVAL);
			break;
		}

		res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
		if (IS_ERR(res))
			break;
		hisi_acc_vdev->mig_state = next_state;
		if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return res;
}

static int
hisi_acc_vfio_pci_get_data_size(struct vfio_device *vdev,
				unsigned long *stop_copy_length)
{
	*stop_copy_length = sizeof(struct acc_vf_data);
	return 0;
}

static int
hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state *curr_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);

	mutex_lock(&hisi_acc_vdev->state_mutex);
	*curr_state = hisi_acc_vdev->mig_state;
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	return 0;
}

static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	if (hisi_acc_vdev->core_device.vdev.migration_flags !=
	    VFIO_MIGRATION_STOP_COPY)
		return;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	hisi_acc_vf_reset(hisi_acc_vdev);
	mutex_unlock(&hisi_acc_vdev->state_mutex);
}

static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct pci_dev *vf_dev = vdev->pdev;

	/*
	 * ACC VF dev BAR2 region consists of both functional register space
	 * and migration control register space. For migration to work, we
	 * need access to both. Hence, we map the entire BAR2 region here.
	 * But unnecessarily exposing the migration BAR region to the Guest
	 * has the potential to prevent/corrupt the Guest migration. Hence,
	 * we restrict access to the migration control space from the
	 * Guest (please see the mmap/ioctl/read/write override functions).
	 *
	 * Please note that it is OK to expose the entire VF BAR if migration
	 * is not supported or required as this cannot affect the ACC PF
	 * configurations.
	 *
	 * Also the HiSilicon ACC VF devices supported by this driver on
	 * HiSilicon hardware platforms are integrated end point devices
	 * and the platform lacks the capability to perform any PCIe P2P
	 * between these devices.
	 */

	vf_qm->io_base =
		ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
			pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
	if (!vf_qm->io_base)
		return -EIO;

	vf_qm->fun_type = QM_HW_VF;
	vf_qm->pdev = vf_dev;
	mutex_init(&vf_qm->mailbox_lock);

	return 0;
}

static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm;
	struct pci_driver *pf_driver;

	if (!pdev->is_virtfn)
		return NULL;

	switch (pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		pf_driver = hisi_sec_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		pf_driver = hisi_hpre_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		pf_driver = hisi_zip_get_pf_driver();
		break;
	default:
		return NULL;
	}

	if (!pf_driver)
		return NULL;

	pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);

	return !IS_ERR(pf_qm) ? pf_qm : NULL;
}
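
/*
 * BAR2 is split in half: the lower half holds the functional registers the
 * Guest may use, the upper half the migration control registers. Reject or
 * clamp any read/write that strays into the migration control half.
 */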
static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
					size_t count, loff_t *ppos,
					size_t *new_count)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		/* Check if access is for migration control region */
		if (pos >= end)
			return -EINVAL;

		*new_count = min(count, (size_t)(end - pos));
	}

	return 0;
}

static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
				  struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		u64 req_len, pgoff, req_start;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		req_len = vma->vm_end - vma->vm_start;
		pgoff = vma->vm_pgoff &
			((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
		req_start = pgoff << PAGE_SHIFT;

		if (req_start + req_len > end)
			return -EINVAL;
	}

	return vfio_pci_core_mmap(core_vdev, vma);
}

static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
				       const char __user *buf, size_t count,
				       loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
}

static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
				      char __user *buf, size_t count,
				      loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
}

static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
				    unsigned long arg)
{
	if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_pci_core_device *vdev =
			container_of(core_vdev, struct vfio_pci_core_device, vdev);
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			/*
			 * ACC VF dev BAR2 region consists of both functional
			 * register space and migration control register space.
			 * Report only the functional region to Guest.
			 */
			info.size = pci_resource_len(pdev, info.index) / 2;

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE |
				     VFIO_REGION_INFO_FLAG_MMAP;

			return copy_to_user((void __user *)arg, &info, minsz) ?
					    -EFAULT : 0;
		}
	}
	return vfio_pci_core_ioctl(core_vdev, cmd, arg);
}

static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	int ret;

	lockdep_assert_held(&hisi_acc_vdev->open_mutex);
	/*
	 * When the device is not opened, the io_base is not mapped.
	 * The driver cannot perform device read and write operations.
	 */
	if (!hisi_acc_vdev->dev_opened) {
		seq_puts(seq, "device not opened!\n");
		return -EINVAL;
	}

	ret = qm_wait_dev_not_ready(vf_qm);
	if (ret) {
		seq_puts(seq, "VF device not ready!\n");
		return -EBUSY;
	}

	return 0;
}

static int hisi_acc_vf_debug_cmd(struct seq_file *seq, void *data)
{
	struct device *vf_dev = seq->private;
	struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev);
	struct vfio_device *vdev = &core_device->vdev;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	u64 value;
	int ret;

	mutex_lock(&hisi_acc_vdev->open_mutex);
	ret = hisi_acc_vf_debug_check(seq, vdev);
	if (ret) {
		mutex_unlock(&hisi_acc_vdev->open_mutex);
		return ret;
	}

	value = readl(vf_qm->io_base + QM_MB_CMD_SEND_BASE);
	if (value == QM_MB_CMD_NOT_READY) {
		mutex_unlock(&hisi_acc_vdev->open_mutex);
		seq_puts(seq, "mailbox cmd channel not ready!\n");
		return -EINVAL;
	}
	mutex_unlock(&hisi_acc_vdev->open_mutex);
	seq_puts(seq, "mailbox cmd channel ready!\n");

	return 0;
}

static int hisi_acc_vf_dev_read(struct seq_file *seq, void *data)
{
	struct device *vf_dev = seq->private;
	struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev);
	struct vfio_device *vdev = &core_device->vdev;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	size_t vf_data_sz = offsetofend(struct acc_vf_data, padding);
	struct acc_vf_data *vf_data;
	int ret;

	mutex_lock(&hisi_acc_vdev->open_mutex);
	ret = hisi_acc_vf_debug_check(seq, vdev);
	if (ret) {
		mutex_unlock(&hisi_acc_vdev->open_mutex);
		return ret;
	}

	mutex_lock(&hisi_acc_vdev->state_mutex);
	vf_data = kzalloc(sizeof(*vf_data), GFP_KERNEL);
	if (!vf_data) {
		ret = -ENOMEM;
		goto mutex_release;
	}

	vf_data->vf_qm_state = hisi_acc_vdev->vf_qm_state;
	ret = vf_qm_read_data(&hisi_acc_vdev->vf_qm, vf_data);
	if (ret)
		goto migf_err;

	seq_hex_dump(seq, "Dev Data:", DUMP_PREFIX_OFFSET, 16, 1,
		     (const void *)vf_data, vf_data_sz, false);

	seq_printf(seq,
		   "guest driver load: %u\n"
		   "data size: %lu\n",
		   hisi_acc_vdev->vf_qm_state,
		   sizeof(struct acc_vf_data));

migf_err:
	kfree(vf_data);
mutex_release:
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	mutex_unlock(&hisi_acc_vdev->open_mutex);

	return ret;
}

static int hisi_acc_vf_migf_read(struct seq_file *seq, void *data)
{
	struct device *vf_dev = seq->private;
	struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev);
	struct vfio_device *vdev = &core_device->vdev;
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
	size_t vf_data_sz = offsetofend(struct acc_vf_data, padding);
	struct hisi_acc_vf_migration_file *debug_migf = hisi_acc_vdev->debug_migf;

	/* Check whether the live migration operation has been performed */
	if (debug_migf->total_length < QM_MATCH_SIZE) {
		seq_puts(seq, "device not migrated!\n");
		return -EAGAIN;
	}

	seq_hex_dump(seq, "Mig Data:", DUMP_PREFIX_OFFSET, 16, 1,
		     (const void *)&debug_migf->vf_data, vf_data_sz, false);
	seq_printf(seq, "migrate data length: %lu\n", debug_migf->total_length);

	return 0;
}

static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	if (core_vdev->mig_ops) {
		mutex_lock(&hisi_acc_vdev->open_mutex);
		ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
		if (ret) {
			mutex_unlock(&hisi_acc_vdev->open_mutex);
			vfio_pci_core_disable(vdev);
			return ret;
		}
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
		hisi_acc_vdev->dev_opened = true;
		mutex_unlock(&hisi_acc_vdev->open_mutex);
	}

	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	mutex_lock(&hisi_acc_vdev->open_mutex);
	hisi_acc_vdev->dev_opened = false;
	iounmap(vf_qm->io_base);
	mutex_unlock(&hisi_acc_vdev->open_mutex);
	vfio_pci_core_close_device(core_vdev);
}

static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
	.migration_set_state = hisi_acc_vfio_pci_set_device_state,
	.migration_get_state = hisi_acc_vfio_pci_get_device_state,
	.migration_get_data_size = hisi_acc_vfio_pci_get_data_size,
};

static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
	struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
	struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);

	hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
	hisi_acc_vdev->pf_qm = pf_qm;
	hisi_acc_vdev->vf_dev = pdev;
	mutex_init(&hisi_acc_vdev->state_mutex);
	mutex_init(&hisi_acc_vdev->open_mutex);

	core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY;
	core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;

	return vfio_pci_core_init_dev(core_vdev);
}

static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
	.name = "hisi-acc-vfio-pci-migration",
	.init = hisi_acc_vfio_pci_migrn_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = hisi_acc_vfio_pci_close_device,
	.ioctl = hisi_acc_vfio_pci_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = hisi_acc_vfio_pci_read,
	.write = hisi_acc_vfio_pci_write,
	.mmap = hisi_acc_vfio_pci_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
	.name = "hisi-acc-vfio-pci",
	.init = vfio_pci_core_init_dev,
	.release = vfio_pci_core_release_dev,
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = vfio_pci_core_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.bind_iommufd = vfio_iommufd_physical_bind,
	.unbind_iommufd = vfio_iommufd_physical_unbind,
	.attach_ioas = vfio_iommufd_physical_attach_ioas,
	.detach_ioas = vfio_iommufd_physical_detach_ioas,
};

static void hisi_acc_vfio_debug_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct vfio_device *vdev = &hisi_acc_vdev->core_device.vdev;
	struct hisi_acc_vf_migration_file *migf;
	struct dentry *vfio_dev_migration;
	struct dentry *vfio_hisi_acc;
	struct device *dev = vdev->dev;

	if (!debugfs_initialized() ||
	    !IS_ENABLED(CONFIG_VFIO_DEBUGFS))
		return;

	if (vdev->ops != &hisi_acc_vfio_pci_migrn_ops)
		return;

	vfio_dev_migration = debugfs_lookup("migration", vdev->debug_root);
	if (!vfio_dev_migration) {
		dev_err(dev, "failed to lookup migration debugfs file!\n");
		return;
	}

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return;
	hisi_acc_vdev->debug_migf = migf;

	vfio_hisi_acc = debugfs_create_dir("hisi_acc", vfio_dev_migration);
	debugfs_create_devm_seqfile(dev, "dev_data", vfio_hisi_acc,
				    hisi_acc_vf_dev_read);
	debugfs_create_devm_seqfile(dev, "migf_data", vfio_hisi_acc,
				    hisi_acc_vf_migf_read);
	debugfs_create_devm_seqfile(dev, "cmd_state", vfio_hisi_acc,
				    hisi_acc_vf_debug_cmd);
}

static void hisi_acc_vf_debugfs_exit(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	kfree(hisi_acc_vdev->debug_migf);
	hisi_acc_vdev->debug_migf = NULL;
}
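
/*
 * Register with migration support only when the PF driver is available and
 * the hardware is QM v3 or later; otherwise fall back to the generic
 * vfio-pci-core interface.
 */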
static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev;
	const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
	struct hisi_qm *pf_qm;
	int vf_id;
	int ret;

	pf_qm = hisi_acc_get_pf_qm(pdev);
	if (pf_qm && pf_qm->ver >= QM_HW_V3) {
		vf_id = pci_iov_vf_id(pdev);
		if (vf_id >= 0)
			ops = &hisi_acc_vfio_pci_migrn_ops;
		else
			pci_warn(pdev, "migration support failed, continue with generic interface\n");
	}

	hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
					  core_device.vdev, &pdev->dev, ops);
	if (IS_ERR(hisi_acc_vdev))
		return PTR_ERR(hisi_acc_vdev);

	dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
	ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
	if (ret)
		goto out_put_vdev;

	hisi_acc_vfio_debug_init(hisi_acc_vdev);
	return 0;

out_put_vdev:
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
	return ret;
}

static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
	hisi_acc_vf_debugfs_exit(hisi_acc_vdev);
	vfio_put_device(&hisi_acc_vdev->core_device.vdev);
}

static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ }
};

MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);

static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
	.reset_done = hisi_acc_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver hisi_acc_vfio_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = hisi_acc_vfio_pci_table,
	.probe = hisi_acc_vfio_pci_probe,
	.remove = hisi_acc_vfio_pci_remove,
	.err_handler = &hisi_acc_vf_err_handlers,
	.driver_managed_dma = true,
};

module_pci_driver(hisi_acc_vfio_pci_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");