// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"

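/* Interrupt handling supports three MSI-X layouts, tracked in
 * vf->msix_vector_status: one vector per virtqueue plus one for the
 * config interrupt (MSIX_VECTOR_PER_VQ_AND_CONFIG), one vector shared
 * by all virtqueues plus one for config (MSIX_VECTOR_SHARED_VQ_AND_CONFIG),
 * and a single vector shared by the whole device (MSIX_VECTOR_DEV_SHARED).
 */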
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	struct vring_info *vring;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vring = &vf->vring[i];
		if (vring->cb.callback)
			vring->cb.callback(vring->cb.private);
	}

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	u8 isr;

	isr = vp_ioread8(vf->isr);
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		ifcvf_config_changed(irq, arg);

	return ifcvf_vqs_reused_intr_handler(irq, arg);
}

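/* IRQ teardown helpers: each request path below has a matching free path.
 * IRQs are requested with devm_request_irq(), so they are released with
 * devm_free_irq() and the saved irq numbers are reset to -EINVAL to mark
 * them unused.
 */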
static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		if (vf->vring[i].irq != -EINVAL) {
			devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
			vf->vring[i].irq = -EINVAL;
		}
	}
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;

	if (vf->vqs_reused_irq != -EINVAL) {
		devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
		vf->vqs_reused_irq = -EINVAL;
	}
}

static void ifcvf_free_vq_irq(struct ifcvf_hw *vf)
{
	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ifcvf_free_per_vq_irq(vf);
	else
		ifcvf_free_vqs_reused_irq(vf);
}

static void ifcvf_free_config_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;

	if (vf->config_irq == -EINVAL)
		return;

	/* If the irq is shared by all vqs and the config interrupt,
	 * it is already freed in ifcvf_free_vq_irq, so here only
	 * need to free config irq when msix_vector_status != MSIX_VECTOR_DEV_SHARED
	 */
	if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
		devm_free_irq(&pdev->dev, vf->config_irq, vf);
		vf->config_irq = -EINVAL;
	}
}

static void ifcvf_free_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;

	ifcvf_free_vq_irq(vf);
	ifcvf_free_config_irq(vf);
	ifcvf_free_irq_vectors(pdev);
	vf->num_msix_vectors = 0;
}

/* ifcvf MSIX vectors allocator, this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors, negative
 * return value when fails.
 */
static int ifcvf_alloc_vectors(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int max_intr, ret;

	/* all queues and config interrupt */
	max_intr = vf->nr_vring + 1;
	ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	if (ret < max_intr)
		IFCVF_INFO(pdev,
			   "Requested %u vectors, however only %u allocated, lower performance\n",
			   max_intr, ret);

	return ret;
}

static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i, vector, ret, irq;

	vf->vqs_reused_irq = -EINVAL;
	for (i = 0; i < vf->nr_vring; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
		vector = i;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_vq_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
			goto err;
		}

		vf->vring[i].irq = irq;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}

static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_vqs_reused_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}

static int ifcvf_request_dev_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_dev_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	vf->config_irq = irq;
	ret = ifcvf_set_config_vector(vf, vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}

static int ifcvf_request_vq_irq(struct ifcvf_hw *vf)
{
	int ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ret = ifcvf_request_per_vq_irq(vf);
	else
		ret = ifcvf_request_vqs_reused_irq(vf);

	return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int config_vector, ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		config_vector = vf->nr_vring;
	else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
		/* vector 0 for vqs and 1 for config interrupt */
		config_vector = 1;
	else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
		/* re-use the vqs vector */
		return 0;
	else
		return -EINVAL;

	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
		 pci_name(pdev));
	vf->config_irq = pci_irq_vector(pdev, config_vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		goto err;
	}

	ret = ifcvf_set_config_vector(vf, config_vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}

static int ifcvf_request_irq(struct ifcvf_hw *vf)
{
	int nvectors, ret, max_intr;

	nvectors = ifcvf_alloc_vectors(vf);
	if (nvectors <= 0)
		return -EFAULT;

	vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
	max_intr = vf->nr_vring + 1;
	if (nvectors < max_intr)
		vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

	if (nvectors == 1) {
		vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
		ret = ifcvf_request_dev_irq(vf);

		return ret;
	}

	ret = ifcvf_request_vq_irq(vf);
	if (ret)
		return ret;

	ret = ifcvf_request_config_irq(vf);
	if (ret)
		return ret;

	vf->num_msix_vectors = nvectors;

	return 0;
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return adapter->vf;
}

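/* vdpa_config_ops callbacks: these mostly translate vDPA bus requests
 * into the register-level ifcvf_* helpers declared in ifcvf_base.h.
 */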
static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	u32 type = vf->dev_type;
	u64 features;

	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
		features = ifcvf_get_dev_features(vf);
	else {
		features = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	int ret;

	ret = ifcvf_verify_min_features(vf, features);
	if (ret)
		return ret;

	ifcvf_set_driver_features(vf, features);

	return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	u64 features;

	features = ifcvf_get_driver_features(vf);

	return features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}

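/* IRQs are requested lazily: vectors are allocated and IRQs requested
 * only on the first transition to VIRTIO_CONFIG_S_DRIVER_OK.
 */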
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(vf);
		if (ret) {
			IFCVF_ERR(vf->pdev, "failed to request irq with error %d\n", ret);
			return;
		}
	}

	ifcvf_set_status(vf, status);
}

static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	u8 status = ifcvf_get_status(vf);

	ifcvf_stop(vf);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		ifcvf_free_irq(vf);

	ifcvf_reset(vf);

	return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_max_vq_size(vf);
}

static u16 ifcvf_vdpa_get_vq_num_min(struct vdpa_device *vdpa_dev)
{
	return IFCVF_MIN_VQ_SIZE;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->split.avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_set_vq_ready(vf, qid, ready);
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_vq_ready(vf, qid);
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_set_vq_num(vf, qid, num);
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->config_size;
}

static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	return 0;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

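/* When all vqs share one IRQ (vqs_reused_irq is valid), there is no
 * dedicated per-vq IRQ to report, so -EINVAL is returned for every queue.
 */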
static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	if (vf->vqs_reused_irq < 0)
		return vf->vring[qid].irq;
	else
		return -EINVAL;
}

static u16 ifcvf_vdpa_get_vq_size(struct vdpa_device *vdpa_dev,
				  u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_vq_size(vf, qid);
}

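/* The notification area of a vq is its doorbell region: when the device
 * reports notify_off_multiplier == 0 all doorbells share one offset and a
 * full page is exposed, otherwise the area spans notify_off_multiplier bytes.
 */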
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = vf->vring[idx].notify_pa;
	if (!vf->notify_off_multiplier)
		area.size = PAGE_SIZE;
	else
		area.size = vf->notify_off_multiplier;

	return area;
}

/*
 * IFCVF currently doesn't have on-chip IOMMU, so not
 * implemented set_map()/dma_map()/dma_unmap()
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_device_features = ifcvf_vdpa_get_device_features,
	.set_driver_features = ifcvf_vdpa_set_driver_features,
	.get_driver_features = ifcvf_vdpa_get_driver_features,
	.get_status = ifcvf_vdpa_get_status,
	.set_status = ifcvf_vdpa_set_status,
	.reset = ifcvf_vdpa_reset,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_num_min = ifcvf_vdpa_get_vq_num_min,
	.get_vq_state = ifcvf_vdpa_get_vq_state,
	.set_vq_state = ifcvf_vdpa_set_vq_state,
	.set_vq_cb = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready = ifcvf_vdpa_get_vq_ready,
	.set_vq_num = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.get_vq_irq = ifcvf_vdpa_get_vq_irq,
	.get_vq_size = ifcvf_vdpa_get_vq_size,
	.kick_vq = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id = ifcvf_vdpa_get_device_id,
	.get_vendor_id = ifcvf_vdpa_get_vendor_id,
	.get_vq_align = ifcvf_vdpa_get_vq_align,
	.get_vq_group = ifcvf_vdpa_get_vq_group,
	.get_config_size = ifcvf_vdpa_get_config_size,
	.get_config = ifcvf_vdpa_get_config,
	.set_config = ifcvf_vdpa_set_config,
	.set_config_cb = ifcvf_vdpa_set_config_cb,
	.get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

static struct virtio_device_id id_table_blk[] = {
	{VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
	{0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
	u32 dev_type;

	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
	 * so legacy devices and transitional devices in legacy
	 * mode will not work for vDPA, this driver will not
	 * drive devices with legacy interface.
	 */

	if (pdev->device < 0x1040)
		dev_type = pdev->subsystem_device;
	else
		dev_type = pdev->device - 0x1040;

	return dev_type;
}

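/* Management-device callback: creates and registers one vDPA device on the
 * bus, optionally masking the device features down to the set provisioned
 * through VDPA_ATTR_DEV_FEATURES.
 */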
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct ifcvf_adapter *adapter;
	struct vdpa_device *vdpa_dev;
	struct pci_dev *pdev;
	struct ifcvf_hw *vf;
	u64 device_features;
	int ret;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	vf = &ifcvf_mgmt_dev->vf;
	pdev = vf->pdev;
	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(adapter)) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return PTR_ERR(adapter);
	}

	ifcvf_mgmt_dev->adapter = adapter;
	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;
	adapter->vdpa.mdev = mdev;
	adapter->vf = vf;
	vdpa_dev = &adapter->vdpa;

	device_features = vf->hw_features;
	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features & ~device_features) {
			IFCVF_ERR(pdev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
				  config->device_features, device_features);
			return -EINVAL;
		}
		device_features &= config->device_features;
	}
	vf->dev_features = device_features;

	if (name)
		ret = dev_set_name(&vdpa_dev->dev, "%s", name);
	else
		ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
	if (ret) {
		put_device(&adapter->vdpa.dev);
		IFCVF_ERR(pdev, "Failed to register to vDPA bus");
		return ret;
	}

	return 0;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	_vdpa_unregister_device(dev);
	ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
	.dev_add = ifcvf_vdpa_dev_add,
	.dev_del = ifcvf_vdpa_dev_del
};

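/* PCI probe: enable the VF, map BARs 0, 2 and 4, set a 64-bit DMA mask,
 * initialize the hardware layout, then register a vDPA management device
 * for the VIRTIO ID (net or block) this VF exposes.
 */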
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct device *dev = &pdev->dev;
	struct ifcvf_hw *vf;
	u32 dev_type;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed for adding devres for freeing irq vectors\n");
		return ret;
	}

	pci_set_master(pdev);
	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
	if (!ifcvf_mgmt_dev) {
		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
		return -ENOMEM;
	}

	vf = &ifcvf_mgmt_dev->vf;
	vf->dev_type = get_dev_type(pdev);
	vf->base = pcim_iomap_table(pdev);
	vf->pdev = pdev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].irq = -EINVAL;

	vf->hw_features = ifcvf_get_hw_features(vf);
	vf->config_size = ifcvf_get_config_size(vf);

	dev_type = get_dev_type(pdev);
	switch (dev_type) {
	case VIRTIO_ID_NET:
		ifcvf_mgmt_dev->mdev.id_table = id_table_net;
		break;
	case VIRTIO_ID_BLOCK:
		ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
		break;
	default:
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
		ret = -EOPNOTSUPP;
		goto err;
	}

	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
	ifcvf_mgmt_dev->mdev.device = dev;
	ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
	ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
	ifcvf_mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);

	ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to initialize the management interfaces\n");
		goto err;
	}

	pci_set_drvdata(pdev, ifcvf_mgmt_dev);

	return 0;

err:
	kfree(ifcvf_mgmt_dev->vf.vring);
	kfree(ifcvf_mgmt_dev);
	return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = pci_get_drvdata(pdev);
	vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
	kfree(ifcvf_mgmt_dev->vf.vring);
	kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
	/* N3000 network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 N3000_DEVICE_ID,
			 PCI_VENDOR_ID_INTEL,
			 N3000_SUBSYS_DEVICE_ID) },
	/* C5000X-PL network device
	 * F2000X-PL network device
	 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_NET) },
	/* C5000X-PL block device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_BLOCK,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_BLOCK) },

	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_DESCRIPTION("Intel IFC VF NIC driver for virtio dataplane offloading");
MODULE_LICENSE("GPL v2");