// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"
static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif
bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return false;

	return index == vp_dev->admin_vq.vq_index;
}
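/*
 * VIRTIO_F_ADMIN_VQ means the device exposes an administration virtqueue.
 * vp_is_avq() lets the rest of this file single that queue out, e.g. so its
 * vq_info can be looked up in vp_dev->admin_vq rather than vp_dev->vqs[].
 */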
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}
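/*
 * vq->priv is filled in by the transport-specific setup_vq() (legacy or
 * modern) with the mapped address of this queue's notify register, so a
 * single 16-bit write of the queue index is all it takes to kick the device.
 */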
/* Notify all slow path virtqueues on an interrupt. */
static void vp_vring_slow_path_interrupt(int irq,
					 struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_vq_info *info;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
		vring_interrupt(irq, info->vq);
	spin_unlock_irqrestore(&vp_dev->lock, flags);
}
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	vp_vring_slow_path_interrupt(irq, vp_dev);
	return IRQ_HANDLED;
}
/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}
/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned int flags = PCI_IRQ_MSIX;
	unsigned int i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (!per_vq_vectors)
		desc = NULL;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}
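/*
 * Slow path virtqueues (currently the admin virtqueue) can share the MSI-X
 * vector used for configuration changes, so a vector equal to
 * VP_MSIX_CONFIG_VECTOR marks a queue as slow path.
 */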
static bool vp_is_slow_path_vector(u16 msix_vec)
{
	return msix_vec == VP_MSIX_CONFIG_VECTOR;
}
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec,
				     struct virtio_pci_vq_info **p_info)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		if (!vp_is_slow_path_vector(msix_vec))
			list_add(&info->node, &vp_dev->virtqueues);
		else
			list_add(&info->node, &vp_dev->slow_virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	*p_info = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}
static void vp_del_vq(struct virtqueue *vq, struct virtio_pci_vq_info *info)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	unsigned long flags;

	/*
	 * If it fails during re-enable reset vq. This way we won't rejoin
	 * info->node to the queue. Prevent unexpected irqs.
	 */
	if (!vq->reset) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_del(&info->node);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	}

	vp_dev->del_vq(info);
	kfree(info);
}
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info;
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		info = vp_is_avq(vdev, vq->index) ? vp_dev->admin_vq.info :
						    vp_dev->vqs[vq->index];

		if (vp_dev->per_vq_vectors) {
			int v = info->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR &&
			    !vp_is_slow_path_vector(v)) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_update_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq, info);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}
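/*
 * Vector allocation policies, in the order vp_find_vqs() tries them:
 * EACH: one vector for config changes plus one vector per virtqueue;
 * SHARED_SLOW: slow path virtqueues share the config vector, every other
 * virtqueue still gets its own vector;
 * SHARED: one vector for config changes and one shared by all virtqueues.
 */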
enum vp_vq_vector_policy {
	VP_VQ_VECTOR_POLICY_EACH,
	VP_VQ_VECTOR_POLICY_SHARED_SLOW,
	VP_VQ_VECTOR_POLICY_SHARED,
};
static struct virtqueue *
vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
		    vq_callback_t *callback, const char *name, bool ctx,
		    bool slow_path, int *allocated_vectors,
		    enum vp_vq_vector_policy vector_policy,
		    struct virtio_pci_vq_info **p_info)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	u16 msix_vec;
	int err;

	if (!callback)
		msix_vec = VIRTIO_MSI_NO_VECTOR;
	else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
		 (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
		  !slow_path))
		msix_vec = (*allocated_vectors)++;
	else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
		 slow_path)
		msix_vec = VP_MSIX_CONFIG_VECTOR;
	else
		msix_vec = VP_MSIX_VQ_VECTOR;
	vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec,
			 p_info);
	if (IS_ERR(vq))
		return vq;

	if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
	    msix_vec == VIRTIO_MSI_NO_VECTOR ||
	    vp_is_slow_path_vector(msix_vec))
		return vq;

	/* allocate per-vq irq if available and necessary */
	snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names),
		 "%s-%s", dev_name(&vp_dev->vdev.dev), name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
			  vring_interrupt, 0,
			  vp_dev->msix_names[msix_vec], vq);
	if (err) {
		vp_del_vq(vq, *p_info);
		return ERR_PTR(err);
	}

	return vq;
}
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[],
			    struct virtqueue_info vqs_info[],
			    enum vp_vq_vector_policy vector_policy,
			    struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
	struct virtqueue_info *vqi;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;
	struct virtqueue *vq;
	bool per_vq_vectors;
	u16 avq_num = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (vp_dev->avq_index) {
		err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
		if (err)
			goto error_find;
	}

	per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i) {
			vqi = &vqs_info[i];
			if (vqi->name && vqi->callback)
				++nvectors;
		}
		if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
			++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		vqi = &vqs_info[i];
		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
					     vqi->name, vqi->ctx, false,
					     &allocated_vectors, vector_policy,
					     &vp_dev->vqs[i]);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}
	}

	if (!avq_num)
		return 0;
	sprintf(avq->name, "avq.%u", avq->vq_index);
	vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
				 avq->name, false, true, &allocated_vectors,
				 vector_policy, &vp_dev->admin_vq.info);
	if (IS_ERR(vq)) {
		err = PTR_ERR(vq);
		goto error_find;
	}

	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[],
			    struct virtqueue_info vqs_info[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
	int i, err, queue_idx = 0;
	struct virtqueue *vq;
	u16 avq_num = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (vp_dev->avq_index) {
		err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
		if (err)
			goto out_del_vqs;
	}

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
				     vqi->name, vqi->ctx,
				     VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	if (!avq_num)
		return 0;
	sprintf(avq->name, "avq.%u", avq->vq_index);
	vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
			 false, VIRTIO_MSI_NO_VECTOR,
			 &vp_dev->admin_vq.info);
	if (IS_ERR(vq)) {
		err = PTR_ERR(vq);
		goto out_del_vqs;
	}

	return 0;

out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_EACH, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one shared vector for config and
	 * slow path queues, one vector per queue for the rest.
	 */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_SHARED, desc);
	if (!err)
		return 0;
	/* Is there an interrupt? If not give up. */
	if (!(to_vp_device(vdev)->pci_dev->irq))
		return err;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, vqs_info);
}
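/*
 * For illustration only (not built): a minimal sketch of how a virtio driver
 * ends up in vp_find_vqs(), assuming the virtqueue_info-based
 * virtio_find_vqs() wrapper from virtio_config.h; my_rx_done()/my_tx_done()
 * are hypothetical driver callbacks.
 */
#if 0
static int my_driver_find_vqs(struct virtio_device *vdev,
			      struct virtqueue *vqs[2])
{
	struct virtqueue_info vqs_info[] = {
		{ .name = "rx", .callback = my_rx_done },
		{ .name = "tx", .callback = my_tx_done },
	};

	/* For a virtio-pci device this lands in vp_find_vqs() above. */
	return virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
}
#endif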
const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}
/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_update_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_and_hint(irq, mask);
		}
	}
	return 0;
}
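/*
 * For illustration only (not built): drivers normally reach
 * vp_set_vq_affinity() through the virtqueue_set_affinity() helper, e.g. to
 * spread their queues across online CPUs.
 */
#if 0
static void my_driver_spread_vqs(struct virtqueue *vqs[], unsigned int nvqs)
{
	unsigned int i;

	for (i = 0; i < nvqs; i++)
		virtqueue_set_affinity(vqs[i],
				       cpumask_of(i % num_online_cpus()));
}
#endif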
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
	    vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}
static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}
static bool vp_supports_pm_no_reset(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 pmcsr;

	if (!pci_dev->pm_cap)
		return false;

	pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		dev_err(dev, "Unable to query pmcsr");
		return false;
	}

	return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
}
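/*
 * When PMCSR reports No_Soft_Reset, the device keeps its state across the
 * D3hot -> D0 transition, so the suspend/resume callbacks below can skip the
 * heavier freeze/restore cycle.
 */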
static int virtio_pci_suspend(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
}
static int virtio_pci_resume(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
}
static const struct dev_pm_ops virtio_pci_pm_ops = {
	.suspend = virtio_pci_suspend,
	.resume = virtio_pci_resume,
	.freeze = virtio_pci_freeze,
	.thaw = virtio_pci_restore,
	.poweroff = virtio_pci_freeze,
	.restore = virtio_pci_restore,
};
#endif
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	/*
	 * Device is marked broken on surprise removal so that virtio upper
	 * layers can abort any ongoing operation.
	 */
	if (!pci_device_is_present(pci_dev))
		virtio_break_device(&vp_dev->vdev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}
static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}
static struct pci_driver virtio_pci_driver = {
	.name			= "virtio-pci",
	.id_table		= virtio_pci_id_table,
	.probe			= virtio_pci_probe,
	.remove			= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm		= &virtio_pci_pm_ops,
#endif
	.sriov_configure	= virtio_pci_sriov_configure,
};
struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
{
	struct virtio_pci_device *pf_vp_dev;

	pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver);
	if (IS_ERR(pf_vp_dev))
		return NULL;

	return &pf_vp_dev->vdev;
}
module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");