// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Adapted from:
 *
 * virtio for kvm on s390
 *
 * Copyright IBM Corp. 2008
 *
 *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
 *
 * Intel Virtio Over PCIe (VOP) driver.
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "vop_main.h"
#define VOP_MAX_VRINGS 4
/*
 * _vop_vdev - Allocated per virtio device instance injected by the peer.
 *
 * @vdev: Virtio device
 * @desc: Virtio device page descriptor
 * @dc: Virtio device control
 * @vpdev: VOP device which is the parent for this virtio device
 * @vr: Buffer for accessing the VRING
 * @used_virt: Virtual address of used ring
 * @used: DMA address of used ring
 * @used_size: Size of the used buffer
 * @reset_done: Track whether VOP reset is complete
 * @virtio_cookie: Cookie returned upon requesting an interrupt
 * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
 * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
 * @dnode: The destination node
 */
struct _vop_vdev {
        struct virtio_device vdev;
        struct mic_device_desc __iomem *desc;
        struct mic_device_ctrl __iomem *dc;
        struct vop_device *vpdev;
        void __iomem *vr[VOP_MAX_VRINGS];
        void *used_virt[VOP_MAX_VRINGS];
        dma_addr_t used[VOP_MAX_VRINGS];
        int used_size[VOP_MAX_VRINGS];
        struct completion reset_done;
        struct mic_irq *virtio_cookie;
        int c2h_vdev_db;
        int h2c_vdev_db;
        int dnode;
};
#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)

#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)
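
/*
 * Illustrative sketch (not part of the driver; example_cb is a hypothetical
 * name): every config op below receives the embedded struct virtio_device
 * and uses to_vopvdev() to recover the surrounding transport state.
 *
 *      static void example_cb(struct virtio_device *dev)
 *      {
 *              struct _vop_vdev *vdev = to_vopvdev(dev);
 *
 *              dev_dbg(&vdev->vpdev->dev, "dnode %d\n", vdev->dnode);
 *      }
 */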
/* Helper API to obtain the parent of the virtio device */
static inline struct device *_vop_dev(struct _vop_vdev *vdev)
{
        return vdev->vdev.dev.parent;
}
static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
{
        return sizeof(*desc)
                + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
                + ioread8(&desc->feature_len) * 2
                + ioread8(&desc->config_len);
}
static inline struct mic_vqconfig __iomem *
_vop_vq_config(struct mic_device_desc __iomem *desc)
{
        return (struct mic_vqconfig __iomem *)(desc + 1);
}
static inline u8 __iomem *
_vop_vq_features(struct mic_device_desc __iomem *desc)
{
        return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
}
static inline u8 __iomem *
_vop_vq_configspace(struct mic_device_desc __iomem *desc)
{
        return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
}
static inline unsigned
_vop_total_desc_size(struct mic_device_desc __iomem *desc)
{
        return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
}
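
/*
 * Sketch of the device page layout implied by the helpers above (field
 * widths are read through the descriptor header; this is a summary, not a
 * definition):
 *
 *      struct mic_device_desc              header: type, num_vq, lengths
 *      struct mic_vqconfig[num_vq]         one entry per virtqueue
 *      u8 features[2 * feature_len]        host half, then guest half
 *      u8 config[config_len]               virtio config space
 *      (padding to an 8 byte boundary, per __mic_align())
 *      struct mic_device_ctrl              doorbells and handshake flags
 */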
/* This gets the device's feature bits. */
static u64 vop_get_features(struct virtio_device *vdev)
{
        unsigned int i, bits;
        u64 features = 0;
        struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
        u8 __iomem *in_features = _vop_vq_features(desc);
        int feature_len = ioread8(&desc->feature_len);

        bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
        for (i = 0; i < bits; i++)
                if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
                        features |= BIT_ULL(i);

        return features;
}
static void vop_transport_features(struct virtio_device *vdev)
{
        /*
         * Packed ring isn't enabled on virtio_vop for now,
         * because virtio_vop uses vring_new_virtqueue() which
         * creates virtio rings on preallocated memory.
         */
        __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}
static int vop_finalize_features(struct virtio_device *vdev)
{
        unsigned int i, bits;
        struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
        u8 feature_len = ioread8(&desc->feature_len);
        /* Second half of bitmap is features we accept. */
        u8 __iomem *out_features =
                _vop_vq_features(desc) + feature_len;

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        /* Give virtio_vop a chance to accept features. */
        vop_transport_features(vdev);

        memset_io(out_features, 0, feature_len);
        bits = min_t(unsigned, feature_len,
                     sizeof(vdev->features)) * 8;
        for (i = 0; i < bits; i++) {
                if (__virtio_test_bit(vdev, i))
                        iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
                                 &out_features[i / 8]);
        }

        return 0;
}
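
/*
 * Worked example (illustrative): with a feature_len of at least 5 bytes,
 * accepting VIRTIO_F_VERSION_1 (bit 32) sets bit 32 % 8 == 0 in byte
 * 32 / 8 == 4, i.e. it ORs 0x01 into out_features[4] of the guest half,
 * while the host-advertised half read by vop_get_features() is untouched.
 */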
/*
 * Reading and writing elements in config space
 */
static void vop_get(struct virtio_device *vdev, unsigned int offset,
                    void *buf, unsigned len)
{
        struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

        if (offset + len > ioread8(&desc->config_len))
                return;
        memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
}
static void vop_set(struct virtio_device *vdev, unsigned int offset,
                    const void *buf, unsigned len)
{
        struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

        if (offset + len > ioread8(&desc->config_len))
                return;
        memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
}
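
/*
 * Illustrative usage (assuming a virtio-net device sits on this transport):
 * a driver reads its config space through the generic virtio helpers, which
 * end up in vop_get() above, e.g.
 *
 *      u16 mtu;
 *
 *      virtio_cread(vdev, struct virtio_net_config, mtu, &mtu);
 */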
/*
 * The operations to get and set the status word just access the status
 * field of the device descriptor. set_status also interrupts the host
 * to tell it about status changes.
 */
static u8 vop_get_status(struct virtio_device *vdev)
{
        return ioread8(&to_vopvdev(vdev)->desc->status);
}
static void vop_set_status(struct virtio_device *dev, u8 status)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct vop_device *vpdev = vdev->vpdev;

        if (!status)
                return;
        iowrite8(status, &vdev->desc->status);
        vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
}
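
/*
 * Illustrative: the guest-side virtio core drives this path through the
 * usual helpers, e.g.
 *
 *      virtio_device_ready(vdev);
 *
 * which adds VIRTIO_CONFIG_S_DRIVER_OK via vop_set_status() and therefore
 * also rings the c2h doorbell so the host notices the transition.
 */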
/* Inform host on a virtio device reset and wait for ack from host */
static void vop_reset_inform_host(struct virtio_device *dev)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct mic_device_ctrl __iomem *dc = vdev->dc;
        struct vop_device *vpdev = vdev->vpdev;
        int retry;

        iowrite8(0, &dc->host_ack);
        iowrite8(1, &dc->vdev_reset);
        vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);

        /* Wait till host completes all card accesses and acks the reset */
        for (retry = 100; retry--;) {
                if (ioread8(&dc->host_ack))
                        break;
                msleep(100);
        }

        dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);

        /* Reset status to 0 in case we timed out */
        iowrite8(0, &vdev->desc->status);
}
static void vop_reset(struct virtio_device *dev)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);

        dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
                __func__, dev->id.device);

        vop_reset_inform_host(dev);
        complete_all(&vdev->reset_done);
}
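
/*
 * Reset handshake sketch (the guest side is the code above; the host side
 * is assumed behaviour of the peer):
 *
 *      guest: dc->host_ack = 0, dc->vdev_reset = 1, ring the c2h doorbell
 *      host:  quiesces the device, then sets dc->host_ack = 1
 *      guest: polls dc->host_ack for up to ~10s, then clears desc->status
 */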
/*
 * The virtio_ring code calls this API when it wants to notify the Host.
 */
static bool vop_notify(struct virtqueue *vq)
{
        struct _vop_vdev *vdev = vq->priv;
        struct vop_device *vpdev = vdev->vpdev;

        vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
        return true;
}
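
/*
 * Illustrative: vop_notify() is the notify callback passed to the ring when
 * the virtqueue is created below, so a driver-side kick lands here, e.g.
 *
 *      virtqueue_add_outbuf(vq, sg, 1, buf, GFP_ATOMIC);
 *      virtqueue_kick(vq);          <- ends up in vop_notify(vq)
 */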
static void vop_del_vq(struct virtqueue *vq, int n)
{
        struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
        struct vop_device *vpdev = vdev->vpdev;

        dma_unmap_single(&vpdev->dev, vdev->used[n],
                         vdev->used_size[n], DMA_BIDIRECTIONAL);
        free_pages((unsigned long)vdev->used_virt[n],
                   get_order(vdev->used_size[n]));
        vring_del_virtqueue(vq);
        vpdev->hw_ops->unmap(vpdev, vdev->vr[n]);
        vdev->vr[n] = NULL;
}
static void vop_del_vqs(struct virtio_device *dev)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct virtqueue *vq, *n;
        int idx = 0;

        dev_dbg(_vop_dev(vdev), "%s\n", __func__);

        list_for_each_entry_safe(vq, n, &dev->vqs, list)
                vop_del_vq(vq, idx++);
}
static struct virtqueue *vop_new_virtqueue(unsigned int index,
                                           unsigned int num,
                                           struct virtio_device *vdev,
                                           bool context,
                                           void *pages,
                                           bool (*notify)(struct virtqueue *vq),
                                           void (*callback)(struct virtqueue *vq),
                                           const char *name,
                                           void *used)
{
        bool weak_barriers = false;
        struct vring vring;

        vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
        vring.used = used;

        return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
                                     notify, callback, name);
}
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
                                     unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name, bool ctx)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct vop_device *vpdev = vdev->vpdev;
        struct mic_vqconfig __iomem *vqconfig;
        struct mic_vqconfig config;
        struct virtqueue *vq;
        void __iomem *va;
        struct _mic_vring_info __iomem *info;
        void *used;
        int vr_size, _vr_size, err, magic;
        u8 type = ioread8(&vdev->desc->type);

        if (index >= ioread8(&vdev->desc->num_vq))
                return ERR_PTR(-ENOENT);

        if (!name)
                return ERR_PTR(-ENOENT);

        /* First assign the vring's allocated in host memory */
        vqconfig = _vop_vq_config(vdev->desc) + index;
        memcpy_fromio(&config, vqconfig, sizeof(config));
        _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
        vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
        va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
        if (!va)
                return ERR_PTR(-ENOMEM);
        vdev->vr[index] = va;
        memset_io(va, 0x0, _vr_size);

        info = va + _vr_size;
        magic = ioread32(&info->magic);

        if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
                err = -EIO;
                goto unmap;
        }

        vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
                                            sizeof(struct vring_used_elem) *
                                            le16_to_cpu(config.num));
        used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(vdev->used_size[index]));
        vdev->used_virt[index] = used;
        if (!used) {
                err = -ENOMEM;
                dev_err(_vop_dev(vdev), "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto unmap;
        }

        vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
                               (void __force *)va, vop_notify, callback,
                               name, used);
        if (!vq) {
                err = -ENOMEM;
                goto free_used;
        }

        vdev->used[index] = dma_map_single(&vpdev->dev, used,
                                           vdev->used_size[index],
                                           DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
                err = -ENOMEM;
                dev_err(_vop_dev(vdev), "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto del_vq;
        }
        writeq(vdev->used[index], &vqconfig->used_address);

        vq->priv = vdev;
        return vq;
del_vq:
        vring_del_virtqueue(vq);
free_used:
        free_pages((unsigned long)used,
                   get_order(vdev->used_size[index]));
unmap:
        vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
        return ERR_PTR(err);
}
static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
                        struct virtqueue *vqs[],
                        vq_callback_t *callbacks[],
                        const char * const names[], const bool *ctx,
                        struct irq_affinity *desc)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct vop_device *vpdev = vdev->vpdev;
        struct mic_device_ctrl __iomem *dc = vdev->dc;
        int i, err, retry, queue_idx = 0;

        /* We must have this many virtqueues. */
        if (nvqs > ioread8(&vdev->desc->num_vq))
                return -ENOENT;

        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }

                dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
                        __func__, i, names[i]);
                vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
                                     ctx ? ctx[i] : false);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto error;
                }
        }

        iowrite8(1, &dc->used_address_updated);
        /*
         * Send an interrupt to the host to inform it that used
         * rings have been re-assigned.
         */
        vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
        for (retry = 100; --retry;) {
                if (!ioread8(&dc->used_address_updated))
                        break;
                msleep(100);
        }

        dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
        if (!retry) {
                err = -ENODEV;
                goto error;
        }

        return 0;
error:
        vop_del_vqs(dev);
        return err;
}
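
/*
 * Illustrative usage from a driver's probe routine (hypothetical two-queue
 * device; rx_done and tx_done are assumed callback names); this is the call
 * that funnels into vop_find_vqs() above:
 *
 *      struct virtqueue *vqs[2];
 *      vq_callback_t *cbs[2] = { rx_done, tx_done };
 *      static const char * const names[] = { "rx", "tx" };
 *      int err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 */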
/*
 * The config ops structure as defined by virtio config
 */
static struct virtio_config_ops vop_vq_config_ops = {
        .get_features = vop_get_features,
        .finalize_features = vop_finalize_features,
        .get = vop_get,
        .set = vop_set,
        .get_status = vop_get_status,
        .set_status = vop_set_status,
        .reset = vop_reset,
        .find_vqs = vop_find_vqs,
        .del_vqs = vop_del_vqs,
};
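
/*
 * Sketch of the dispatch (illustrative): the virtio core only ever goes
 * through this ops table; for example, virtio_dev_probe() negotiates
 * features with
 *
 *      device_features = dev->config->get_features(dev);
 *
 * which resolves to vop_get_features() on this transport.
 */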
static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
{
        struct _vop_vdev *vdev = data;
        struct vop_device *vpdev = vdev->vpdev;
        struct virtqueue *vq;

        vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
        list_for_each_entry(vq, &vdev->vdev.vqs, list)
                vring_interrupt(0, vq);

        return IRQ_HANDLED;
}
static void vop_virtio_release_dev(struct device *_d)
{
        struct virtio_device *vdev =
                        container_of(_d, struct virtio_device, dev);
        struct _vop_vdev *vop_vdev =
                        container_of(vdev, struct _vop_vdev, vdev);

        kfree(vop_vdev);
}
/*
 * adds a new device and registers it with virtio;
 * appropriate drivers are loaded by the device model
 */
static int _vop_add_device(struct mic_device_desc __iomem *d,
                           unsigned int offset, struct vop_device *vpdev,
                           int dnode)
{
        struct _vop_vdev *vdev, *reg_dev = NULL;
        int ret;
        u8 type = ioread8(&d->type);

        vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
        if (!vdev)
                return -ENOMEM;

        vdev->vpdev = vpdev;
        vdev->vdev.dev.parent = &vpdev->dev;
        vdev->vdev.dev.release = vop_virtio_release_dev;
        vdev->vdev.id.device = type;
        vdev->vdev.config = &vop_vq_config_ops;
        vdev->desc = d;
        vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
        vdev->dnode = dnode;
        vdev->vdev.priv = (void *)(unsigned long)dnode;
        init_completion(&vdev->reset_done);

        vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
        vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
                        vop_virtio_intr_handler, "virtio intr",
                        vdev, vdev->h2c_vdev_db);
        if (IS_ERR(vdev->virtio_cookie)) {
                ret = PTR_ERR(vdev->virtio_cookie);
                goto kfree;
        }
        iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
        vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);

        ret = register_virtio_device(&vdev->vdev);
        reg_dev = vdev;
        if (ret) {
                dev_err(_vop_dev(vdev),
                        "Failed to register vop device %u type %u\n",
                        offset, type);
                goto free_irq;
        }
        writeq((unsigned long)vdev, &vdev->dc->vdev);
        dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
                __func__, offset, type, vdev);

        return 0;

free_irq:
        vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
        if (reg_dev)
                put_device(&vdev->vdev.dev);
        else
                kfree(vdev);
        return ret;
}
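
/*
 * Illustrative only: if the host injects a descriptor whose type byte is
 * VIRTIO_ID_NET (1), the register_virtio_device() call above creates a
 * device that the stock virtio_net driver matches through its id table,
 * just as it would on virtio-pci:
 *
 *      static const struct virtio_device_id id_table[] = {
 *              { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *              { 0 },
 *      };
 */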
/*
 * match for a vop device with a specific desc pointer
 */
static int vop_match_desc(struct device *dev, void *data)
{
        struct virtio_device *_dev = dev_to_virtio(dev);
        struct _vop_vdev *vdev = to_vopvdev(_dev);

        return vdev->desc == (void __iomem *)data;
}
static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc)
{
        return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
}
static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
                                      unsigned int offset,
                                      struct vop_device *vpdev)
{
        struct mic_device_ctrl __iomem *dc
                = (void __iomem *)d + _vop_aligned_desc_size(d);
        struct _vop_vdev *vdev = vop_dc_to_vdev(dc);

        if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
                return;

        dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
        virtio_config_changed(&vdev->vdev);
        iowrite8(1, &dc->guest_ack);
}
/*
 * removes a virtio device if a hot remove event has been
 * requested by the host.
 */
static int _vop_remove_device(struct mic_device_desc __iomem *d,
                              unsigned int offset, struct vop_device *vpdev)
{
        struct mic_device_ctrl __iomem *dc
                = (void __iomem *)d + _vop_aligned_desc_size(d);
        struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
        u8 status;
        int ret = -1;

        if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
                struct device *dev = get_device(&vdev->vdev.dev);

                dev_dbg(&vpdev->dev,
                        "%s %d config_change %d type %d vdev %p\n",
                        __func__, __LINE__,
                        ioread8(&dc->config_change), ioread8(&d->type), vdev);
                status = ioread8(&d->status);
                reinit_completion(&vdev->reset_done);
                unregister_virtio_device(&vdev->vdev);
                vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
                iowrite8(-1, &dc->h2c_vdev_db);
                if (status & VIRTIO_CONFIG_S_DRIVER_OK)
                        wait_for_completion(&vdev->reset_done);
                put_device(dev);
                iowrite8(1, &dc->guest_ack);
                dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
                        __func__, __LINE__, ioread8(&dc->guest_ack));
                iowrite8(-1, &d->type);
                ret = 0;
        }
        return ret;
}
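
/*
 * Hot-remove handshake sketch (the guest side is the code above; the host
 * side is assumed behaviour of the peer):
 *
 *      host:  sets dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE, rings doorbell
 *      guest: unregisters the virtio device, waits for the reset handshake
 *             if a driver was bound, then sets dc->guest_ack = 1 and d->type = -1
 *      host:  sees guest_ack and reclaims the descriptor slot
 */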
#define REMOVE_DEVICES true
static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
                              bool remove, int dnode)
{
        s8 type;
        unsigned int i;
        struct mic_device_desc __iomem *d;
        struct mic_device_ctrl __iomem *dc;
        struct device *dev;
        int ret;

        for (i = sizeof(struct mic_bootparam);
                i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
                d = dp + i;
                dc = (void __iomem *)d + _vop_aligned_desc_size(d);
                /*
                 * This read barrier is paired with the corresponding write
                 * barrier on the host which is inserted before adding or
                 * removing a virtio device descriptor, by updating the type.
                 */
                rmb();
                type = ioread8(&d->type);

                /* This entry is not used */
                if (type == 0)
                        continue;

                /* This entry has been removed */
                if (type == -1)
                        continue;

                /* device already exists */
                dev = device_find_child(&vpdev->dev, (void __force *)d,
                                        vop_match_desc);
                if (dev) {
                        if (remove)
                                iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
                                         &dc->config_change);
                        put_device(dev);
                        _vop_handle_config_change(d, i, vpdev);
                        ret = _vop_remove_device(d, i, vpdev);
                        if (remove) {
                                iowrite8(0, &dc->config_change);
                                iowrite8(0, &dc->guest_ack);
                        }
                        continue;
                }

                /* new device */
                dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
                        __func__, __LINE__, d);
                if (!remove)
                        _vop_add_device(d, i, vpdev, dnode);
        }
}
static void vop_scan_devices(struct vop_info *vi,
                             struct vop_device *vpdev, bool remove)
{
        void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);

        if (!dp)
                return;
        mutex_lock(&vi->vop_mutex);
        _vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
        mutex_unlock(&vi->vop_mutex);
}
/*
 * vop_hotplug_devices tries to find changes in the device page.
 */
static void vop_hotplug_devices(struct work_struct *work)
{
        struct vop_info *vi = container_of(work, struct vop_info,
                                           hotplug_work);

        vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
}
/*
 * Interrupt handler for hot plug/config changes etc.
 */
static irqreturn_t vop_extint_handler(int irq, void *data)
{
        struct vop_info *vi = data;
        struct mic_bootparam __iomem *bp;
        struct vop_device *vpdev = vi->vpdev;

        bp = vpdev->hw_ops->get_remote_dp(vpdev);
        dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
                __func__, __LINE__);
        vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
        schedule_work(&vi->hotplug_work);
        return IRQ_HANDLED;
}
static int vop_driver_probe(struct vop_device *vpdev)
{
        struct vop_info *vi;
        int rc;

        vi = kzalloc(sizeof(*vi), GFP_KERNEL);
        if (!vi) {
                rc = -ENOMEM;
                goto exit;
        }
        dev_set_drvdata(&vpdev->dev, vi);
        vi->vpdev = vpdev;

        mutex_init(&vi->vop_mutex);
        INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
        if (vpdev->dnode) {
                rc = vop_host_init(vi);
                if (rc < 0)
                        goto free;
        } else {
                struct mic_bootparam __iomem *bootparam;

                vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);

                vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
                vi->cookie = vpdev->hw_ops->request_irq(vpdev,
                                                        vop_extint_handler,
                                                        "virtio_config_intr",
                                                        vi, vi->h2c_config_db);
                if (IS_ERR(vi->cookie)) {
                        rc = PTR_ERR(vi->cookie);
                        goto free;
                }
                bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
                iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
        }
        vop_init_debugfs(vi);
        return 0;
free:
        kfree(vi);
exit:
        return rc;
}
static void vop_driver_remove(struct vop_device *vpdev)
{
        struct vop_info *vi = dev_get_drvdata(&vpdev->dev);

        if (vpdev->dnode) {
                vop_host_uninit(vi);
        } else {
                struct mic_bootparam __iomem *bootparam =
                        vpdev->hw_ops->get_remote_dp(vpdev);
                if (bootparam)
                        iowrite8(-1, &bootparam->h2c_config_db);
                vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
                flush_work(&vi->hotplug_work);
                vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
        }

        vop_exit_debugfs(vi);
        kfree(vi);
}
static struct vop_device_id id_table[] = {
        { VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
        { 0 },
};
static struct vop_driver vop_driver = {
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = vop_driver_probe,
        .remove = vop_driver_remove,
};

module_vop_driver(vop_driver);
MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
MODULE_LICENSE("GPL v2");