/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Adapted from:
 *
 * virtio for kvm on s390
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
 *
 * Intel Virtio Over PCIe (VOP) driver.
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "vop_main.h"

#define VOP_MAX_VRINGS 4
/**
 * struct _vop_vdev - Allocated per virtio device instance injected by the peer.
 *
 * @vdev: Virtio device
 * @desc: Virtio device page descriptor
 * @dc: Virtio device control
 * @vpdev: VOP device which is the parent for this virtio device
 * @vr: Buffer for accessing the VRING
 * @used_virt: Virtual address of used ring
 * @used: DMA address of used ring
 * @used_size: Size of the used buffer
 * @reset_done: Track whether VOP reset is complete
 * @virtio_cookie: Cookie returned upon requesting an interrupt
 * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
 * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
 * @dnode: The destination node
 */
struct _vop_vdev {
	struct virtio_device vdev;
	struct mic_device_desc __iomem *desc;
	struct mic_device_ctrl __iomem *dc;
	struct vop_device *vpdev;
	void __iomem *vr[VOP_MAX_VRINGS];
	void *used_virt[VOP_MAX_VRINGS];
	dma_addr_t used[VOP_MAX_VRINGS];
	int used_size[VOP_MAX_VRINGS];
	struct completion reset_done;
	struct mic_irq *virtio_cookie;
	int c2h_vdev_db;
	int h2c_vdev_db;
	int dnode;
};
#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)

#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)
/* Helper API to obtain the parent of the virtio device */
static inline struct device *_vop_dev(struct _vop_vdev *vdev)
{
	return vdev->vdev.dev.parent;
}
static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
{
	return sizeof(*desc)
		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
		+ ioread8(&desc->feature_len) * 2
		+ ioread8(&desc->config_len);
}
static inline struct mic_vqconfig __iomem *
_vop_vq_config(struct mic_device_desc __iomem *desc)
{
	return (struct mic_vqconfig __iomem *)(desc + 1);
}
static inline u8 __iomem *
_vop_vq_features(struct mic_device_desc __iomem *desc)
{
	return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
}
static inline u8 __iomem *
_vop_vq_configspace(struct mic_device_desc __iomem *desc)
{
	return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
}
static inline unsigned
_vop_total_desc_size(struct mic_device_desc __iomem *desc)
{
	return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
}
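/*
 * Layout walked by the helpers above: each slot in the shared device page
 * starts with a struct mic_device_desc, followed by num_vq vring
 * configurations (struct mic_vqconfig), a 2 * feature_len byte feature
 * bitmap (device features, then the features the guest accepts), the
 * config_len bytes of virtio config space, and finally, after rounding up
 * to an 8 byte boundary, the struct mic_device_ctrl used for the
 * host/guest handshake.
 */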
/* This gets the device's feature bits. */
static u64 vop_get_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	u64 features = 0;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 __iomem *in_features = _vop_vq_features(desc);
	int feature_len = ioread8(&desc->feature_len);

	bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++)
		if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
			features |= BIT_ULL(i);

	return features;
}
static void vop_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on virtio_vop for now,
	 * because virtio_vop uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}
static int vop_finalize_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 feature_len = ioread8(&desc->feature_len);
	/* Second half of bitmap is features we accept. */
	u8 __iomem *out_features =
		_vop_vq_features(desc) + feature_len;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_vop a chance to accept features. */
	vop_transport_features(vdev);

	memset_io(out_features, 0, feature_len);
	bits = min_t(unsigned, feature_len,
		     sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++) {
		if (__virtio_test_bit(vdev, i))
			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
				 &out_features[i / 8]);
	}

	return 0;
}
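/*
 * Feature negotiation thus uses the two halves of the descriptor's feature
 * bitmap: vop_get_features() reads the first feature_len bytes offered by
 * the peer, while vop_finalize_features() clears and rewrites the second
 * feature_len bytes with the bits this side actually accepted.
 */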
/*
 * Reading and writing elements in config space
 */
static void vop_get(struct virtio_device *vdev, unsigned int offset,
		    void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
}
static void vop_set(struct virtio_device *vdev, unsigned int offset,
		    const void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
}
/*
 * The operations to get and set the status word just access the status
 * field of the device descriptor. set_status also interrupts the host
 * to tell it about status changes.
 */
static u8 vop_get_status(struct virtio_device *vdev)
{
	return ioread8(&to_vopvdev(vdev)->desc->status);
}
static void vop_set_status(struct virtio_device *dev, u8 status)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;

	if (!status)
		return;
	iowrite8(status, &vdev->desc->status);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
}
/* Inform host on a virtio device reset and wait for ack from host */
static void vop_reset_inform_host(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	struct vop_device *vpdev = vdev->vpdev;
	int retry;

	iowrite8(0, &dc->host_ack);
	iowrite8(1, &dc->vdev_reset);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);

	/* Wait till host completes all card accesses and acks the reset */
	for (retry = 100; retry--;) {
		if (ioread8(&dc->host_ack))
			break;
		msleep(100);
	}

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);

	/* Reset status to 0 in case we timed out */
	iowrite8(0, &vdev->desc->status);
}
static void vop_reset(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);

	dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
		__func__, dev->id.device);

	vop_reset_inform_host(dev);
	complete_all(&vdev->reset_done);
}
/*
 * The virtio_ring code calls this API when it wants to notify the Host.
 */
static bool vop_notify(struct virtqueue *vq)
{
	struct _vop_vdev *vdev = vq->priv;
	struct vop_device *vpdev = vdev->vpdev;

	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	return true;
}
static void vop_del_vq(struct virtqueue *vq, int n)
{
	struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
	struct vop_device *vpdev = vdev->vpdev;

	dma_unmap_single(&vpdev->dev, vdev->used[n],
			 vdev->used_size[n], DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vdev->used_virt[n],
		   get_order(vdev->used_size[n]));
	vring_del_virtqueue(vq);
	vpdev->hw_ops->unmap(vpdev, vdev->vr[n]);
}
static void vop_del_vqs(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct virtqueue *vq, *n;
	int idx = 0;

	dev_dbg(_vop_dev(vdev), "%s\n", __func__);

	list_for_each_entry_safe(vq, n, &dev->vqs, list)
		vop_del_vq(vq, idx++);
}
static struct virtqueue *vop_new_virtqueue(unsigned int index,
					   unsigned int num,
					   struct virtio_device *vdev,
					   bool context,
					   void *pages,
					   bool (*notify)(struct virtqueue *vq),
					   void (*callback)(struct virtqueue *vq),
					   const char *name,
					   void *used)
{
	bool weak_barriers = false;
	struct vring vring;

	vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
	vring.used = used;

	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vring's allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);

	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					    sizeof(struct vring_used_elem) *
					    le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	vdev->used_virt[index] = used;
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto unmap;
	}

	vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
			       (void __force *)va, vop_notify, callback,
			       name, used);
	if (!vq) {
		err = -ENOMEM;
		goto free_used;
	}

	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					   vdev->used_size[index],
					   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	writeq(vdev->used[index], &vqconfig->used_address);

	vq->priv = vdev;
	return vq;
del_vq:
	vring_del_virtqueue(vq);
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
unmap:
	vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}
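/*
 * Per ring, the window remapped above therefore holds the vring itself
 * (_vr_size bytes, allocated by the host) immediately followed by a
 * struct _mic_vring_info whose magic value is checked against
 * MIC_MAGIC + type + index. The used ring is shadowed in guest-local
 * pages, DMA mapped, and its bus address published to the host through
 * vqconfig->used_address.
 */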
static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
			struct virtqueue *vqs[],
			vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	int i, err, retry, queue_idx = 0;

	/* We must have this many virtqueues. */
	if (nvqs > ioread8(&vdev->desc->num_vq))
		return -ENOENT;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
			__func__, i, names[i]);
		vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	iowrite8(1, &dc->used_address_updated);
	/*
	 * Send an interrupt to the host to inform it that used
	 * rings have been re-assigned.
	 */
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	for (retry = 100; --retry;) {
		if (!ioread8(&dc->used_address_updated))
			break;
		msleep(100);
	}

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
	if (!retry) {
		err = -ENODEV;
		goto error;
	}

	return 0;
error:
	vop_del_vqs(dev);
	return err;
}
/*
 * The config ops structure as defined by virtio config
 */
static struct virtio_config_ops vop_vq_config_ops = {
	.get_features = vop_get_features,
	.finalize_features = vop_finalize_features,
	.get = vop_get,
	.set = vop_set,
	.get_status = vop_get_status,
	.set_status = vop_set_status,
	.reset = vop_reset,
	.find_vqs = vop_find_vqs,
	.del_vqs = vop_del_vqs,
};
static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
{
	struct _vop_vdev *vdev = data;
	struct vop_device *vpdev = vdev->vpdev;
	struct virtqueue *vq;

	vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
	list_for_each_entry(vq, &vdev->vdev.vqs, list)
		vring_interrupt(0, vq);

	return IRQ_HANDLED;
}
static void vop_virtio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
		container_of(_d, struct virtio_device, dev);
	struct _vop_vdev *vop_vdev =
		container_of(vdev, struct _vop_vdev, vdev);

	kfree(vop_vdev);
}
/*
 * Adds a new device and registers it with virtio; the appropriate drivers
 * are then loaded by the device model.
 */
static int _vop_add_device(struct mic_device_desc __iomem *d,
			   unsigned int offset, struct vop_device *vpdev,
			   int dnode)
{
	struct _vop_vdev *vdev, *reg_dev = NULL;
	int ret;
	u8 type = ioread8(&d->type);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->vpdev = vpdev;
	vdev->vdev.dev.parent = &vpdev->dev;
	vdev->vdev.dev.release = vop_virtio_release_dev;
	vdev->vdev.id.device = type;
	vdev->vdev.config = &vop_vq_config_ops;
	vdev->desc = d;
	vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
	vdev->dnode = dnode;
	vdev->vdev.priv = (void *)(unsigned long)dnode;
	init_completion(&vdev->reset_done);

	vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
			vop_virtio_intr_handler, "virtio intr",
			vdev, vdev->h2c_vdev_db);
	if (IS_ERR(vdev->virtio_cookie)) {
		ret = PTR_ERR(vdev->virtio_cookie);
		goto kfree;
	}
	iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
	vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);

	ret = register_virtio_device(&vdev->vdev);
	reg_dev = vdev;
	if (ret) {
		dev_err(_vop_dev(vdev),
			"Failed to register vop device %u type %u\n",
			offset, type);
		goto free_irq;
	}
	writeq((unsigned long)vdev, &vdev->dc->vdev);
	dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
		__func__, offset, type, vdev);

	return 0;

free_irq:
	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
	if (reg_dev)
		put_device(&vdev->vdev.dev);
	else
		kfree(vdev);
	return ret;
}
/*
 * match for a vop device with a specific desc pointer
 */
static int vop_match_desc(struct device *dev, void *data)
{
	struct virtio_device *_dev = dev_to_virtio(dev);
	struct _vop_vdev *vdev = to_vopvdev(_dev);

	return vdev->desc == (void __iomem *)data;
}
static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc)
{
	return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
}
static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
				      unsigned int offset,
				      struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = vop_dc_to_vdev(dc);

	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
		return;

	dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
	virtio_config_changed(&vdev->vdev);
	iowrite8(1, &dc->guest_ack);
}
/*
 * removes a virtio device if a hot remove event has been
 * requested by the host.
 */
static int _vop_remove_device(struct mic_device_desc __iomem *d,
			      unsigned int offset, struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
	u8 status;
	int ret = -1;

	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
		struct device *dev = get_device(&vdev->vdev.dev);

		dev_dbg(&vpdev->dev,
			"%s %d config_change %d type %d vdev %p\n",
			__func__, __LINE__,
			ioread8(&dc->config_change), ioread8(&d->type), vdev);
		status = ioread8(&d->status);
		reinit_completion(&vdev->reset_done);
		unregister_virtio_device(&vdev->vdev);
		vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
		iowrite8(-1, &dc->h2c_vdev_db);
		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
			wait_for_completion(&vdev->reset_done);
		put_device(dev);
		iowrite8(1, &dc->guest_ack);
		dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
			__func__, __LINE__, ioread8(&dc->guest_ack));
		iowrite8(-1, &d->type);
		ret = 0;
	}
	return ret;
}
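/*
 * Hot remove therefore unwinds a device in place: the virtio device is
 * unregistered, its interrupt freed, the h2c doorbell invalidated, and, if
 * a driver was running, the guest waits for reset_done before acking with
 * guest_ack and marking the descriptor type as -1 (free slot).
 */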
#define REMOVE_DEVICES true
static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
			      bool remove, int dnode)
{
	s8 type;
	unsigned int i;
	struct mic_device_desc __iomem *d;
	struct mic_device_ctrl __iomem *dc;
	struct device *dev;
	int ret;

	for (i = sizeof(struct mic_bootparam);
	     i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
		d = dp + i;
		dc = (void __iomem *)d + _vop_aligned_desc_size(d);
		/*
		 * This read barrier is paired with the corresponding write
		 * barrier on the host which is inserted before adding or
		 * removing a virtio device descriptor, by updating the type.
		 */
		rmb();
		type = ioread8(&d->type);

		/* An unused slot ends the device page. */
		if (type == 0)
			break;
		/* A type of -1 marks a slot whose device was removed. */
		if (type == -1)
			continue;

		/* device already exists */
		dev = device_find_child(&vpdev->dev, (void __force *)d,
					vop_match_desc);
		if (dev) {
			if (remove)
				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
					 &dc->config_change);
			put_device(dev);
			_vop_handle_config_change(d, i, vpdev);
			ret = _vop_remove_device(d, i, vpdev);
			if (remove) {
				iowrite8(0, &dc->config_change);
				iowrite8(0, &dc->guest_ack);
			}
			continue;
		}

		/* new device */
		dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
			__func__, __LINE__, d);
		if (!remove)
			_vop_add_device(d, i, vpdev, dnode);
	}
}
static void vop_scan_devices(struct vop_info *vi,
			     struct vop_device *vpdev, bool remove)
{
	void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);

	if (!dp)
		return;
	mutex_lock(&vi->vop_mutex);
	_vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
	mutex_unlock(&vi->vop_mutex);
}
/*
 * vop_hotplug_devices tries to find changes in the device page.
 */
static void vop_hotplug_devices(struct work_struct *work)
{
	struct vop_info *vi = container_of(work, struct vop_info,
					   hotplug_work);

	vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
}
/*
 * Interrupt handler for hot plug/config changes etc.
 */
static irqreturn_t vop_extint_handler(int irq, void *data)
{
	struct vop_info *vi = data;
	struct mic_bootparam __iomem *bp;
	struct vop_device *vpdev = vi->vpdev;

	bp = vpdev->hw_ops->get_remote_dp(vpdev);
	dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
		__func__, __LINE__);
	vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
	schedule_work(&vi->hotplug_work);

	return IRQ_HANDLED;
}
static int vop_driver_probe(struct vop_device *vpdev)
{
	struct vop_info *vi;
	int rc;

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi) {
		rc = -ENOMEM;
		goto exit;
	}
	dev_set_drvdata(&vpdev->dev, vi);
	vi->vpdev = vpdev;

	mutex_init(&vi->vop_mutex);
	INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);

	if (vpdev->dnode) {
		rc = vop_host_init(vi);
		if (rc < 0)
			goto free;
	} else {
		struct mic_bootparam __iomem *bootparam;

		vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);

		vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
		vi->cookie = vpdev->hw_ops->request_irq(vpdev,
							vop_extint_handler,
							"virtio_config_intr",
							vi, vi->h2c_config_db);
		if (IS_ERR(vi->cookie)) {
			rc = PTR_ERR(vi->cookie);
			goto free;
		}
		bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
		iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
	}
	vop_init_debugfs(vi);

	return 0;
free:
	kfree(vi);
exit:
	return rc;
}
static void vop_driver_remove(struct vop_device *vpdev)
{
	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);

	if (vpdev->dnode) {
		vop_host_uninit(vi);
	} else {
		struct mic_bootparam __iomem *bootparam =
			vpdev->hw_ops->get_remote_dp(vpdev);
		if (bootparam)
			iowrite8(-1, &bootparam->h2c_config_db);
		vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
		flush_work(&vi->hotplug_work);
		vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
	}

	vop_exit_debugfs(vi);
	kfree(vi);
}
static struct vop_device_id id_table[] = {
	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
	{ 0 },
};
static struct vop_driver vop_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = vop_driver_probe,
	.remove = vop_driver_remove,
};
module_vop_driver(vop_driver);
MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
MODULE_LICENSE("GPL v2");