// SPDX-License-Identifier: GPL-2.0-only
/*
 * VIRTIO based driver for vDPA device
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/virtio.h>
#include <linux/vdpa.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MOD_VERSION  "0.1"
#define MOD_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define MOD_DESC     "vDPA bus driver for virtio devices"
#define MOD_LICENSE  "GPL v2"

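/*
 * Per-device state: wraps the virtio_device exposed to virtio drivers
 * around the underlying vdpa_device whose config ops implement the
 * transport.
 */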
struct virtio_vdpa_device {
	struct virtio_device vdev;
	struct vdpa_device *vdpa;
	u64 features;

	/* The lock to protect virtqueue list */
	spinlock_t lock;
	/* List of virtio_vdpa_vq_info */
	struct list_head virtqueues;
};

struct virtio_vdpa_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};

static inline struct virtio_vdpa_device *
to_virtio_vdpa_device(struct virtio_device *dev)
{
	return container_of(dev, struct virtio_vdpa_device, vdev);
}

static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
{
	return to_virtio_vdpa_device(vdev)->vdpa;
}

static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
			    void *buf, unsigned len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->get_config(vdpa, offset, buf, len);
}

static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
			    const void *buf, unsigned len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->set_config(vdpa, offset, buf, len);
}

static u32 virtio_vdpa_generation(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_generation)
		return ops->get_generation(vdpa);

	return 0;
}

static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_status(vdpa);
}

static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->set_status(vdpa, status);
}

static void virtio_vdpa_reset(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->set_status(vdpa, 0);
}

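/* Virtqueue notification: forward the driver's kick to the vDPA device. */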
static bool virtio_vdpa_notify(struct virtqueue *vq)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->kick_vq(vdpa, vq->index);

	return true;
}

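/* Config-change interrupt handler, installed via set_config_cb in find_vqs. */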
static irqreturn_t virtio_vdpa_config_cb(void *private)
{
	struct virtio_vdpa_device *vd_dev = private;

	virtio_config_changed(&vd_dev->vdev);

	return IRQ_HANDLED;
}

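/* Per-virtqueue interrupt handler: hand the event to the vring code. */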
static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
{
	struct virtio_vdpa_vq_info *info = private;

	return vring_interrupt(0, info->vq);
}

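/*
 * Create one virtqueue: allocate the vring in kernel memory, then program
 * the vDPA device with the queue's callback, size and ring addresses
 * before marking it ready.
 */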
static struct virtqueue *
virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
		     void (*callback)(struct virtqueue *vq),
		     const char *name, bool ctx)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_vq_info *info;
	struct vdpa_callback cb;
	struct virtqueue *vq;
	u64 desc_addr, driver_addr, device_addr;
	unsigned long flags;
	u32 align, num;
	int err;

	if (!name)
		return NULL;

	/* Queue shouldn't already be set up. */
	if (ops->get_vq_ready(vdpa, index))
		return ERR_PTR(-ENOENT);

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	num = ops->get_vq_num_max(vdpa);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	align = ops->get_vq_align(vdpa);
	vq = vring_create_virtqueue(index, num, align, vdev,
				    true, true, ctx,
				    virtio_vdpa_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	/* Setup virtqueue callback */
	cb.callback = virtio_vdpa_virtqueue_cb;
	cb.private = info;
	ops->set_vq_cb(vdpa, index, &cb);
	ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));

	desc_addr = virtqueue_get_desc_addr(vq);
	driver_addr = virtqueue_get_avail_addr(vq);
	device_addr = virtqueue_get_used_addr(vq);

	if (ops->set_vq_address(vdpa, index,
				desc_addr, driver_addr,
				device_addr)) {
		err = -EINVAL;
		goto err_vq;
	}

	ops->set_vq_ready(vdpa, index, 1);

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vd_dev->lock, flags);
	list_add(&info->node, &vd_dev->virtqueues);
	spin_unlock_irqrestore(&vd_dev->lock, flags);

	return vq;

err_vq:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	ops->set_vq_ready(vdpa, index, 0);
	/* vDPA driver should make sure vq is stopped here */
	WARN_ON(ops->get_vq_ready(vdpa, index));
	kfree(info);
	return ERR_PTR(err);
}

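/* Tear down one virtqueue: unlink it, deactivate it, then free the vring. */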
static void virtio_vdpa_del_vq(struct virtqueue *vq)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
	struct vdpa_device *vdpa = vd_dev->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_vq_info *info = vq->priv;
	unsigned int index = vq->index;
	unsigned long flags;

	spin_lock_irqsave(&vd_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vd_dev->lock, flags);

	/* Select and deactivate the queue */
	ops->set_vq_ready(vdpa, index, 0);
	WARN_ON(ops->get_vq_ready(vdpa, index));

	vring_del_virtqueue(vq);

	kfree(info);
}

static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_vdpa_del_vq(vq);
}

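/*
 * The find_vqs transport op: set up each requested queue in turn, then
 * register the config-change callback with the device.
 */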
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned nvqs,
				struct virtqueue *vqs[],
				vq_callback_t *callbacks[],
				const char * const names[],
				const bool *ctx,
				struct irq_affinity *desc)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;
	int i, err, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
					      callbacks[i], names[i], ctx ?
					      ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto err_setup_vq;
		}
	}

	cb.callback = virtio_vdpa_config_cb;
	cb.private = vd_dev;
	ops->set_config_cb(vdpa, &cb);

	return 0;

err_setup_vq:
	virtio_vdpa_del_vqs(vdev);
	return err;
}

static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_features(vdpa);
}

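/* Let virtio_ring filter the transport features before committing them. */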
static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	return ops->set_features(vdpa, vdev->features);
}

static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_dev->vdpa;

	return dev_name(&vdpa->dev);
}

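/* virtio config ops, each routed to the corresponding vDPA config op. */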
static const struct virtio_config_ops virtio_vdpa_config_ops = {
	.get		= virtio_vdpa_get,
	.set		= virtio_vdpa_set,
	.generation	= virtio_vdpa_generation,
	.get_status	= virtio_vdpa_get_status,
	.set_status	= virtio_vdpa_set_status,
	.reset		= virtio_vdpa_reset,
	.find_vqs	= virtio_vdpa_find_vqs,
	.del_vqs	= virtio_vdpa_del_vqs,
	.get_features	= virtio_vdpa_get_features,
	.finalize_features = virtio_vdpa_finalize_features,
	.bus_name	= virtio_vdpa_bus_name,
};

static void virtio_vdpa_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
	       container_of(_d, struct virtio_device, dev);
	struct virtio_vdpa_device *vd_dev =
	       container_of(vdev, struct virtio_vdpa_device, vdev);

	kfree(vd_dev);
}

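/*
 * Bus probe: allocate the wrapper device, query device/vendor IDs from
 * the vDPA device and register on the virtio bus; once registration has
 * been attempted, cleanup happens through the release callback.
 */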
static int virtio_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
	int ret = -EINVAL;

	vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
	if (!vd_dev)
		return -ENOMEM;

	vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
	vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
	vd_dev->vdev.config = &virtio_vdpa_config_ops;
	vd_dev->vdpa = vdpa;
	INIT_LIST_HEAD(&vd_dev->virtqueues);
	spin_lock_init(&vd_dev->lock);

	vd_dev->vdev.id.device = ops->get_device_id(vdpa);
	if (vd_dev->vdev.id.device == 0)
		goto err;

	vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
	ret = register_virtio_device(&vd_dev->vdev);
	reg_dev = vd_dev;
	if (ret)
		goto err;

	vdpa_set_drvdata(vdpa, vd_dev);

	return 0;

err:
	if (reg_dev)
		put_device(&vd_dev->vdev.dev);
	else
		kfree(vd_dev);
	return ret;
}

static void virtio_vdpa_remove(struct vdpa_device *vdpa)
{
	struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);

	unregister_virtio_device(&vd_dev->vdev);
}

static struct vdpa_driver virtio_vdpa_driver = {
	.driver = {
		.name	= "virtio_vdpa",
	},
	.probe	= virtio_vdpa_probe,
	.remove = virtio_vdpa_remove,
};

module_vdpa_driver(virtio_vdpa_driver);

MODULE_VERSION(MOD_VERSION);
MODULE_LICENSE(MOD_LICENSE);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);