// SPDX-License-Identifier: GPL-2.0-only
/*
 * VIRTIO based driver for vDPA device
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/group_cpus.h>
#include <linux/virtio.h>
#include <linux/vdpa.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MOD_VERSION  "0.1"
#define MOD_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define MOD_DESC     "vDPA bus driver for virtio devices"
#define MOD_LICENSE  "GPL v2"

struct virtio_vdpa_device {
	struct virtio_device vdev;
	struct vdpa_device *vdpa;

	/* The lock to protect virtqueue list */
	spinlock_t lock;
	/* List of virtio_vdpa_vq_info */
	struct list_head virtqueues;
};

struct virtio_vdpa_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};

static inline struct virtio_vdpa_device *
to_virtio_vdpa_device(struct virtio_device *dev)
{
	return container_of(dev, struct virtio_vdpa_device, vdev);
}

static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
{
	return to_virtio_vdpa_device(vdev)->vdpa;
}

static void virtio_vdpa_get(struct virtio_device *vdev, unsigned int offset,
			    void *buf, unsigned int len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_get_config(vdpa, offset, buf, len);
}

static void virtio_vdpa_set(struct virtio_device *vdev, unsigned int offset,
			    const void *buf, unsigned int len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_set_config(vdpa, offset, buf, len);
}

static u32 virtio_vdpa_generation(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_generation)
		return ops->get_generation(vdpa);

	return 0;
}

static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_status(vdpa);
}

static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	return vdpa_set_status(vdpa, status);
}

static void virtio_vdpa_reset(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_reset(vdpa, 0);
}

static bool virtio_vdpa_notify(struct virtqueue *vq)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->kick_vq(vdpa, vq->index);

	return true;
}

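/*
 * With VIRTIO_F_NOTIFICATION_DATA negotiated, the kick carries extra queue
 * state (the value produced by vring_notification_data()) instead of being a
 * bare doorbell write; setup_vq() only selects this variant when the parent
 * vDPA driver provides kick_vq_with_data.
 */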
static bool virtio_vdpa_notify_with_data(struct virtqueue *vq)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 data = vring_notification_data(vq);

	ops->kick_vq_with_data(vdpa, data);

	return true;
}

static irqreturn_t virtio_vdpa_config_cb(void *private)
{
	struct virtio_vdpa_device *vd_dev = private;

	virtio_config_changed(&vd_dev->vdev);

	return IRQ_HANDLED;
}

static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
{
	struct virtio_vdpa_vq_info *info = private;

	return vring_interrupt(0, info->vq);
}

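/*
 * Per-queue setup: allocate the info structure, create a vring on the DMA
 * device chosen by the parent, then program the callback, size, ring
 * addresses and initial state through the parent's config ops before marking
 * the queue ready and linking it into the device's virtqueue list.
 */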
static struct virtqueue *
virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
		     void (*callback)(struct virtqueue *vq),
		     const char *name, bool ctx)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	struct device *dma_dev;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_vq_info *info;
	bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify;
	struct vdpa_callback cb;
	struct virtqueue *vq;
	u64 desc_addr, driver_addr, device_addr;
	/* Assume split virtqueue, switch to packed if necessary */
	struct vdpa_vq_state state = {0};
	unsigned long flags;
	u32 align, max_num, min_num = 1;
	bool may_reduce_num = true;
	int err;

	if (!name)
		return NULL;

	if (index >= vdpa->nvqs)
		return ERR_PTR(-ENOENT);

	/* We cannot accept VIRTIO_F_NOTIFICATION_DATA without kick_vq_with_data */
	if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA)) {
		if (ops->kick_vq_with_data)
			notify = virtio_vdpa_notify_with_data;
		else
			__virtio_clear_bit(vdev, VIRTIO_F_NOTIFICATION_DATA);
	}

	/* Queue shouldn't already be set up. */
	if (ops->get_vq_ready(vdpa, index))
		return ERR_PTR(-ENOENT);

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);
	if (ops->get_vq_size)
		max_num = ops->get_vq_size(vdpa, index);
	else
		max_num = ops->get_vq_num_max(vdpa);

	if (!max_num) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdpa);

	may_reduce_num = (max_num == min_num) ? false : true;

	/* Create the vring */
	align = ops->get_vq_align(vdpa);

	if (ops->get_vq_dma_dev)
		dma_dev = ops->get_vq_dma_dev(vdpa, index);
	else
		dma_dev = vdpa_get_dma_dev(vdpa);
	vq = vring_create_virtqueue_dma(index, max_num, align, vdev,
					true, may_reduce_num, ctx,
					notify, callback, name, dma_dev);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->num_max = max_num;

	/* Setup virtqueue callback */
	cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
	cb.private = info;
	cb.trigger = NULL;
	ops->set_vq_cb(vdpa, index, &cb);
	ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));

	desc_addr = virtqueue_get_desc_addr(vq);
	driver_addr = virtqueue_get_avail_addr(vq);
	device_addr = virtqueue_get_used_addr(vq);

	if (ops->set_vq_address(vdpa, index,
				desc_addr, driver_addr,
				device_addr)) {
		err = -EINVAL;
		goto err_vq;
	}

	/* reset virtqueue state index */
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
		struct vdpa_vq_state_packed *s = &state.packed;

		s->last_avail_counter = 1;
		s->last_avail_idx = 0;
		s->last_used_counter = 1;
		s->last_used_idx = 0;
	}
	err = ops->set_vq_state(vdpa, index, &state);
	if (err)
		goto err_vq;

	ops->set_vq_ready(vdpa, index, 1);

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vd_dev->lock, flags);
	list_add(&info->node, &vd_dev->virtqueues);
	spin_unlock_irqrestore(&vd_dev->lock, flags);

	return vq;

err_vq:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	ops->set_vq_ready(vdpa, index, 0);
	/* VDPA driver should make sure vq is stopped here */
	WARN_ON(ops->get_vq_ready(vdpa, index));
	kfree(info);
	return ERR_PTR(err);
}

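/*
 * Teardown mirrors setup: unlink the queue from the device list, deactivate
 * it on the device side (best effort, since the parent may already have been
 * reset), then delete the vring and free the per-queue info.
 */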
static void virtio_vdpa_del_vq(struct virtqueue *vq)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
	struct vdpa_device *vdpa = vd_dev->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_vq_info *info = vq->priv;
	unsigned int index = vq->index;
	unsigned long flags;

	spin_lock_irqsave(&vd_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vd_dev->lock, flags);

	/* Select and deactivate the queue (best effort) */
	ops->set_vq_ready(vdpa, index, 0);

	vring_del_virtqueue(vq);

	kfree(info);
}

static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_vdpa_del_vq(vq);
}

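/*
 * The helpers below spread virtqueues across CPUs in essentially the same way
 * the IRQ core spreads managed interrupts, reusing group_cpus_evenly(), but
 * without allocating any interrupts; the resulting masks are only handed to
 * the parent's set_vq_affinity op.
 */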
static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
{
	affd->nr_sets = 1;
	affd->set_size[0] = affvecs;
}

static struct cpumask *
create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
	unsigned int affvecs = 0, curvec, usedvecs, i;
	struct cpumask *masks = NULL;

	if (nvecs > affd->pre_vectors + affd->post_vectors)
		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;

	if (!affd->calc_sets)
		affd->calc_sets = default_calc_sets;

	affd->calc_sets(affd, affvecs);

	if (!affvecs)
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return NULL;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_setall(&masks[curvec]);

	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
		unsigned int this_vecs = affd->set_size[i];
		int j;
		struct cpumask *result = group_cpus_evenly(this_vecs);

		if (!result) {
			kfree(masks);
			return NULL;
		}

		for (j = 0; j < this_vecs; j++)
			cpumask_copy(&masks[curvec + j], &result[j]);
		kfree(result);

		curvec += this_vecs;
		usedvecs += this_vecs;
	}

	/* Fill out vectors at the end that don't need affinity */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors + affvecs;
	else
		curvec = affd->pre_vectors + usedvecs;
	for (; curvec < nvecs; curvec++)
		cpumask_setall(&masks[curvec]);

	return masks;
}

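/*
 * find_vqs() creates one virtqueue per vqs_info entry and, when the caller
 * passed an irq_affinity descriptor and the parent implements
 * set_vq_affinity, applies the computed per-queue CPU masks as well. The
 * config-change callback is installed once all queues exist.
 */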
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				struct virtqueue *vqs[],
				struct virtqueue_info vqs_info[],
				struct irq_affinity *desc)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;
	struct cpumask *masks;
	struct vdpa_callback cb;
	bool has_affinity = desc && ops->set_vq_affinity;
	int i, err, queue_idx = 0;

	if (has_affinity) {
		masks = create_affinity_masks(nvqs, desc);
		if (!masks)
			return -ENOMEM;
	}

	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, vqi->callback,
					      vqi->name, vqi->ctx);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto err_setup_vq;
		}

		if (has_affinity)
			ops->set_vq_affinity(vdpa, i, &masks[i]);
	}

	cb.callback = virtio_vdpa_config_cb;
	cb.private = vd_dev;
	ops->set_config_cb(vdpa, &cb);
	if (has_affinity)
		kfree(masks);

	return 0;

err_setup_vq:
	virtio_vdpa_del_vqs(vdev);
	if (has_affinity)
		kfree(masks);
	return err;
}

static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_device_features(vdpa);
}

static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	return vdpa_set_features(vdpa, vdev->features);
}

static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_dev->vdpa;

	return dev_name(&vdpa->dev);
}

static int virtio_vdpa_set_vq_affinity(struct virtqueue *vq,
				       const struct cpumask *cpu_mask)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
	struct vdpa_device *vdpa = vd_dev->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	unsigned int index = vq->index;

	if (ops->set_vq_affinity)
		return ops->set_vq_affinity(vdpa, index, cpu_mask);

	return 0;
}

static const struct cpumask *
virtio_vdpa_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_vq_affinity)
		return ops->get_vq_affinity(vdpa, index);

	return NULL;
}

static const struct virtio_config_ops virtio_vdpa_config_ops = {
	.get = virtio_vdpa_get,
	.set = virtio_vdpa_set,
	.generation = virtio_vdpa_generation,
	.get_status = virtio_vdpa_get_status,
	.set_status = virtio_vdpa_set_status,
	.reset = virtio_vdpa_reset,
	.find_vqs = virtio_vdpa_find_vqs,
	.del_vqs = virtio_vdpa_del_vqs,
	.get_features = virtio_vdpa_get_features,
	.finalize_features = virtio_vdpa_finalize_features,
	.bus_name = virtio_vdpa_bus_name,
	.set_vq_affinity = virtio_vdpa_set_vq_affinity,
	.get_vq_affinity = virtio_vdpa_get_vq_affinity,
};

static void virtio_vdpa_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
	       container_of(_d, struct virtio_device, dev);
	struct virtio_vdpa_device *vd_dev =
	       container_of(vdev, struct virtio_vdpa_device, vdev);

	kfree(vd_dev);
}

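/*
 * Probe allocates the virtio device, points it at the parent's DMA device and
 * at virtio_vdpa_config_ops, and registers it on the virtio bus. Once
 * register_virtio_device() has been called the embedded struct device owns
 * the allocation, so the error path drops it with put_device() rather than
 * kfree().
 */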
static int virtio_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
	int ret = -EINVAL;

	vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
	if (!vd_dev)
		return -ENOMEM;

	vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
	vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
	vd_dev->vdev.config = &virtio_vdpa_config_ops;
	vd_dev->vdpa = vdpa;
	INIT_LIST_HEAD(&vd_dev->virtqueues);
	spin_lock_init(&vd_dev->lock);

	vd_dev->vdev.id.device = ops->get_device_id(vdpa);
	if (vd_dev->vdev.id.device == 0)
		goto err;

	vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
	ret = register_virtio_device(&vd_dev->vdev);
	reg_dev = vd_dev;
	if (ret)
		goto err;

	vdpa_set_drvdata(vdpa, vd_dev);

	return 0;

err:
	if (reg_dev)
		put_device(&vd_dev->vdev.dev);
	else
		kfree(vd_dev);
	return ret;
}

static void virtio_vdpa_remove(struct vdpa_device *vdpa)
{
	struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);

	unregister_virtio_device(&vd_dev->vdev);
}

static struct vdpa_driver virtio_vdpa_driver = {
	.driver = {
		.name = "virtio_vdpa",
	},
	.probe = virtio_vdpa_probe,
	.remove = virtio_vdpa_remove,
};

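/*
 * module_vdpa_driver() expands to the module_init()/module_exit() pair that
 * registers and unregisters this driver on the vdpa bus.
 */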
module_vdpa_driver(virtio_vdpa_driver);

MODULE_VERSION(MOD_VERSION);
MODULE_LICENSE(MOD_LICENSE);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);