/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */

#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio does not use). */
#define virtio_mb() smp_mb()
#define virtio_rmb() smp_rmb()
#define virtio_wmb() smp_wmb()
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb() mb()
#define virtio_rmb() rmb()
#define virtio_wmb() wmb()
#endif

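/* Throughout this file the barriers pair up as follows: virtio_wmb() before
 * publishing a new avail index, virtio_rmb() before reading used ring entries
 * exposed by the host, and virtio_mb() where an index update must be visible
 * before we re-check a flag or index owned by the other side. */
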
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

int virtqueue_add_buf_gfp(struct virtqueue *_vq,
			  struct scatterlist sg[],
			  unsigned int out,
			  unsigned int in,
			  void *data,
			  gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);

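/* virtqueue_kick - expose the buffers added since the last kick to the host
 * and, unless the host has suppressed notifications, notify it. */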
void virtqueue_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;

	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb();

	old = vq->vring.avail->idx;
	new = vq->vring.avail->idx = old + vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	virtio_mb();

	if (vq->event ?
	    vring_need_event(vring_avail_event(&vq->vring), new, old) :
	    !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

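/* Return the descriptor chain starting at head to the free list, clearing the
 * caller's token and freeing any indirect table it used. */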
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

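/* virtqueue_get_buf - return the token for the next used buffer, or NULL if
 * the ring is broken or nothing has been used yet; *len is set to the number
 * of bytes the host wrote into the buffer. */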
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb();

	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb();
	}

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

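/* virtqueue_disable_cb - ask the host not to interrupt us; this is only a
 * hint, so callbacks may still fire. */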
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

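/* virtqueue_enable_cb - re-enable callbacks after virtqueue_disable_cb();
 * returns false if buffers are already pending, in which case the caller
 * should process them rather than wait for an interrupt. */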
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

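/* virtqueue_enable_cb_delayed - like virtqueue_enable_cb(), but ask the host
 * to defer the interrupt until roughly 3/4 of the currently outstanding
 * buffers have been used. */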
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb();
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

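/* virtqueue_detach_unused_buf - detach and return the token of a buffer that
 * was added but never used by the host; returns NULL when none remain.
 * Intended for cleanup when a device is being torn down. */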
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

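/* vring_interrupt - interrupt handler entry point for transports; dispatches
 * to the virtqueue's callback when there is work, otherwise returns IRQ_NONE. */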
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

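/* vring_new_virtqueue - create a virtqueue on top of ring memory at @pages,
 * which the transport has already allocated and shared with the host.
 * @num must be a power of 2. */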
struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

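/* vring_del_virtqueue - unlink and free a virtqueue created by
 * vring_new_virtqueue(); the ring memory itself remains owned by the
 * transport. */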
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

MODULE_LICENSE("GPL");