/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */

#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                BUG();                                          \
        } while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)                                          \
        do {                                                    \
                if ((_vq)->in_use)                              \
                        panic("%s:in_use = %i\n",               \
                              (_vq)->vq.name, (_vq)->in_use);   \
                (_vq)->in_use = __LINE__;                       \
        } while (0)
#define END_USE(_vq) \
        do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&_vq->vq.vdev->dev,                     \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                (_vq)->broken = true;                           \
        } while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
        struct virtqueue vq;

        /* Actual memory layout for this queue */
        struct vring vring;

        /* Can we use weak barriers? */
        bool weak_barriers;

        /* Other side has made a mess, don't try any more. */
        bool broken;

        /* Host supports indirect buffers */
        bool indirect;

        /* Host publishes avail event idx */
        bool event;

        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
        unsigned int num_added;

        /* Last used index we've seen. */
        u16 last_used_idx;

        /* Last written value to avail->flags */
        u16 avail_flags_shadow;

        /* Last written value to avail->idx in guest byte order */
        u16 avail_idx_shadow;

        /* How to notify other side. FIXME: commonalize hcalls! */
        bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;

        /* Figure out if their kicks are too delayed. */
        bool last_add_time_valid;
        ktime_t last_add_time;
#endif

        /* Tokens for callbacks. */
        void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
                                         unsigned int total_sg, gfp_t gfp)
{
        struct vring_desc *desc;
        unsigned int i;

        /*
         * We require lowmem mappings for the descriptors because
         * otherwise virt_to_phys will give us bogus addresses in the
         * virtqueue.
         */
        gfp &= ~__GFP_HIGHMEM;

        desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
        if (!desc)
                return NULL;

        for (i = 0; i < total_sg; i++)
                desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
        return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
                                struct scatterlist *sgs[],
                                unsigned int total_sg,
                                unsigned int out_sgs,
                                unsigned int in_sgs,
                                void *data,
                                gfp_t gfp)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        struct scatterlist *sg;
        struct vring_desc *desc;
        unsigned int i, n, avail, descs_used, uninitialized_var(prev);
        int head;
        bool indirect;

        START_USE(vq);

        BUG_ON(data == NULL);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return -EIO;
        }

#ifdef DEBUG
        {
                ktime_t now = ktime_get();

                /* No kick or get, with .1 second between?  Warn. */
                if (vq->last_add_time_valid)
                        WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
                                            > 100);
                vq->last_add_time = now;
                vq->last_add_time_valid = true;
        }
#endif

        BUG_ON(total_sg > vq->vring.num);
        BUG_ON(total_sg == 0);

        head = vq->free_head;

        /* If the host supports indirect descriptor tables, and we have multiple
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && total_sg > 1 && vq->vq.num_free)
                desc = alloc_indirect(_vq, total_sg, gfp);
        else
                desc = NULL;

        if (desc) {
                /* Use a single buffer which doesn't continue */
                vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
                vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
                /* avoid kmemleak false positive (hidden by virt_to_phys) */
                kmemleak_ignore(desc);
                vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

                /* Set up rest to use this indirect table. */
                i = 0;
                descs_used = 1;
                indirect = true;
        } else {
                desc = vq->vring.desc;
                i = head;
                descs_used = total_sg;
                indirect = false;
        }

        if (vq->vq.num_free < descs_used) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         descs_used, vq->vq.num_free);
                /* FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP. */
                if (out_sgs)
                        vq->notify(&vq->vq);
                END_USE(vq);
                return -ENOSPC;
        }

        /* We're about to use some buffers from the free list. */
        vq->vq.num_free -= descs_used;

        for (n = 0; n < out_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
                        desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
                        desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
                        prev = i;
                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
                }
        }
        for (; n < (out_sgs + in_sgs); n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
                        desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
                        desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
                        prev = i;
                        i = virtio16_to_cpu(_vq->vdev, desc[i].next);
                }
        }
        /* Last one doesn't continue. */
        desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

        /* Update free pointer */
        if (indirect)
                vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
        else
                vq->free_head = i;

        /* Set token. */
        vq->data[head] = data;

        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
        avail = vq->avail_idx_shadow & (vq->vring.num - 1);
        vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        virtio_wmb(vq->weak_barriers);
        vq->avail_idx_shadow++;
        vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
        vq->num_added++;

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);

        /* This is very unlikely, but theoretically possible.  Kick
         * just in case. */
        if (unlikely(vq->num_added == (1 << 16) - 1))
                virtqueue_kick(_vq);

        return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
                      struct scatterlist *sgs[],
                      unsigned int out_sgs,
                      unsigned int in_sgs,
                      void *data,
                      gfp_t gfp)
{
        unsigned int i, total_sg = 0;

        /* Count them first. */
        for (i = 0; i < out_sgs + in_sgs; i++) {
                struct scatterlist *sg;
                for (sg = sgs[i]; sg; sg = sg_next(sg))
                        total_sg++;
        }
        return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
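
/*
 * Example (illustrative sketch, not part of the original file): queuing a
 * request that has one device-readable part and one device-writable part.
 * "my_req", its members, and "err" are hypothetical driver-side names.
 *
 *      struct scatterlist hdr, status, *sgs[2];
 *      int err;
 *
 *      sg_init_one(&hdr, &my_req->hdr, sizeof(my_req->hdr));
 *      sg_init_one(&status, &my_req->status, sizeof(my_req->status));
 *      sgs[0] = &hdr;
 *      sgs[1] = &status;
 *      err = virtqueue_add_sgs(vq, sgs, 1, 1, my_req, GFP_ATOMIC);
 */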

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
                         struct scatterlist *sg, unsigned int num,
                         void *data,
                         gfp_t gfp)
{
        return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
                        struct scatterlist *sg, unsigned int num,
                        void *data,
                        gfp_t gfp)
{
        return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
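
/*
 * Example (sketch): replenishing a receive queue with an empty buffer, as a
 * virtio-net-style driver might; "buf" and "buf_len" are hypothetical. The
 * buffer itself serves as the token, so virtqueue_get_buf() returns it once
 * the device has filled it.
 *
 *      struct scatterlist sg;
 *
 *      sg_init_one(&sg, buf, buf_len);
 *      if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) == 0)
 *              virtqueue_kick(vq);
 */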

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *      if (virtqueue_kick_prepare(vq))
 *              virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 new, old;
        bool needs_kick;

        START_USE(vq);
        /* We need to expose available array entries before checking avail
         * event. */
        virtio_mb(vq->weak_barriers);

        old = vq->avail_idx_shadow - vq->num_added;
        new = vq->avail_idx_shadow;
        vq->num_added = 0;

#ifdef DEBUG
        if (vq->last_add_time_valid) {
                WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
                                              vq->last_add_time)) > 100);
        }
        vq->last_add_time_valid = false;
#endif

        if (vq->event) {
                needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
                                              new, old);
        } else {
                needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
        }
        END_USE(vq);
        return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (unlikely(vq->broken))
                return false;

        /* Prod other side to tell it about changes. */
        if (!vq->notify(_vq)) {
                vq->broken = true;
                return false;
        }
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
        if (virtqueue_kick_prepare(vq))
                return virtqueue_notify(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
        unsigned int i;

        /* Clear data ptr. */
        vq->data[head] = NULL;

        /* Put back on free list: find end */
        i = head;

        /* Free the indirect table */
        if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
                kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

        while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
                i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
                vq->vq.num_free++;
        }

        vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
        vq->free_head = head;
        /* Plus final descriptor */
        vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the driver wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        void *ret;
        unsigned int i;
        u16 last_used;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        /* Only get used array entries after they have been exposed by host. */
        virtio_rmb(vq->weak_barriers);

        last_used = (vq->last_used_idx & (vq->vring.num - 1));
        i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
        *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

        if (unlikely(i >= vq->vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
        if (unlikely(!vq->data[i])) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }

        /* detach_buf clears data, so grab it now. */
        ret = vq->data[i];
        detach_buf(vq, i);
        vq->last_used_idx++;
        /* If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call. */
        if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
                vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
                virtio_mb(vq->weak_barriers);
        }

#ifdef DEBUG
        vq->last_add_time_valid = false;
#endif

        END_USE(vq);
        return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
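
/*
 * Example (sketch): draining used buffers, typically from the virtqueue
 * callback. Each token returned is the "data" passed to virtqueue_add_*();
 * "process_buf" is a hypothetical driver helper.
 *
 *      unsigned int len;
 *      void *buf;
 *
 *      while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *              process_buf(buf, len);
 */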

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
                vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
                vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
        }
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 last_used_idx;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
        if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
                vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
                vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
        }
        vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
        END_USE(vq);
        return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        virtio_mb(vq->weak_barriers);
        return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
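
/*
 * Example (sketch): the race-free re-enable pattern these two calls are
 * meant for. If a buffer was used between the driver's last check and the
 * re-enable, virtqueue_poll() reports it and the driver keeps processing
 * instead of sleeping:
 *
 *      unsigned state = virtqueue_enable_cb_prepare(vq);
 *      if (virtqueue_poll(vq, state)) {
 *              virtqueue_disable_cb(vq);
 *              goto process_more;
 *      }
 */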

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
        unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

        return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 bufs;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always update the event index to keep code simple. */
        if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
                vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
                vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
        }
        /* TODO: tune this threshold */
        bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
        vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
        virtio_mb(vq->weak_barriers);
        if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
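
/*
 * Example (sketch): a transmit path that reclaims completed buffers lazily
 * can use the delayed variant so the device holds off interrupting until
 * roughly 3/4 of the in-flight buffers are used. A "false" return means
 * many buffers are already pending, so reclaim immediately (the helper
 * name below is hypothetical):
 *
 *      if (!virtqueue_enable_cb_delayed(vq))
 *              free_old_xmit_bufs(dev);
 */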

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i;
        void *buf;

        START_USE(vq);

        for (i = 0; i < vq->vring.num; i++) {
                if (!vq->data[i])
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->data[i];
                detach_buf(vq, i);
                vq->avail_idx_shadow--;
                vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
                END_USE(vq);
                return buf;
        }
        /* That should have freed everything. */
        BUG_ON(vq->vq.num_free != vq->vring.num);

        END_USE(vq);
        return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
                                      void *pages,
                                      bool (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *),
                                      const char *name)
{
        struct vring_virtqueue *vq;
        unsigned int i;

        /* We assume num is a power of 2. */
        if (num & (num - 1)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return NULL;
        }

        vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
        if (!vq)
                return NULL;

        vring_init(&vq->vring, num, pages, vring_align);
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.name = name;
        vq->vq.num_free = num;
        vq->vq.index = index;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->avail_flags_shadow = 0;
        vq->avail_idx_shadow = 0;
        vq->num_added = 0;
        list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
        vq->in_use = false;
        vq->last_add_time_valid = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

        /* No callback?  Tell other side not to bother us. */
        if (!callback) {
                vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
                vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
        }

        /* Put everything in free lists. */
        vq->free_head = 0;
        for (i = 0; i < num-1; i++) {
                vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
                vq->data[i] = NULL;
        }
        vq->data[i] = NULL;

        return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
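
/*
 * Example (sketch): how a transport might instantiate a vring over memory
 * it has already allocated and described to the device. "queue_pages",
 * "my_notify" and "my_callback" are hypothetical; legacy virtio-pci would
 * pass VIRTIO_PCI_VRING_ALIGN as the alignment.
 *
 *      vq = vring_new_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, vdev,
 *                               true, queue_pages, my_notify, my_callback,
 *                               "my-vq");
 *      if (!vq)
 *              return ERR_PTR(-ENOMEM);
 */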

void vring_del_virtqueue(struct virtqueue *vq)
{
        list_del(&vq->list);
        kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_INDIRECT_DESC:
                        break;
                case VIRTIO_RING_F_EVENT_IDX:
                        break;
                case VIRTIO_F_VERSION_1:
                        break;
                default:
                        /* We don't understand this bit. */
                        __virtio_clear_bit(vdev, i);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
        struct virtqueue *_vq;

        list_for_each_entry(_vq, &dev->vqs, list) {
                struct vring_virtqueue *vq = to_vvq(_vq);

                vq->broken = true;
        }
}
EXPORT_SYMBOL_GPL(virtio_break_device);

void *virtqueue_get_avail(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");