/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
		/* avoid kmemleak false positive (hidden by virt_to_phys) */
		kmemleak_ignore(desc);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		indirect = true;
	} else {
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
		indirect = false;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
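
/*
 * Illustrative sketch (not part of the core): a driver posting one
 * device-readable request followed by one device-writable response
 * buffer, the shape virtio-blk/virtio-scsi style requests take.  The
 * request/response layout and function name are hypothetical.
 */
static int __maybe_unused example_do_request(struct virtqueue *vq,
					     void *req, unsigned int req_len,
					     void *resp, unsigned int resp_len,
					     void *token)
{
	struct scatterlist hdr, status;
	struct scatterlist *sgs[2];
	int err;

	sg_init_one(&hdr, req, req_len);
	sg_init_one(&status, resp, resp_len);
	sgs[0] = &hdr;		/* out: readable by the device */
	sgs[1] = &status;	/* in: writable by the device */

	err = virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vq);
	return err;
}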
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
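
/*
 * Illustrative sketch (not part of the core): refilling a receive
 * queue with single-buffer entries until it is full, as console/net
 * style drivers do.  The PAGE_SIZE buffer size is a hypothetical
 * choice; -ENOSPC from virtqueue_add_inbuf() means the ring is full.
 */
static void __maybe_unused example_fill_recv(struct virtqueue *vq)
{
	struct scatterlist sg;
	void *buf;

	for (;;) {
		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			break;
		sg_init_one(&sg, buf, PAGE_SIZE);
		/* Use the buffer itself as the token so we can kfree() it
		 * when virtqueue_get_buf() hands it back. */
		if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0) {
			kfree(buf);
			break;
		}
	}
	virtqueue_kick(vq);
}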
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
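
/*
 * Illustrative sketch (not part of the core): the split kick lets a
 * driver drop its queue lock before the potentially slow notification,
 * since only the prepare half must be serialized.  The lock parameter
 * and function name are hypothetical.
 */
static void __maybe_unused example_add_and_kick(struct virtqueue *vq,
						spinlock_t *lock,
						struct scatterlist *sg,
						void *token)
{
	bool kick;

	spin_lock(lock);
	if (virtqueue_add_outbuf(vq, sg, 1, token, GFP_ATOMIC) < 0) {
		spin_unlock(lock);
		return;
	}
	kick = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	/* Notify outside the lock; this half needs no serialization. */
	if (kick)
		virtqueue_notify(vq);
}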
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}
/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
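
/*
 * Illustrative sketch (not part of the core): draining all completed
 * buffers, typically from the virtqueue callback.  The completion
 * handler passed in is a hypothetical per-driver function.
 */
static void __maybe_unused example_reap(struct virtqueue *vq,
					void (*complete)(void *token,
							 unsigned int len))
{
	unsigned int len;
	void *token;

	/* Each token is whatever the driver passed to virtqueue_add_*(). */
	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		complete(token, len);
}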
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
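
/*
 * Illustrative sketch (not part of the core): the race-free re-enable
 * pattern the prepare/poll split is designed for.  Returns true if
 * callbacks were re-enabled with no work pending in the window.
 */
static bool __maybe_unused example_try_reenable(struct virtqueue *vq)
{
	unsigned last_used_idx;

	last_used_idx = virtqueue_enable_cb_prepare(vq);
	if (virtqueue_poll(vq, last_used_idx)) {
		/* A buffer arrived in the window: back off and let the
		 * caller keep processing. */
		virtqueue_disable_cb(vq);
		return false;
	}
	return true;
}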
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
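
/*
 * Illustrative sketch (not part of the core): deferring completion
 * interrupts until ~3/4 of the in-flight buffers are used, the way
 * virtio_net-style drivers treat their TX queue.  The function name
 * is hypothetical.
 */
static void __maybe_unused example_done_with_work(struct virtqueue *vq)
{
	if (!virtqueue_enable_cb_delayed(vq)) {
		/* Many buffers already pending: keep callbacks off and
		 * let the caller reap them now instead of waiting. */
		virtqueue_disable_cb(vq);
	}
}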
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
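
/*
 * Illustrative sketch (not part of the core): transports hook this
 * handler up directly as the queue's irq handler, passing the
 * virtqueue as the cookie so the handler can find it.  Roughly:
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 *
 * The exact flags and irq source are transport-specific.
 */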
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
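
/*
 * Illustrative sketch (not part of the core): a transport allocating
 * the ring pages and creating a virtqueue on top of them, similar in
 * shape to what virtio_mmio does.  The queue size, alignment, name
 * and function name are hypothetical.
 */
static struct virtqueue * __maybe_unused
example_setup_vq(struct virtio_device *vdev, unsigned int index,
		 bool (*notify)(struct virtqueue *),
		 void (*callback)(struct virtqueue *))
{
	const unsigned int num = 256;	/* must be a power of 2 */
	int order = get_order(vring_size(num, PAGE_SIZE));
	struct virtqueue *vq;
	void *pages;

	pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return NULL;

	vq = vring_new_virtqueue(index, num, PAGE_SIZE, vdev,
				 true /* weak_barriers */, pages,
				 notify, callback, "example");
	if (!vq)
		free_pages((unsigned long)pages, order);
	return vq;
}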
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);
void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);
MODULE_LICENSE("GPL");