/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};
struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */
static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}
/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}
/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}
static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}
static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_num: the number of scatterlists readable by other side
 * @in_num: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
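
/*
 * Example (illustrative sketch only): queueing one device-readable and one
 * device-writable scatterlist with virtqueue_add_sgs().  The req/resp
 * structures, process path and the "my_vq" handle are hypothetical
 * driver-side placeholders, and error handling is kept to a minimum.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req, sizeof(req));		// readable by device
 *	sg_init_one(&status, &resp, sizeof(resp));	// writable by device
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	if (virtqueue_add_sgs(my_vq, sgs, 1, 1, &req, GFP_ATOMIC))
 *		return -ENOSPC;
 *	virtqueue_kick(my_vq);
 */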
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
		       void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}
/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
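
/*
 * Example (illustrative sketch only): the polling pattern that
 * virtqueue_enable_cb_prepare()/virtqueue_poll() make possible, roughly
 * what a NAPI-style driver does.  "my_vq" and process_buf() are
 * hypothetical placeholders.
 *
 *	unsigned opaque;
 *	unsigned int len;
 *	void *buf;
 *
 *	virtqueue_disable_cb(my_vq);
 *	while ((buf = virtqueue_get_buf(my_vq, &len)))
 *		process_buf(buf, len);
 *	opaque = virtqueue_enable_cb_prepare(my_vq);
 *	if (virtqueue_poll(my_vq, opaque)) {
 *		// Raced with the device: more buffers became used after the
 *		// check, so disable callbacks again and keep polling.
 *		virtqueue_disable_cb(my_vq);
 *	}
 */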
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
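
/*
 * Example (illustrative sketch only): the common transmit-completion
 * pattern behind virtqueue_enable_cb_delayed(), which asks for an
 * interrupt only after roughly 3/4 of the outstanding buffers have been
 * used.  free_old_buffers() and "my_vq" are hypothetical placeholders.
 *
 *	free_old_buffers(my_vq);
 *	if (!virtqueue_enable_cb_delayed(my_vq)) {
 *		// Many buffers were already used while re-enabling
 *		// callbacks; reap them now instead of waiting for an IRQ.
 *		virtqueue_disable_cb(my_vq);
 *		free_old_buffers(my_vq);
 *	}
 */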
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i, NULL);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}
static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}
struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
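
/*
 * Example (illustrative sketch only): roughly how a transport driver
 * might create a ring with the helper above.  The queue index, ring
 * size, alignment and the my_notify()/my_callback() handlers are
 * hypothetical placeholders.
 *
 *	struct virtqueue *vq;
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 */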
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
MODULE_LICENSE("GPL");