// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <xen/xen.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif
struct vring_desc_state_split {
	void *data;			/* Data for callback. */

	/* Indirect desc table and extra table, if any. These two will be
	 * allocated together. So we won't stress more to the memory allocator.
	 */
	struct vring_desc *indir_desc;
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */

	/* Indirect desc table and extra table, if any. These two will be
	 * allocated together. So we won't stress more to the memory allocator.
	 */
	struct vring_packed_desc *indir_desc;
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Descriptor DMA addr. */
	u32 len;			/* Descriptor length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};
struct vring_virtqueue_split {
	/* Actual memory layout for this queue. */
	struct vring vring;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/*
	 * Last written value to avail->idx in
	 * guest byte order.
	 */
	u16 avail_idx_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_split *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t queue_dma_addr;
	size_t queue_size_in_bytes;

	/*
	 * The parameters for creating vrings are reserved for creating new
	 * vring.
	 */
	u32 vring_align;
	bool may_reduce_num;
};
struct vring_virtqueue_packed {
	/* Actual memory layout for this queue. */
	struct {
		unsigned int num;
		struct vring_packed_desc *desc;
		struct vring_packed_desc_event *driver;
		struct vring_packed_desc_event *device;
	} vring;

	/* Driver ring wrap counter. */
	bool avail_wrap_counter;

	/* Avail used flags. */
	u16 avail_used_flags;

	/* Index of the next avail descriptor. */
	u16 next_avail_idx;

	/*
	 * Last written value to driver->flags in
	 * guest byte order.
	 */
	u16 event_flags_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_packed *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t ring_dma_addr;
	dma_addr_t driver_event_dma_addr;
	dma_addr_t device_event_dma_addr;
	size_t ring_size_in_bytes;
	size_t event_size_in_bytes;
};
struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen.
	 * for split ring, it just contains last used index
	 *
	 * for packed ring:
	 * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index.
	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter.
	 */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct vring_virtqueue_split split;

		/* Available for packed ring */
		struct vring_virtqueue_packed packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

	/* Device used for doing DMA */
	struct device *dma_dev;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};

static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);
#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)

static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
				   unsigned int total_sg)
{
	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(const struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supposed
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}
static bool vring_need_unmap_buffer(const struct vring_virtqueue *vring,
				    const struct vring_desc_extra *extra)
{
	return vring->use_dma_api && (extra->addr != DMA_MAPPING_ERROR);
}
size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
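
/*
 * Illustrative sketch, not part of the original file: a driver that builds
 * scatter-gather requests might clamp its advertised per-segment size to
 * what the DMA layer can actually map.  The helper name and the "wanted"
 * parameter are assumptions for this example.
 */
static inline size_t example_clamp_segment_size(struct virtio_device *vdev,
						size_t wanted)
{
	/* Never advertise segments larger than the DMA layer supports. */
	return min(wanted, virtio_max_dma_size(vdev));
}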
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag,
			       struct device *dma_dev)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(dma_dev, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle,
			     struct device *dma_dev)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(dma_dev, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}
/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->dma_dev;
}
/* Map one sg entry. */
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
			    enum dma_data_direction direction, dma_addr_t *addr,
			    u32 *len, bool premapped)
{
	if (premapped) {
		*addr = sg_dma_address(sg);
		*len = sg_dma_len(sg);
		return 0;
	}

	*len = sg->length;

	if (!vq->use_dma_api) {
		/*
		 * If DMA is not used, KMSAN doesn't know that the scatterlist
		 * is initialized by the hardware. Explicitly check/unpoison it
		 * depending on the direction.
		 */
		kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
		*addr = (dma_addr_t)sg_phys(sg);
		return 0;
	}

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	*addr = dma_map_page(vring_dma_dev(vq),
			     sg_page(sg), sg->offset, sg->length,
			     direction);

	if (dma_mapping_error(vring_dma_dev(vq), *addr))
		return -ENOMEM;

	return 0;
}
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}
static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
{
	vq->vq.num_free = num;

	if (vq->packed_ring)
		vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	else
		vq->last_used_idx = 0;

	vq->event_triggered = false;
	vq->num_added = 0;

#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif
}
/*
 * Split ring specific functions - *_split().
 */
static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  struct vring_desc_extra *extra)
{
	u16 flags;

	flags = extra->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		if (!vq->use_dma_api)
			goto out;

		dma_unmap_single(vring_dma_dev(vq),
				 extra->addr,
				 extra->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		if (!vring_need_unmap_buffer(vq, extra))
			goto out;

		dma_unmap_page(vring_dma_dev(vq),
			       extra->addr,
			       extra->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

out:
	return extra->next;
}
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc_extra *extra;
	struct vring_desc *desc;
	unsigned int i, size;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg;

	desc = kmalloc(size, gfp);
	if (!desc)
		return NULL;

	extra = (struct vring_desc_extra *)&desc[total_sg];

	for (i = 0; i < total_sg; i++)
		extra[i].next = i + 1;

	return desc;
}
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    struct vring_desc_extra *extra,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags, bool premapped)
{
	u16 next;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

	extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
	extra[i].len = len;
	extra[i].flags = flags;

	next = extra[i].next;

	desc[i].next = cpu_to_virtio16(vq->vdev, next);

	return next;
}
529 static inline int virtqueue_add_split(struct virtqueue
*_vq
,
530 struct scatterlist
*sgs
[],
531 unsigned int total_sg
,
532 unsigned int out_sgs
,
539 struct vring_virtqueue
*vq
= to_vvq(_vq
);
540 struct vring_desc_extra
*extra
;
541 struct scatterlist
*sg
;
542 struct vring_desc
*desc
;
543 unsigned int i
, n
, avail
, descs_used
, prev
, err_idx
;
549 BUG_ON(data
== NULL
);
550 BUG_ON(ctx
&& vq
->indirect
);
552 if (unlikely(vq
->broken
)) {
557 LAST_ADD_TIME_UPDATE(vq
);
559 BUG_ON(total_sg
== 0);
561 head
= vq
->free_head
;
563 if (virtqueue_use_indirect(vq
, total_sg
))
564 desc
= alloc_indirect_split(_vq
, total_sg
, gfp
);
567 WARN_ON_ONCE(total_sg
> vq
->split
.vring
.num
&& !vq
->indirect
);
571 /* Use a single buffer which doesn't continue */
573 /* Set up rest to use this indirect table. */
576 extra
= (struct vring_desc_extra
*)&desc
[total_sg
];
579 desc
= vq
->split
.vring
.desc
;
580 extra
= vq
->split
.desc_extra
;
582 descs_used
= total_sg
;
585 if (unlikely(vq
->vq
.num_free
< descs_used
)) {
586 pr_debug("Can't add buf len %i - avail = %i\n",
587 descs_used
, vq
->vq
.num_free
);
588 /* FIXME: for historical reasons, we force a notify here if
589 * there are outgoing parts to the buffer. Presumably the
590 * host should service the ring ASAP. */
599 for (n
= 0; n
< out_sgs
; n
++) {
600 for (sg
= sgs
[n
]; sg
; sg
= sg_next(sg
)) {
604 if (vring_map_one_sg(vq
, sg
, DMA_TO_DEVICE
, &addr
, &len
, premapped
))
608 /* Note that we trust indirect descriptor
609 * table since it use stream DMA mapping.
611 i
= virtqueue_add_desc_split(_vq
, desc
, extra
, i
, addr
, len
,
616 for (; n
< (out_sgs
+ in_sgs
); n
++) {
617 for (sg
= sgs
[n
]; sg
; sg
= sg_next(sg
)) {
621 if (vring_map_one_sg(vq
, sg
, DMA_FROM_DEVICE
, &addr
, &len
, premapped
))
625 /* Note that we trust indirect descriptor
626 * table since it use stream DMA mapping.
628 i
= virtqueue_add_desc_split(_vq
, desc
, extra
, i
, addr
, len
,
634 /* Last one doesn't continue. */
635 desc
[prev
].flags
&= cpu_to_virtio16(_vq
->vdev
, ~VRING_DESC_F_NEXT
);
636 if (!indirect
&& vring_need_unmap_buffer(vq
, &extra
[prev
]))
637 vq
->split
.desc_extra
[prev
& (vq
->split
.vring
.num
- 1)].flags
&=
641 /* Now that the indirect table is filled in, map it. */
642 dma_addr_t addr
= vring_map_single(
643 vq
, desc
, total_sg
* sizeof(struct vring_desc
),
645 if (vring_mapping_error(vq
, addr
))
648 virtqueue_add_desc_split(_vq
, vq
->split
.vring
.desc
,
649 vq
->split
.desc_extra
,
651 total_sg
* sizeof(struct vring_desc
),
652 VRING_DESC_F_INDIRECT
, false);
655 /* We're using some buffers from the free list. */
656 vq
->vq
.num_free
-= descs_used
;
658 /* Update free pointer */
660 vq
->free_head
= vq
->split
.desc_extra
[head
].next
;
664 /* Store token and indirect buffer state. */
665 vq
->split
.desc_state
[head
].data
= data
;
667 vq
->split
.desc_state
[head
].indir_desc
= desc
;
669 vq
->split
.desc_state
[head
].indir_desc
= ctx
;
671 /* Put entry in available array (but don't update avail->idx until they
673 avail
= vq
->split
.avail_idx_shadow
& (vq
->split
.vring
.num
- 1);
674 vq
->split
.vring
.avail
->ring
[avail
] = cpu_to_virtio16(_vq
->vdev
, head
);
676 /* Descriptors and available array need to be set before we expose the
677 * new available array entries. */
678 virtio_wmb(vq
->weak_barriers
);
679 vq
->split
.avail_idx_shadow
++;
680 vq
->split
.vring
.avail
->idx
= cpu_to_virtio16(_vq
->vdev
,
681 vq
->split
.avail_idx_shadow
);
684 pr_debug("Added buffer head %i to %p\n", head
, vq
);
687 /* This is very unlikely, but theoretically possible. Kick
689 if (unlikely(vq
->num_added
== (1 << 16) - 1))
702 for (n
= 0; n
< total_sg
; n
++) {
706 i
= vring_unmap_one_split(vq
, &extra
[i
]);
716 static bool virtqueue_kick_prepare_split(struct virtqueue
*_vq
)
718 struct vring_virtqueue
*vq
= to_vvq(_vq
);
723 /* We need to expose available array entries before checking avail
725 virtio_mb(vq
->weak_barriers
);
727 old
= vq
->split
.avail_idx_shadow
- vq
->num_added
;
728 new = vq
->split
.avail_idx_shadow
;
731 LAST_ADD_TIME_CHECK(vq
);
732 LAST_ADD_TIME_INVALID(vq
);
735 needs_kick
= vring_need_event(virtio16_to_cpu(_vq
->vdev
,
736 vring_avail_event(&vq
->split
.vring
)),
739 needs_kick
= !(vq
->split
.vring
.used
->flags
&
740 cpu_to_virtio16(_vq
->vdev
,
741 VRING_USED_F_NO_NOTIFY
));
747 static void detach_buf_split(struct vring_virtqueue
*vq
, unsigned int head
,
750 struct vring_desc_extra
*extra
;
752 __virtio16 nextflag
= cpu_to_virtio16(vq
->vq
.vdev
, VRING_DESC_F_NEXT
);
754 /* Clear data ptr. */
755 vq
->split
.desc_state
[head
].data
= NULL
;
757 extra
= vq
->split
.desc_extra
;
759 /* Put back on free list: unmap first-level descriptors and find end */
762 while (vq
->split
.vring
.desc
[i
].flags
& nextflag
) {
763 vring_unmap_one_split(vq
, &extra
[i
]);
764 i
= vq
->split
.desc_extra
[i
].next
;
768 vring_unmap_one_split(vq
, &extra
[i
]);
769 vq
->split
.desc_extra
[i
].next
= vq
->free_head
;
770 vq
->free_head
= head
;
772 /* Plus final descriptor */
776 struct vring_desc
*indir_desc
=
777 vq
->split
.desc_state
[head
].indir_desc
;
780 /* Free the indirect table, if any, now that it's unmapped. */
783 len
= vq
->split
.desc_extra
[head
].len
;
785 BUG_ON(!(vq
->split
.desc_extra
[head
].flags
&
786 VRING_DESC_F_INDIRECT
));
787 BUG_ON(len
== 0 || len
% sizeof(struct vring_desc
));
789 num
= len
/ sizeof(struct vring_desc
);
791 extra
= (struct vring_desc_extra
*)&indir_desc
[num
];
793 if (vq
->use_dma_api
) {
794 for (j
= 0; j
< num
; j
++)
795 vring_unmap_one_split(vq
, &extra
[j
]);
799 vq
->split
.desc_state
[head
].indir_desc
= NULL
;
801 *ctx
= vq
->split
.desc_state
[head
].indir_desc
;
static bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}
811 static void *virtqueue_get_buf_ctx_split(struct virtqueue
*_vq
,
815 struct vring_virtqueue
*vq
= to_vvq(_vq
);
822 if (unlikely(vq
->broken
)) {
827 if (!more_used_split(vq
)) {
828 pr_debug("No more buffers in queue\n");
833 /* Only get used array entries after they have been exposed by host. */
834 virtio_rmb(vq
->weak_barriers
);
836 last_used
= (vq
->last_used_idx
& (vq
->split
.vring
.num
- 1));
837 i
= virtio32_to_cpu(_vq
->vdev
,
838 vq
->split
.vring
.used
->ring
[last_used
].id
);
839 *len
= virtio32_to_cpu(_vq
->vdev
,
840 vq
->split
.vring
.used
->ring
[last_used
].len
);
842 if (unlikely(i
>= vq
->split
.vring
.num
)) {
843 BAD_RING(vq
, "id %u out of range\n", i
);
846 if (unlikely(!vq
->split
.desc_state
[i
].data
)) {
847 BAD_RING(vq
, "id %u is not a head!\n", i
);
851 /* detach_buf_split clears data, so grab it now. */
852 ret
= vq
->split
.desc_state
[i
].data
;
853 detach_buf_split(vq
, i
, ctx
);
855 /* If we expect an interrupt for the next entry, tell host
856 * by writing event index and flush out the write before
857 * the read in the next get_buf call. */
858 if (!(vq
->split
.avail_flags_shadow
& VRING_AVAIL_F_NO_INTERRUPT
))
859 virtio_store_mb(vq
->weak_barriers
,
860 &vring_used_event(&vq
->split
.vring
),
861 cpu_to_virtio16(_vq
->vdev
, vq
->last_used_idx
));
863 LAST_ADD_TIME_INVALID(vq
);
869 static void virtqueue_disable_cb_split(struct virtqueue
*_vq
)
871 struct vring_virtqueue
*vq
= to_vvq(_vq
);
873 if (!(vq
->split
.avail_flags_shadow
& VRING_AVAIL_F_NO_INTERRUPT
)) {
874 vq
->split
.avail_flags_shadow
|= VRING_AVAIL_F_NO_INTERRUPT
;
877 * If device triggered an event already it won't trigger one again:
878 * no need to disable.
880 if (vq
->event_triggered
)
884 /* TODO: this is a hack. Figure out a cleaner value to write. */
885 vring_used_event(&vq
->split
.vring
) = 0x0;
887 vq
->split
.vring
.avail
->flags
=
888 cpu_to_virtio16(_vq
->vdev
,
889 vq
->split
.avail_flags_shadow
);
893 static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue
*_vq
)
895 struct vring_virtqueue
*vq
= to_vvq(_vq
);
900 /* We optimistically turn back on interrupts, then check if there was
902 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
903 * either clear the flags bit or point the event index at the next
904 * entry. Always do both to keep code simple. */
905 if (vq
->split
.avail_flags_shadow
& VRING_AVAIL_F_NO_INTERRUPT
) {
906 vq
->split
.avail_flags_shadow
&= ~VRING_AVAIL_F_NO_INTERRUPT
;
908 vq
->split
.vring
.avail
->flags
=
909 cpu_to_virtio16(_vq
->vdev
,
910 vq
->split
.avail_flags_shadow
);
912 vring_used_event(&vq
->split
.vring
) = cpu_to_virtio16(_vq
->vdev
,
913 last_used_idx
= vq
->last_used_idx
);
915 return last_used_idx
;
918 static bool virtqueue_poll_split(struct virtqueue
*_vq
, unsigned int last_used_idx
)
920 struct vring_virtqueue
*vq
= to_vvq(_vq
);
922 return (u16
)last_used_idx
!= virtio16_to_cpu(_vq
->vdev
,
923 vq
->split
.vring
.used
->idx
);
926 static bool virtqueue_enable_cb_delayed_split(struct virtqueue
*_vq
)
928 struct vring_virtqueue
*vq
= to_vvq(_vq
);
933 /* We optimistically turn back on interrupts, then check if there was
935 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
936 * either clear the flags bit or point the event index at the next
937 * entry. Always update the event index to keep code simple. */
938 if (vq
->split
.avail_flags_shadow
& VRING_AVAIL_F_NO_INTERRUPT
) {
939 vq
->split
.avail_flags_shadow
&= ~VRING_AVAIL_F_NO_INTERRUPT
;
941 vq
->split
.vring
.avail
->flags
=
942 cpu_to_virtio16(_vq
->vdev
,
943 vq
->split
.avail_flags_shadow
);
945 /* TODO: tune this threshold */
946 bufs
= (u16
)(vq
->split
.avail_idx_shadow
- vq
->last_used_idx
) * 3 / 4;
948 virtio_store_mb(vq
->weak_barriers
,
949 &vring_used_event(&vq
->split
.vring
),
950 cpu_to_virtio16(_vq
->vdev
, vq
->last_used_idx
+ bufs
));
952 if (unlikely((u16
)(virtio16_to_cpu(_vq
->vdev
, vq
->split
.vring
.used
->idx
)
953 - vq
->last_used_idx
) > bufs
)) {
962 static void *virtqueue_detach_unused_buf_split(struct virtqueue
*_vq
)
964 struct vring_virtqueue
*vq
= to_vvq(_vq
);
970 for (i
= 0; i
< vq
->split
.vring
.num
; i
++) {
971 if (!vq
->split
.desc_state
[i
].data
)
973 /* detach_buf_split clears data, so grab it now. */
974 buf
= vq
->split
.desc_state
[i
].data
;
975 detach_buf_split(vq
, i
, NULL
);
976 vq
->split
.avail_idx_shadow
--;
977 vq
->split
.vring
.avail
->idx
= cpu_to_virtio16(_vq
->vdev
,
978 vq
->split
.avail_idx_shadow
);
982 /* That should have freed everything. */
983 BUG_ON(vq
->vq
.num_free
!= vq
->split
.vring
.num
);
989 static void virtqueue_vring_init_split(struct vring_virtqueue_split
*vring_split
,
990 struct vring_virtqueue
*vq
)
992 struct virtio_device
*vdev
;
996 vring_split
->avail_flags_shadow
= 0;
997 vring_split
->avail_idx_shadow
= 0;
999 /* No callback? Tell other side not to bother us. */
1000 if (!vq
->vq
.callback
) {
1001 vring_split
->avail_flags_shadow
|= VRING_AVAIL_F_NO_INTERRUPT
;
1003 vring_split
->vring
.avail
->flags
= cpu_to_virtio16(vdev
,
1004 vring_split
->avail_flags_shadow
);
1008 static void virtqueue_reinit_split(struct vring_virtqueue
*vq
)
1012 num
= vq
->split
.vring
.num
;
1014 vq
->split
.vring
.avail
->flags
= 0;
1015 vq
->split
.vring
.avail
->idx
= 0;
1017 /* reset avail event */
1018 vq
->split
.vring
.avail
->ring
[num
] = 0;
1020 vq
->split
.vring
.used
->flags
= 0;
1021 vq
->split
.vring
.used
->idx
= 0;
1023 /* reset used event */
1024 *(__virtio16
*)&(vq
->split
.vring
.used
->ring
[num
]) = 0;
1026 virtqueue_init(vq
, num
);
1028 virtqueue_vring_init_split(&vq
->split
, vq
);
1031 static void virtqueue_vring_attach_split(struct vring_virtqueue
*vq
,
1032 struct vring_virtqueue_split
*vring_split
)
1034 vq
->split
= *vring_split
;
1036 /* Put everything in free lists. */
1040 static int vring_alloc_state_extra_split(struct vring_virtqueue_split
*vring_split
)
1042 struct vring_desc_state_split
*state
;
1043 struct vring_desc_extra
*extra
;
1044 u32 num
= vring_split
->vring
.num
;
1046 state
= kmalloc_array(num
, sizeof(struct vring_desc_state_split
), GFP_KERNEL
);
1050 extra
= vring_alloc_desc_extra(num
);
1054 memset(state
, 0, num
* sizeof(struct vring_desc_state_split
));
1056 vring_split
->desc_state
= state
;
1057 vring_split
->desc_extra
= extra
;
1066 static void vring_free_split(struct vring_virtqueue_split
*vring_split
,
1067 struct virtio_device
*vdev
, struct device
*dma_dev
)
1069 vring_free_queue(vdev
, vring_split
->queue_size_in_bytes
,
1070 vring_split
->vring
.desc
,
1071 vring_split
->queue_dma_addr
,
1074 kfree(vring_split
->desc_state
);
1075 kfree(vring_split
->desc_extra
);
1078 static int vring_alloc_queue_split(struct vring_virtqueue_split
*vring_split
,
1079 struct virtio_device
*vdev
,
1081 unsigned int vring_align
,
1082 bool may_reduce_num
,
1083 struct device
*dma_dev
)
1086 dma_addr_t dma_addr
;
1088 /* We assume num is a power of 2. */
1089 if (!is_power_of_2(num
)) {
1090 dev_warn(&vdev
->dev
, "Bad virtqueue length %u\n", num
);
1094 /* TODO: allocate each queue chunk individually */
1095 for (; num
&& vring_size(num
, vring_align
) > PAGE_SIZE
; num
/= 2) {
1096 queue
= vring_alloc_queue(vdev
, vring_size(num
, vring_align
),
1098 GFP_KERNEL
| __GFP_NOWARN
| __GFP_ZERO
,
1102 if (!may_reduce_num
)
1110 /* Try to get a single page. You are my only hope! */
1111 queue
= vring_alloc_queue(vdev
, vring_size(num
, vring_align
),
1112 &dma_addr
, GFP_KERNEL
| __GFP_ZERO
,
1118 vring_init(&vring_split
->vring
, num
, queue
, vring_align
);
1120 vring_split
->queue_dma_addr
= dma_addr
;
1121 vring_split
->queue_size_in_bytes
= vring_size(num
, vring_align
);
1123 vring_split
->vring_align
= vring_align
;
1124 vring_split
->may_reduce_num
= may_reduce_num
;
1129 static struct virtqueue
*__vring_new_virtqueue_split(unsigned int index
,
1130 struct vring_virtqueue_split
*vring_split
,
1131 struct virtio_device
*vdev
,
1134 bool (*notify
)(struct virtqueue
*),
1135 void (*callback
)(struct virtqueue
*),
1137 struct device
*dma_dev
)
1139 struct vring_virtqueue
*vq
;
1142 vq
= kmalloc(sizeof(*vq
), GFP_KERNEL
);
1146 vq
->packed_ring
= false;
1147 vq
->vq
.callback
= callback
;
1150 vq
->vq
.index
= index
;
1151 vq
->vq
.reset
= false;
1152 vq
->we_own_ring
= false;
1153 vq
->notify
= notify
;
1154 vq
->weak_barriers
= weak_barriers
;
1155 #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
1160 vq
->dma_dev
= dma_dev
;
1161 vq
->use_dma_api
= vring_use_dma_api(vdev
);
1163 vq
->indirect
= virtio_has_feature(vdev
, VIRTIO_RING_F_INDIRECT_DESC
) &&
1165 vq
->event
= virtio_has_feature(vdev
, VIRTIO_RING_F_EVENT_IDX
);
1167 if (virtio_has_feature(vdev
, VIRTIO_F_ORDER_PLATFORM
))
1168 vq
->weak_barriers
= false;
1170 err
= vring_alloc_state_extra_split(vring_split
);
1176 virtqueue_vring_init_split(vring_split
, vq
);
1178 virtqueue_init(vq
, vring_split
->vring
.num
);
1179 virtqueue_vring_attach_split(vq
, vring_split
);
1181 spin_lock(&vdev
->vqs_list_lock
);
1182 list_add_tail(&vq
->vq
.list
, &vdev
->vqs
);
1183 spin_unlock(&vdev
->vqs_list_lock
);
1187 static struct virtqueue
*vring_create_virtqueue_split(
1190 unsigned int vring_align
,
1191 struct virtio_device
*vdev
,
1193 bool may_reduce_num
,
1195 bool (*notify
)(struct virtqueue
*),
1196 void (*callback
)(struct virtqueue
*),
1198 struct device
*dma_dev
)
1200 struct vring_virtqueue_split vring_split
= {};
1201 struct virtqueue
*vq
;
1204 err
= vring_alloc_queue_split(&vring_split
, vdev
, num
, vring_align
,
1205 may_reduce_num
, dma_dev
);
1209 vq
= __vring_new_virtqueue_split(index
, &vring_split
, vdev
, weak_barriers
,
1210 context
, notify
, callback
, name
, dma_dev
);
1212 vring_free_split(&vring_split
, vdev
, dma_dev
);
1216 to_vvq(vq
)->we_own_ring
= true;
1221 static int virtqueue_resize_split(struct virtqueue
*_vq
, u32 num
)
1223 struct vring_virtqueue_split vring_split
= {};
1224 struct vring_virtqueue
*vq
= to_vvq(_vq
);
1225 struct virtio_device
*vdev
= _vq
->vdev
;
1228 err
= vring_alloc_queue_split(&vring_split
, vdev
, num
,
1229 vq
->split
.vring_align
,
1230 vq
->split
.may_reduce_num
,
1235 err
= vring_alloc_state_extra_split(&vring_split
);
1237 goto err_state_extra
;
1239 vring_free(&vq
->vq
);
1241 virtqueue_vring_init_split(&vring_split
, vq
);
1243 virtqueue_init(vq
, vring_split
.vring
.num
);
1244 virtqueue_vring_attach_split(vq
, &vring_split
);
1249 vring_free_split(&vring_split
, vdev
, vring_dma_dev(vq
));
1251 virtqueue_reinit_split(vq
);
/*
 * Packed ring specific functions - *_packed().
 */
static bool packed_used_wrap_counter(u16 last_used_idx)
{
	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

static u16 packed_last_used(u16 last_used_idx)
{
	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}
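
/*
 * Illustrative sketch, not part of the original file: last_used_idx keeps the
 * descriptor index in the bits below VRING_PACKED_EVENT_F_WRAP_CTR and the
 * used wrap counter in the bit at that position.  This hypothetical helper is
 * the inverse of the two decoders above, mirroring how
 * virtqueue_get_buf_ctx_packed() stores the value back.
 */
static inline u16 example_pack_last_used(u16 last_used, bool used_wrap_counter)
{
	return last_used | ((u16)used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);
}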
1269 static void vring_unmap_extra_packed(const struct vring_virtqueue
*vq
,
1270 const struct vring_desc_extra
*extra
)
1274 flags
= extra
->flags
;
1276 if (flags
& VRING_DESC_F_INDIRECT
) {
1277 if (!vq
->use_dma_api
)
1280 dma_unmap_single(vring_dma_dev(vq
),
1281 extra
->addr
, extra
->len
,
1282 (flags
& VRING_DESC_F_WRITE
) ?
1283 DMA_FROM_DEVICE
: DMA_TO_DEVICE
);
1285 if (!vring_need_unmap_buffer(vq
, extra
))
1288 dma_unmap_page(vring_dma_dev(vq
),
1289 extra
->addr
, extra
->len
,
1290 (flags
& VRING_DESC_F_WRITE
) ?
1291 DMA_FROM_DEVICE
: DMA_TO_DEVICE
);
1295 static struct vring_packed_desc
*alloc_indirect_packed(unsigned int total_sg
,
1298 struct vring_desc_extra
*extra
;
1299 struct vring_packed_desc
*desc
;
1303 * We require lowmem mappings for the descriptors because
1304 * otherwise virt_to_phys will give us bogus addresses in the
1307 gfp
&= ~__GFP_HIGHMEM
;
1309 size
= (sizeof(*desc
) + sizeof(*extra
)) * total_sg
;
1311 desc
= kmalloc(size
, gfp
);
1315 extra
= (struct vring_desc_extra
*)&desc
[total_sg
];
1317 for (i
= 0; i
< total_sg
; i
++)
1318 extra
[i
].next
= i
+ 1;
1323 static int virtqueue_add_indirect_packed(struct vring_virtqueue
*vq
,
1324 struct scatterlist
*sgs
[],
1325 unsigned int total_sg
,
1326 unsigned int out_sgs
,
1327 unsigned int in_sgs
,
1332 struct vring_desc_extra
*extra
;
1333 struct vring_packed_desc
*desc
;
1334 struct scatterlist
*sg
;
1335 unsigned int i
, n
, err_idx
, len
;
1339 head
= vq
->packed
.next_avail_idx
;
1340 desc
= alloc_indirect_packed(total_sg
, gfp
);
1344 extra
= (struct vring_desc_extra
*)&desc
[total_sg
];
1346 if (unlikely(vq
->vq
.num_free
< 1)) {
1347 pr_debug("Can't add buf len 1 - avail = 0\n");
1355 BUG_ON(id
== vq
->packed
.vring
.num
);
1357 for (n
= 0; n
< out_sgs
+ in_sgs
; n
++) {
1358 for (sg
= sgs
[n
]; sg
; sg
= sg_next(sg
)) {
1359 if (vring_map_one_sg(vq
, sg
, n
< out_sgs
?
1360 DMA_TO_DEVICE
: DMA_FROM_DEVICE
,
1361 &addr
, &len
, premapped
))
1364 desc
[i
].flags
= cpu_to_le16(n
< out_sgs
?
1365 0 : VRING_DESC_F_WRITE
);
1366 desc
[i
].addr
= cpu_to_le64(addr
);
1367 desc
[i
].len
= cpu_to_le32(len
);
1369 if (unlikely(vq
->use_dma_api
)) {
1370 extra
[i
].addr
= premapped
? DMA_MAPPING_ERROR
: addr
;
1372 extra
[i
].flags
= n
< out_sgs
? 0 : VRING_DESC_F_WRITE
;
1379 /* Now that the indirect table is filled in, map it. */
1380 addr
= vring_map_single(vq
, desc
,
1381 total_sg
* sizeof(struct vring_packed_desc
),
1383 if (vring_mapping_error(vq
, addr
))
1386 vq
->packed
.vring
.desc
[head
].addr
= cpu_to_le64(addr
);
1387 vq
->packed
.vring
.desc
[head
].len
= cpu_to_le32(total_sg
*
1388 sizeof(struct vring_packed_desc
));
1389 vq
->packed
.vring
.desc
[head
].id
= cpu_to_le16(id
);
1391 if (vq
->use_dma_api
) {
1392 vq
->packed
.desc_extra
[id
].addr
= addr
;
1393 vq
->packed
.desc_extra
[id
].len
= total_sg
*
1394 sizeof(struct vring_packed_desc
);
1395 vq
->packed
.desc_extra
[id
].flags
= VRING_DESC_F_INDIRECT
|
1396 vq
->packed
.avail_used_flags
;
1400 * A driver MUST NOT make the first descriptor in the list
1401 * available before all subsequent descriptors comprising
1402 * the list are made available.
1404 virtio_wmb(vq
->weak_barriers
);
1405 vq
->packed
.vring
.desc
[head
].flags
= cpu_to_le16(VRING_DESC_F_INDIRECT
|
1406 vq
->packed
.avail_used_flags
);
1408 /* We're using some buffers from the free list. */
1409 vq
->vq
.num_free
-= 1;
1411 /* Update free pointer */
1413 if (n
>= vq
->packed
.vring
.num
) {
1415 vq
->packed
.avail_wrap_counter
^= 1;
1416 vq
->packed
.avail_used_flags
^=
1417 1 << VRING_PACKED_DESC_F_AVAIL
|
1418 1 << VRING_PACKED_DESC_F_USED
;
1420 vq
->packed
.next_avail_idx
= n
;
1421 vq
->free_head
= vq
->packed
.desc_extra
[id
].next
;
1423 /* Store token and indirect buffer state. */
1424 vq
->packed
.desc_state
[id
].num
= 1;
1425 vq
->packed
.desc_state
[id
].data
= data
;
1426 vq
->packed
.desc_state
[id
].indir_desc
= desc
;
1427 vq
->packed
.desc_state
[id
].last
= id
;
1431 pr_debug("Added buffer head %i to %p\n", head
, vq
);
1439 for (i
= 0; i
< err_idx
; i
++)
1440 vring_unmap_extra_packed(vq
, &extra
[i
]);
1448 static inline int virtqueue_add_packed(struct virtqueue
*_vq
,
1449 struct scatterlist
*sgs
[],
1450 unsigned int total_sg
,
1451 unsigned int out_sgs
,
1452 unsigned int in_sgs
,
1458 struct vring_virtqueue
*vq
= to_vvq(_vq
);
1459 struct vring_packed_desc
*desc
;
1460 struct scatterlist
*sg
;
1461 unsigned int i
, n
, c
, descs_used
, err_idx
, len
;
1462 __le16 head_flags
, flags
;
1463 u16 head
, id
, prev
, curr
, avail_used_flags
;
1468 BUG_ON(data
== NULL
);
1469 BUG_ON(ctx
&& vq
->indirect
);
1471 if (unlikely(vq
->broken
)) {
1476 LAST_ADD_TIME_UPDATE(vq
);
1478 BUG_ON(total_sg
== 0);
1480 if (virtqueue_use_indirect(vq
, total_sg
)) {
1481 err
= virtqueue_add_indirect_packed(vq
, sgs
, total_sg
, out_sgs
,
1482 in_sgs
, data
, premapped
, gfp
);
1483 if (err
!= -ENOMEM
) {
1488 /* fall back on direct */
1491 head
= vq
->packed
.next_avail_idx
;
1492 avail_used_flags
= vq
->packed
.avail_used_flags
;
1494 WARN_ON_ONCE(total_sg
> vq
->packed
.vring
.num
&& !vq
->indirect
);
1496 desc
= vq
->packed
.vring
.desc
;
1498 descs_used
= total_sg
;
1500 if (unlikely(vq
->vq
.num_free
< descs_used
)) {
1501 pr_debug("Can't add buf len %i - avail = %i\n",
1502 descs_used
, vq
->vq
.num_free
);
1508 BUG_ON(id
== vq
->packed
.vring
.num
);
1512 for (n
= 0; n
< out_sgs
+ in_sgs
; n
++) {
1513 for (sg
= sgs
[n
]; sg
; sg
= sg_next(sg
)) {
1516 if (vring_map_one_sg(vq
, sg
, n
< out_sgs
?
1517 DMA_TO_DEVICE
: DMA_FROM_DEVICE
,
1518 &addr
, &len
, premapped
))
1521 flags
= cpu_to_le16(vq
->packed
.avail_used_flags
|
1522 (++c
== total_sg
? 0 : VRING_DESC_F_NEXT
) |
1523 (n
< out_sgs
? 0 : VRING_DESC_F_WRITE
));
1527 desc
[i
].flags
= flags
;
1529 desc
[i
].addr
= cpu_to_le64(addr
);
1530 desc
[i
].len
= cpu_to_le32(len
);
1531 desc
[i
].id
= cpu_to_le16(id
);
1533 if (unlikely(vq
->use_dma_api
)) {
1534 vq
->packed
.desc_extra
[curr
].addr
= premapped
?
1535 DMA_MAPPING_ERROR
: addr
;
1536 vq
->packed
.desc_extra
[curr
].len
= len
;
1537 vq
->packed
.desc_extra
[curr
].flags
=
1541 curr
= vq
->packed
.desc_extra
[curr
].next
;
1543 if ((unlikely(++i
>= vq
->packed
.vring
.num
))) {
1545 vq
->packed
.avail_used_flags
^=
1546 1 << VRING_PACKED_DESC_F_AVAIL
|
1547 1 << VRING_PACKED_DESC_F_USED
;
1553 vq
->packed
.avail_wrap_counter
^= 1;
1555 /* We're using some buffers from the free list. */
1556 vq
->vq
.num_free
-= descs_used
;
1558 /* Update free pointer */
1559 vq
->packed
.next_avail_idx
= i
;
1560 vq
->free_head
= curr
;
1563 vq
->packed
.desc_state
[id
].num
= descs_used
;
1564 vq
->packed
.desc_state
[id
].data
= data
;
1565 vq
->packed
.desc_state
[id
].indir_desc
= ctx
;
1566 vq
->packed
.desc_state
[id
].last
= prev
;
1569 * A driver MUST NOT make the first descriptor in the list
1570 * available before all subsequent descriptors comprising
1571 * the list are made available.
1573 virtio_wmb(vq
->weak_barriers
);
1574 vq
->packed
.vring
.desc
[head
].flags
= head_flags
;
1575 vq
->num_added
+= descs_used
;
1577 pr_debug("Added buffer head %i to %p\n", head
, vq
);
1585 curr
= vq
->free_head
;
1587 vq
->packed
.avail_used_flags
= avail_used_flags
;
1589 for (n
= 0; n
< total_sg
; n
++) {
1592 vring_unmap_extra_packed(vq
, &vq
->packed
.desc_extra
[curr
]);
1593 curr
= vq
->packed
.desc_extra
[curr
].next
;
1595 if (i
>= vq
->packed
.vring
.num
)
1603 static bool virtqueue_kick_prepare_packed(struct virtqueue
*_vq
)
1605 struct vring_virtqueue
*vq
= to_vvq(_vq
);
1606 u16
new, old
, off_wrap
, flags
, wrap_counter
, event_idx
;
1619 * We need to expose the new flags value before checking notification
1622 virtio_mb(vq
->weak_barriers
);
1624 old
= vq
->packed
.next_avail_idx
- vq
->num_added
;
1625 new = vq
->packed
.next_avail_idx
;
1628 snapshot
.u32
= *(u32
*)vq
->packed
.vring
.device
;
1629 flags
= le16_to_cpu(snapshot
.flags
);
1631 LAST_ADD_TIME_CHECK(vq
);
1632 LAST_ADD_TIME_INVALID(vq
);
1634 if (flags
!= VRING_PACKED_EVENT_FLAG_DESC
) {
1635 needs_kick
= (flags
!= VRING_PACKED_EVENT_FLAG_DISABLE
);
1639 off_wrap
= le16_to_cpu(snapshot
.off_wrap
);
1641 wrap_counter
= off_wrap
>> VRING_PACKED_EVENT_F_WRAP_CTR
;
1642 event_idx
= off_wrap
& ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR
);
1643 if (wrap_counter
!= vq
->packed
.avail_wrap_counter
)
1644 event_idx
-= vq
->packed
.vring
.num
;
1646 needs_kick
= vring_need_event(event_idx
, new, old
);
1652 static void detach_buf_packed(struct vring_virtqueue
*vq
,
1653 unsigned int id
, void **ctx
)
1655 struct vring_desc_state_packed
*state
= NULL
;
1656 struct vring_packed_desc
*desc
;
1657 unsigned int i
, curr
;
1659 state
= &vq
->packed
.desc_state
[id
];
1661 /* Clear data ptr. */
1664 vq
->packed
.desc_extra
[state
->last
].next
= vq
->free_head
;
1666 vq
->vq
.num_free
+= state
->num
;
1668 if (unlikely(vq
->use_dma_api
)) {
1670 for (i
= 0; i
< state
->num
; i
++) {
1671 vring_unmap_extra_packed(vq
,
1672 &vq
->packed
.desc_extra
[curr
]);
1673 curr
= vq
->packed
.desc_extra
[curr
].next
;
1678 struct vring_desc_extra
*extra
;
1681 /* Free the indirect table, if any, now that it's unmapped. */
1682 desc
= state
->indir_desc
;
1686 if (vq
->use_dma_api
) {
1687 len
= vq
->packed
.desc_extra
[id
].len
;
1688 num
= len
/ sizeof(struct vring_packed_desc
);
1690 extra
= (struct vring_desc_extra
*)&desc
[num
];
1692 for (i
= 0; i
< num
; i
++)
1693 vring_unmap_extra_packed(vq
, &extra
[i
]);
1696 state
->indir_desc
= NULL
;
1698 *ctx
= state
->indir_desc
;
static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
				       u16 idx, bool used_wrap_counter)
{
	bool avail, used;
	u16 flags;

	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	return avail == used && used == used_wrap_counter;
}

static bool more_used_packed(const struct vring_virtqueue *vq)
{
	u16 last_used;
	u16 last_used_idx;
	bool used_wrap_counter;

	last_used_idx = READ_ONCE(vq->last_used_idx);
	last_used = packed_last_used(last_used_idx);
	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
	return is_used_desc_packed(vq, last_used, used_wrap_counter);
}
1727 static void *virtqueue_get_buf_ctx_packed(struct virtqueue
*_vq
,
1731 struct vring_virtqueue
*vq
= to_vvq(_vq
);
1732 u16 last_used
, id
, last_used_idx
;
1733 bool used_wrap_counter
;
1738 if (unlikely(vq
->broken
)) {
1743 if (!more_used_packed(vq
)) {
1744 pr_debug("No more buffers in queue\n");
1749 /* Only get used elements after they have been exposed by host. */
1750 virtio_rmb(vq
->weak_barriers
);
1752 last_used_idx
= READ_ONCE(vq
->last_used_idx
);
1753 used_wrap_counter
= packed_used_wrap_counter(last_used_idx
);
1754 last_used
= packed_last_used(last_used_idx
);
1755 id
= le16_to_cpu(vq
->packed
.vring
.desc
[last_used
].id
);
1756 *len
= le32_to_cpu(vq
->packed
.vring
.desc
[last_used
].len
);
1758 if (unlikely(id
>= vq
->packed
.vring
.num
)) {
1759 BAD_RING(vq
, "id %u out of range\n", id
);
1762 if (unlikely(!vq
->packed
.desc_state
[id
].data
)) {
1763 BAD_RING(vq
, "id %u is not a head!\n", id
);
1767 /* detach_buf_packed clears data, so grab it now. */
1768 ret
= vq
->packed
.desc_state
[id
].data
;
1769 detach_buf_packed(vq
, id
, ctx
);
1771 last_used
+= vq
->packed
.desc_state
[id
].num
;
1772 if (unlikely(last_used
>= vq
->packed
.vring
.num
)) {
1773 last_used
-= vq
->packed
.vring
.num
;
1774 used_wrap_counter
^= 1;
1777 last_used
= (last_used
| (used_wrap_counter
<< VRING_PACKED_EVENT_F_WRAP_CTR
));
1778 WRITE_ONCE(vq
->last_used_idx
, last_used
);
1781 * If we expect an interrupt for the next entry, tell host
1782 * by writing event index and flush out the write before
1783 * the read in the next get_buf call.
1785 if (vq
->packed
.event_flags_shadow
== VRING_PACKED_EVENT_FLAG_DESC
)
1786 virtio_store_mb(vq
->weak_barriers
,
1787 &vq
->packed
.vring
.driver
->off_wrap
,
1788 cpu_to_le16(vq
->last_used_idx
));
1790 LAST_ADD_TIME_INVALID(vq
);
1796 static void virtqueue_disable_cb_packed(struct virtqueue
*_vq
)
1798 struct vring_virtqueue
*vq
= to_vvq(_vq
);
1800 if (vq
->packed
.event_flags_shadow
!= VRING_PACKED_EVENT_FLAG_DISABLE
) {
1801 vq
->packed
.event_flags_shadow
= VRING_PACKED_EVENT_FLAG_DISABLE
;
1804 * If device triggered an event already it won't trigger one again:
1805 * no need to disable.
1807 if (vq
->event_triggered
)
1810 vq
->packed
.vring
.driver
->flags
=
1811 cpu_to_le16(vq
->packed
.event_flags_shadow
);
1815 static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue
*_vq
)
1817 struct vring_virtqueue
*vq
= to_vvq(_vq
);
1822 * We optimistically turn back on interrupts, then check if there was
1827 vq
->packed
.vring
.driver
->off_wrap
=
1828 cpu_to_le16(vq
->last_used_idx
);
1830 * We need to update event offset and event wrap
1831 * counter first before updating event flags.
1833 virtio_wmb(vq
->weak_barriers
);
1836 if (vq
->packed
.event_flags_shadow
== VRING_PACKED_EVENT_FLAG_DISABLE
) {
1837 vq
->packed
.event_flags_shadow
= vq
->event
?
1838 VRING_PACKED_EVENT_FLAG_DESC
:
1839 VRING_PACKED_EVENT_FLAG_ENABLE
;
1840 vq
->packed
.vring
.driver
->flags
=
1841 cpu_to_le16(vq
->packed
.event_flags_shadow
);
1845 return vq
->last_used_idx
;
1848 static bool virtqueue_poll_packed(struct virtqueue
*_vq
, u16 off_wrap
)
1850 struct vring_virtqueue
*vq
= to_vvq(_vq
);
1854 wrap_counter
= off_wrap
>> VRING_PACKED_EVENT_F_WRAP_CTR
;
1855 used_idx
= off_wrap
& ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR
);
1857 return is_used_desc_packed(vq
, used_idx
, wrap_counter
);
1860 static bool virtqueue_enable_cb_delayed_packed(struct virtqueue
*_vq
)
1862 struct vring_virtqueue
*vq
= to_vvq(_vq
);
1863 u16 used_idx
, wrap_counter
, last_used_idx
;
1869 * We optimistically turn back on interrupts, then check if there was
1874 /* TODO: tune this threshold */
1875 bufs
= (vq
->packed
.vring
.num
- vq
->vq
.num_free
) * 3 / 4;
1876 last_used_idx
= READ_ONCE(vq
->last_used_idx
);
1877 wrap_counter
= packed_used_wrap_counter(last_used_idx
);
1879 used_idx
= packed_last_used(last_used_idx
) + bufs
;
1880 if (used_idx
>= vq
->packed
.vring
.num
) {
1881 used_idx
-= vq
->packed
.vring
.num
;
1885 vq
->packed
.vring
.driver
->off_wrap
= cpu_to_le16(used_idx
|
1886 (wrap_counter
<< VRING_PACKED_EVENT_F_WRAP_CTR
));
1889 * We need to update event offset and event wrap
1890 * counter first before updating event flags.
1892 virtio_wmb(vq
->weak_barriers
);
1895 if (vq
->packed
.event_flags_shadow
== VRING_PACKED_EVENT_FLAG_DISABLE
) {
1896 vq
->packed
.event_flags_shadow
= vq
->event
?
1897 VRING_PACKED_EVENT_FLAG_DESC
:
1898 VRING_PACKED_EVENT_FLAG_ENABLE
;
1899 vq
->packed
.vring
.driver
->flags
=
1900 cpu_to_le16(vq
->packed
.event_flags_shadow
);
1904 * We need to update event suppression structure first
1905 * before re-checking for more used buffers.
1907 virtio_mb(vq
->weak_barriers
);
1909 last_used_idx
= READ_ONCE(vq
->last_used_idx
);
1910 wrap_counter
= packed_used_wrap_counter(last_used_idx
);
1911 used_idx
= packed_last_used(last_used_idx
);
1912 if (is_used_desc_packed(vq
, used_idx
, wrap_counter
)) {
1921 static void *virtqueue_detach_unused_buf_packed(struct virtqueue
*_vq
)
1923 struct vring_virtqueue
*vq
= to_vvq(_vq
);
1929 for (i
= 0; i
< vq
->packed
.vring
.num
; i
++) {
1930 if (!vq
->packed
.desc_state
[i
].data
)
1932 /* detach_buf clears data, so grab it now. */
1933 buf
= vq
->packed
.desc_state
[i
].data
;
1934 detach_buf_packed(vq
, i
, NULL
);
1938 /* That should have freed everything. */
1939 BUG_ON(vq
->vq
.num_free
!= vq
->packed
.vring
.num
);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{
	struct vring_desc_extra *desc_extra;
	unsigned int i;

	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
				   GFP_KERNEL);
	if (!desc_extra)
		return NULL;

	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));

	for (i = 0; i < num - 1; i++)
		desc_extra[i].next = i + 1;

	return desc_extra;
}
1963 static void vring_free_packed(struct vring_virtqueue_packed
*vring_packed
,
1964 struct virtio_device
*vdev
,
1965 struct device
*dma_dev
)
1967 if (vring_packed
->vring
.desc
)
1968 vring_free_queue(vdev
, vring_packed
->ring_size_in_bytes
,
1969 vring_packed
->vring
.desc
,
1970 vring_packed
->ring_dma_addr
,
1973 if (vring_packed
->vring
.driver
)
1974 vring_free_queue(vdev
, vring_packed
->event_size_in_bytes
,
1975 vring_packed
->vring
.driver
,
1976 vring_packed
->driver_event_dma_addr
,
1979 if (vring_packed
->vring
.device
)
1980 vring_free_queue(vdev
, vring_packed
->event_size_in_bytes
,
1981 vring_packed
->vring
.device
,
1982 vring_packed
->device_event_dma_addr
,
1985 kfree(vring_packed
->desc_state
);
1986 kfree(vring_packed
->desc_extra
);
1989 static int vring_alloc_queue_packed(struct vring_virtqueue_packed
*vring_packed
,
1990 struct virtio_device
*vdev
,
1991 u32 num
, struct device
*dma_dev
)
1993 struct vring_packed_desc
*ring
;
1994 struct vring_packed_desc_event
*driver
, *device
;
1995 dma_addr_t ring_dma_addr
, driver_event_dma_addr
, device_event_dma_addr
;
1996 size_t ring_size_in_bytes
, event_size_in_bytes
;
1998 ring_size_in_bytes
= num
* sizeof(struct vring_packed_desc
);
2000 ring
= vring_alloc_queue(vdev
, ring_size_in_bytes
,
2002 GFP_KERNEL
| __GFP_NOWARN
| __GFP_ZERO
,
2007 vring_packed
->vring
.desc
= ring
;
2008 vring_packed
->ring_dma_addr
= ring_dma_addr
;
2009 vring_packed
->ring_size_in_bytes
= ring_size_in_bytes
;
2011 event_size_in_bytes
= sizeof(struct vring_packed_desc_event
);
2013 driver
= vring_alloc_queue(vdev
, event_size_in_bytes
,
2014 &driver_event_dma_addr
,
2015 GFP_KERNEL
| __GFP_NOWARN
| __GFP_ZERO
,
2020 vring_packed
->vring
.driver
= driver
;
2021 vring_packed
->event_size_in_bytes
= event_size_in_bytes
;
2022 vring_packed
->driver_event_dma_addr
= driver_event_dma_addr
;
2024 device
= vring_alloc_queue(vdev
, event_size_in_bytes
,
2025 &device_event_dma_addr
,
2026 GFP_KERNEL
| __GFP_NOWARN
| __GFP_ZERO
,
2031 vring_packed
->vring
.device
= device
;
2032 vring_packed
->device_event_dma_addr
= device_event_dma_addr
;
2034 vring_packed
->vring
.num
= num
;
2039 vring_free_packed(vring_packed
, vdev
, dma_dev
);
2043 static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed
*vring_packed
)
2045 struct vring_desc_state_packed
*state
;
2046 struct vring_desc_extra
*extra
;
2047 u32 num
= vring_packed
->vring
.num
;
2049 state
= kmalloc_array(num
, sizeof(struct vring_desc_state_packed
), GFP_KERNEL
);
2051 goto err_desc_state
;
2053 memset(state
, 0, num
* sizeof(struct vring_desc_state_packed
));
2055 extra
= vring_alloc_desc_extra(num
);
2057 goto err_desc_extra
;
2059 vring_packed
->desc_state
= state
;
2060 vring_packed
->desc_extra
= extra
;
2070 static void virtqueue_vring_init_packed(struct vring_virtqueue_packed
*vring_packed
,
2073 vring_packed
->next_avail_idx
= 0;
2074 vring_packed
->avail_wrap_counter
= 1;
2075 vring_packed
->event_flags_shadow
= 0;
2076 vring_packed
->avail_used_flags
= 1 << VRING_PACKED_DESC_F_AVAIL
;
2078 /* No callback? Tell other side not to bother us. */
2080 vring_packed
->event_flags_shadow
= VRING_PACKED_EVENT_FLAG_DISABLE
;
2081 vring_packed
->vring
.driver
->flags
=
2082 cpu_to_le16(vring_packed
->event_flags_shadow
);
2086 static void virtqueue_vring_attach_packed(struct vring_virtqueue
*vq
,
2087 struct vring_virtqueue_packed
*vring_packed
)
2089 vq
->packed
= *vring_packed
;
2091 /* Put everything in free lists. */
2095 static void virtqueue_reinit_packed(struct vring_virtqueue
*vq
)
2097 memset(vq
->packed
.vring
.device
, 0, vq
->packed
.event_size_in_bytes
);
2098 memset(vq
->packed
.vring
.driver
, 0, vq
->packed
.event_size_in_bytes
);
2100 /* we need to reset the desc.flags. For more, see is_used_desc_packed() */
2101 memset(vq
->packed
.vring
.desc
, 0, vq
->packed
.ring_size_in_bytes
);
2103 virtqueue_init(vq
, vq
->packed
.vring
.num
);
2104 virtqueue_vring_init_packed(&vq
->packed
, !!vq
->vq
.callback
);
2107 static struct virtqueue
*__vring_new_virtqueue_packed(unsigned int index
,
2108 struct vring_virtqueue_packed
*vring_packed
,
2109 struct virtio_device
*vdev
,
2112 bool (*notify
)(struct virtqueue
*),
2113 void (*callback
)(struct virtqueue
*),
2115 struct device
*dma_dev
)
2117 struct vring_virtqueue
*vq
;
2120 vq
= kmalloc(sizeof(*vq
), GFP_KERNEL
);
2124 vq
->vq
.callback
= callback
;
2127 vq
->vq
.index
= index
;
2128 vq
->vq
.reset
= false;
2129 vq
->we_own_ring
= false;
2130 vq
->notify
= notify
;
2131 vq
->weak_barriers
= weak_barriers
;
2132 #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
2137 vq
->packed_ring
= true;
2138 vq
->dma_dev
= dma_dev
;
2139 vq
->use_dma_api
= vring_use_dma_api(vdev
);
2141 vq
->indirect
= virtio_has_feature(vdev
, VIRTIO_RING_F_INDIRECT_DESC
) &&
2143 vq
->event
= virtio_has_feature(vdev
, VIRTIO_RING_F_EVENT_IDX
);
2145 if (virtio_has_feature(vdev
, VIRTIO_F_ORDER_PLATFORM
))
2146 vq
->weak_barriers
= false;
2148 err
= vring_alloc_state_extra_packed(vring_packed
);
2154 virtqueue_vring_init_packed(vring_packed
, !!callback
);
2156 virtqueue_init(vq
, vring_packed
->vring
.num
);
2157 virtqueue_vring_attach_packed(vq
, vring_packed
);
2159 spin_lock(&vdev
->vqs_list_lock
);
2160 list_add_tail(&vq
->vq
.list
, &vdev
->vqs
);
2161 spin_unlock(&vdev
->vqs_list_lock
);
2165 static struct virtqueue
*vring_create_virtqueue_packed(
2168 unsigned int vring_align
,
2169 struct virtio_device
*vdev
,
2171 bool may_reduce_num
,
2173 bool (*notify
)(struct virtqueue
*),
2174 void (*callback
)(struct virtqueue
*),
2176 struct device
*dma_dev
)
2178 struct vring_virtqueue_packed vring_packed
= {};
2179 struct virtqueue
*vq
;
2181 if (vring_alloc_queue_packed(&vring_packed
, vdev
, num
, dma_dev
))
2184 vq
= __vring_new_virtqueue_packed(index
, &vring_packed
, vdev
, weak_barriers
,
2185 context
, notify
, callback
, name
, dma_dev
);
2187 vring_free_packed(&vring_packed
, vdev
, dma_dev
);
2191 to_vvq(vq
)->we_own_ring
= true;
2196 static int virtqueue_resize_packed(struct virtqueue
*_vq
, u32 num
)
2198 struct vring_virtqueue_packed vring_packed
= {};
2199 struct vring_virtqueue
*vq
= to_vvq(_vq
);
2200 struct virtio_device
*vdev
= _vq
->vdev
;
2203 if (vring_alloc_queue_packed(&vring_packed
, vdev
, num
, vring_dma_dev(vq
)))
2206 err
= vring_alloc_state_extra_packed(&vring_packed
);
2208 goto err_state_extra
;
2210 vring_free(&vq
->vq
);
2212 virtqueue_vring_init_packed(&vring_packed
, !!vq
->vq
.callback
);
2214 virtqueue_init(vq
, vring_packed
.vring
.num
);
2215 virtqueue_vring_attach_packed(vq
, &vring_packed
);
2220 vring_free_packed(&vring_packed
, vdev
, vring_dma_dev(vq
));
2222 virtqueue_reinit_packed(vq
);
2226 static int virtqueue_disable_and_recycle(struct virtqueue
*_vq
,
2227 void (*recycle
)(struct virtqueue
*vq
, void *buf
))
2229 struct vring_virtqueue
*vq
= to_vvq(_vq
);
2230 struct virtio_device
*vdev
= vq
->vq
.vdev
;
2234 if (!vq
->we_own_ring
)
2237 if (!vdev
->config
->disable_vq_and_reset
)
2240 if (!vdev
->config
->enable_vq_after_reset
)
2243 err
= vdev
->config
->disable_vq_and_reset(_vq
);
2247 while ((buf
= virtqueue_detach_unused_buf(_vq
)) != NULL
)
2253 static int virtqueue_enable_after_reset(struct virtqueue
*_vq
)
2255 struct vring_virtqueue
*vq
= to_vvq(_vq
);
2256 struct virtio_device
*vdev
= vq
->vq
.vdev
;
2258 if (vdev
->config
->enable_vq_after_reset(_vq
))
/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				bool premapped,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, premapped, gfp) :
				 virtqueue_add_split(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, premapped, gfp);
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
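
/*
 * Illustrative sketch, not part of the original driver: how a hypothetical
 * caller might post a request with one device-readable and one
 * device-writable part via virtqueue_add_sgs().  The request/response
 * buffers, lengths and the token pointer are assumptions for this example.
 */
static inline int example_post_request(struct virtqueue *vq,
				       void *req, unsigned int req_len,
				       void *resp, unsigned int resp_len,
				       void *token)
{
	struct scatterlist out, in, *sgs[2];

	sg_init_one(&out, req, req_len);	/* readable by the device */
	sg_init_one(&in, resp, resp_len);	/* writable by the device */
	sgs[0] = &out;
	sgs[1] = &in;

	/* One out sg list followed by one in sg list; @token comes back on completion. */
	return virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
}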
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_outbuf_premapped - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
				   struct scatterlist *sg, unsigned int num,
				   void *data,
				   gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
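
/*
 * Illustrative sketch, not part of the original file: posting a single
 * device-writable receive buffer, as an rx path might do.  The buffer, the
 * length and the helper name are assumptions for this example.
 */
static inline int example_post_rx_buffer(struct virtqueue *vq,
					 void *buf, unsigned int len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);

	/* The token passed as @data is returned by virtqueue_get_buf(). */
	return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}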
/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_add_inbuf_premapped - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Return:
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
				  struct scatterlist *sg, unsigned int num,
				  void *data,
				  void *ctx,
				  gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped);

/**
 * virtqueue_dma_dev - get the dma dev
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns the DMA device, which can be used with the DMA API.
 */
struct device *virtqueue_dma_dev(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->use_dma_api)
		return vring_dma_dev(vq);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_dma_dev);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
				 virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
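
/*
 * Illustrative sketch only: the usual batching pattern.  Several
 * virtqueue_add_* calls may be made back to back and followed by a single
 * kick; the prepare/notify split above lets the (possibly slow) notify run
 * outside the lock that serializes the adds.  tx_vq and tx_lock are
 * hypothetical.
 *
 *	bool kick;
 *
 *	spin_lock(&tx_lock);
 *	... several virtqueue_add_outbuf(tx_vq, ...) calls ...
 *	kick = virtqueue_kick_prepare(tx_vq);
 *	spin_unlock(&tx_lock);
 *	if (kick)
 *		virtqueue_notify(tx_vq);
 */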

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
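
/*
 * Illustrative sketch only: draining completions.  Each call returns the
 * token passed to virtqueue_add_*() and sets len to the number of bytes the
 * device wrote.  vq and complete_request() are hypothetical.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(buf, len);
 */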

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed_ring)
		virtqueue_disable_cb_packed(_vq);
	else
		virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
				 virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
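
/*
 * Illustrative sketch only: the race check described above.  After draining
 * the queue, re-arm callbacks and test whether more buffers slipped in
 * before the re-enable took effect; if so, keep processing instead of
 * sleeping.  vq and process_used_buffers() are hypothetical.
 *
 *	unsigned int opaque;
 *
 *	process_used_buffers(vq);
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		process_used_buffers(vq);
 *	}
 */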

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
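
/*
 * Illustrative sketch only: the common callback-driven drain loop built on
 * virtqueue_disable_cb()/virtqueue_enable_cb().  A false return from
 * virtqueue_enable_cb() means new used buffers raced in while callbacks
 * were being re-armed, so the loop runs again.  vq, len and handle() are
 * hypothetical.
 *
 *	void *buf;
 *	unsigned int len;
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			handle(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */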

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->event_triggered)
		vq->event_triggered = false;

	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
				 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful for device
 * shutdown or resetting a queue.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
				 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
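
/*
 * Illustrative sketch only: typical teardown.  Once the device has been
 * reset and the queue is no longer active, reclaim the tokens of buffers
 * that were added but never used.  vq and free_request() are hypothetical.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_request(buf);
 */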

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}

/**
 * vring_interrupt - notify a virtqueue on an interrupt
 * @irq: the IRQ number (ignored)
 * @_vq: the struct virtqueue to notify
 *
 * Calls the callback function of @_vq to process the virtqueue
 * notification.
 */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken)) {
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
		dev_warn_once(&vq->vq.vdev->dev,
			      "virtio vring IRQ raised before DRIVER_OK");
		return IRQ_NONE;
#else
		return IRQ_HANDLED;
#endif
	}

	/* Just a hint for performance: so it's ok that this can be racy! */
	if (vq->event)
		data_race(vq->event_triggered = true);

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
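
/*
 * Illustrative sketch only: a transport typically registers vring_interrupt()
 * directly as the handler for whatever interrupt carries used-buffer
 * notifications, passing the struct virtqueue pointer as the cookie.  irq
 * and vq are hypothetical, and a real transport may share one interrupt
 * among several queues.
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  "my-virtio-vq", vq);
 */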

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name, vdev->dev.parent);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name, vdev->dev.parent);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);

struct virtqueue *vring_create_virtqueue_dma(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name,
	struct device *dma_dev)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name, dma_dev);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name, dma_dev);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);

/**
 * virtqueue_resize - resize the vring of vq
 * @_vq: the struct virtqueue we're talking about.
 * @num: new ring num
 * @recycle: callback to recycle unused buffers
 *
 * When it is really necessary to create a new vring, this sets the current vq
 * into the reset state, then calls the passed callback to recycle the buffers
 * that are no longer used. Only after the new vring is successfully created is
 * the old vring released.
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error.
 * 0: success.
 * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
 *  vq can still work normally
 * -EBUSY: Failed to sync with device, vq may not work properly
 * -ENOENT: Transport or device not supported
 * -E2BIG/-EINVAL: num error
 * -EPERM: Operation not permitted
 */
int virtqueue_resize(struct virtqueue *_vq, u32 num,
		     void (*recycle)(struct virtqueue *vq, void *buf))
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	int err;

	if (num > vq->vq.num_max)
		return -E2BIG;

	if (!num)
		return -EINVAL;

	if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
		return 0;

	err = virtqueue_disable_and_recycle(_vq, recycle);
	if (err)
		return err;

	if (vq->packed_ring)
		err = virtqueue_resize_packed(_vq, num);
	else
		err = virtqueue_resize_split(_vq, num);

	return virtqueue_enable_after_reset(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_resize);
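
/*
 * Illustrative sketch only: resizing with a recycle callback.  While the
 * queue is in reset, the callback is invoked once for every buffer that was
 * added but never used, so the driver can free or re-post it against the
 * new ring.  recycle_buf() and new_num are hypothetical; on -ENOMEM the
 * queue keeps its old ring and remains usable, as documented above.
 *
 *	static void recycle_buf(struct virtqueue *vq, void *buf)
 *	{
 *		kfree(buf);
 *	}
 *
 *	err = virtqueue_resize(vq, new_num, recycle_buf);
 */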

/**
 * virtqueue_reset - detach and recycle all unused buffers
 * @_vq: the struct virtqueue we're talking about.
 * @recycle: callback to recycle unused buffers
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error.
 * 0: success.
 * -EBUSY: Failed to sync with device, vq may not work properly
 * -ENOENT: Transport or device not supported
 * -EPERM: Operation not permitted
 */
int virtqueue_reset(struct virtqueue *_vq,
		    void (*recycle)(struct virtqueue *vq, void *buf))
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	int err;

	err = virtqueue_disable_and_recycle(_vq, recycle);
	if (err)
		return err;

	if (vq->packed_ring)
		virtqueue_reinit_packed(vq);
	else
		virtqueue_reinit_split(vq);

	return virtqueue_enable_after_reset(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_reset);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring_virtqueue_split vring_split = {};

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
		struct vring_virtqueue_packed vring_packed = {};

		vring_packed.vring.num = num;
		vring_packed.vring.desc = pages;
		return __vring_new_virtqueue_packed(index, &vring_packed,
						    vdev, weak_barriers,
						    context, notify, callback,
						    name, vdev->dev.parent);
	}

	vring_init(&vring_split.vring, num, pages, vring_align);
	return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
					   context, notify, callback, name,
					   vdev->dev.parent);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

static void vring_free(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr,
					 vring_dma_dev(vq));

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr,
					 vring_dma_dev(vq));

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr,
					 vring_dma_dev(vq));

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr,
					 vring_dma_dev(vq));
		}
	}
	if (!vq->packed_ring) {
		kfree(vq->split.desc_state);
		kfree(vq->split.desc_extra);
	}
}

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	spin_lock(&vq->vq.vdev->vqs_list_lock);
	list_del(&_vq->list);
	spin_unlock(&vq->vq.vdev->vqs_list_lock);

	vring_free(_vq);

	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

u32 vring_notification_data(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 next;

	if (vq->packed_ring)
		next = (vq->packed.next_avail_idx &
				~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
			vq->packed.avail_wrap_counter <<
				VRING_PACKED_EVENT_F_WRAP_CTR;
	else
		next = vq->split.avail_idx_shadow;

	return next << 16 | _vq->index;
}
EXPORT_SYMBOL_GPL(vring_notification_data);
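
/*
 * Illustrative note: the value above packs the next available index (and,
 * for packed rings, the wrap counter in bit VRING_PACKED_EVENT_F_WRAP_CTR)
 * into the high 16 bits and the virtqueue index into the low 16 bits.  For
 * example, a split ring with avail_idx_shadow == 3 on virtqueue 1 yields
 * (3 << 16) | 1 == 0x00030001.
 */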

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_ACCESS_PLATFORM:
			break;
		case VIRTIO_F_RING_PACKED:
			break;
		case VIRTIO_F_ORDER_PLATFORM:
			break;
		case VIRTIO_F_NOTIFICATION_DATA:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_break(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
	WRITE_ONCE(vq->broken, true);
}
EXPORT_SYMBOL_GPL(__virtqueue_break);

/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_unbreak(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
	WRITE_ONCE(vq->broken, false);
}
EXPORT_SYMBOL_GPL(__virtqueue_unbreak);

bool virtqueue_is_broken(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, true);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);

/*
 * This should allow the device to be used by the driver. You may
 * need to grab appropriate locks to flush the write to
 * vq->broken. This should only be used in specific cases, e.g.
 * probing and restoring. This function should only be called by the
 * core, not directly by the driver.
 */
void __virtio_unbreak_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, false);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(__virtio_unbreak_device);

dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.ring_dma_addr;

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.driver_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.device_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

/* Only available for split ring */
const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

/**
 * virtqueue_dma_map_single_attrs - map DMA for _vq
 * @_vq: the struct virtqueue we're talking about.
 * @ptr: the pointer of the buffer to do dma
 * @size: the size of the buffer to do dma
 * @dir: DMA direction
 * @attrs: DMA Attrs
 *
 * The caller calls this to do dma mapping in advance. The DMA address can be
 * passed to this _vq when it is in premapped mode.
 *
 * Returns the DMA address. The caller should check it with
 * virtqueue_dma_mapping_error().
 */
dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!vq->use_dma_api) {
		kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
		return (dma_addr_t)virt_to_phys(ptr);
	}

	return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
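
/*
 * Illustrative sketch only, assuming the queue has already been switched to
 * premapped mode: the driver maps the buffer itself, checks the mapping,
 * stores the DMA address in the scatterlist and then uses one of the
 * *_premapped add helpers.  vq, buf and len are hypothetical, and the
 * sg_dma_address() convention shown here is an assumption about how the
 * premapped path consumes the scatterlist.
 *
 *	struct scatterlist sg;
 *	dma_addr_t addr;
 *
 *	addr = virtqueue_dma_map_single_attrs(vq, buf, len,
 *					      DMA_FROM_DEVICE, 0);
 *	if (virtqueue_dma_mapping_error(vq, addr))
 *		return -ENOMEM;
 *
 *	sg_init_one(&sg, buf, len);
 *	sg_dma_address(&sg) = addr;
 *	virtqueue_add_inbuf_premapped(vq, &sg, 1, buf, NULL, GFP_ATOMIC);
 */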

/**
 * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
 * @_vq: the struct virtqueue we're talking about.
 * @addr: the dma address to unmap
 * @size: the size of the buffer
 * @dir: DMA direction
 * @attrs: DMA Attrs
 *
 * Unmap the address that is mapped by the virtqueue_dma_map_* APIs.
 */
void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!vq->use_dma_api)
		return;

	dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);

/**
 * virtqueue_dma_mapping_error - check dma address
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 *
 * Returns 0 if the DMA address is valid; any other value means it is invalid.
 */
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);

/**
 * virtqueue_dma_need_sync - check a dma address needs sync
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 *
 * Check if the DMA address mapped by the virtqueue_dma_map_* APIs needs to be
 * synchronized.
 *
 * Returns true if synchronization is needed, otherwise false.
 */
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!vq->use_dma_api)
		return false;

	return dma_need_sync(vring_dma_dev(vq), addr);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);

/**
 * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 * @offset: DMA address offset
 * @size: buf size for sync
 * @dir: DMA direction
 *
 * Before calling this function, use virtqueue_dma_need_sync() to confirm that
 * the DMA address really needs to be synchronized.
 */
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
					     dma_addr_t addr,
					     unsigned long offset, size_t size,
					     enum dma_data_direction dir)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct device *dev = vring_dma_dev(vq);

	if (!vq->use_dma_api)
		return;

	dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
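
/*
 * Illustrative sketch only: a premapped driver pays for the CPU sync only
 * when the platform actually requires it, e.g. before reading a completed
 * receive buffer.  vq, addr and len are hypothetical.
 *
 *	if (virtqueue_dma_need_sync(vq, addr))
 *		virtqueue_dma_sync_single_range_for_cpu(vq, addr, 0, len,
 *							DMA_FROM_DEVICE);
 */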

/**
 * virtqueue_dma_sync_single_range_for_device - dma sync for device
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 * @offset: DMA address offset
 * @size: buf size for sync
 * @dir: DMA direction
 *
 * Before calling this function, use virtqueue_dma_need_sync() to confirm that
 * the DMA address really needs to be synchronized.
 */
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
						dma_addr_t addr,
						unsigned long offset, size_t size,
						enum dma_data_direction dir)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct device *dev = vring_dma_dev(vq);

	if (!vq->use_dma_api)
		return;

	dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);

MODULE_DESCRIPTION("Virtio ring implementation");
MODULE_LICENSE("GPL");