// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Partial implementation of virtio 0.9. Event index is used for signalling,
 * unconditionally. Design roughly follows the Linux kernel implementation in
 * order to be able to judge its performance.
 */
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <linux/virtio_ring.h>

/* per-descriptor cookie passed in by the caller */
struct data {
	void *data;
} *data;

struct vring ring;
/* enabling the below activates experimental ring polling code
 * (which skips index reads on consumer in favor of looking at
 * high bits of ring id ^ 0x8000).
 */
/* #ifdef RING_POLL */
/* enabling the below activates experimental in-order code
 * (which skips ring updates and reads and writes len in descriptor).
 */
/* #ifdef INORDER */

#if defined(RING_POLL) && defined(INORDER)
#error "RING_POLL and INORDER are mutually exclusive"
#endif
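/* Hypothetical helper (illustration only, not part of the original file):
 * makes the RING_POLL freshness test explicit. Each published ring entry
 * carries the descriptor head in its low bits and a wrap counter, flipped
 * by ^ 0x8000, in its high bits; an entry is current exactly when those
 * high bits match the consumer's expected index. Assumes size is a power
 * of two smaller than 0x8000.
 */
static inline int ring_poll_entry_is_fresh(unsigned entry,
					   unsigned short expected_idx,
					   unsigned size)
{
	return !((entry ^ expected_idx ^ 0x8000) & ~(size - 1));
}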
/* how much padding is needed to avoid false cache sharing */
#define HOST_GUEST_PADDING 0x80
struct guest {
	unsigned short avail_idx;
	unsigned short last_used_idx;
	unsigned short num_free;
	unsigned short kicked_avail_idx;
#ifndef INORDER
	unsigned short free_head;
#else
	unsigned short reserved_free_head;
#endif
	unsigned char reserved[HOST_GUEST_PADDING - 10];
} guest;
struct host {
	/* we do not need to track last avail index
	 * unless we have more than one in flight.
	 */
	unsigned short used_idx;
	unsigned short called_used_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 4];
} host;
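/* Sanity check (added illustration, not in the original): each side's
 * state should occupy exactly HOST_GUEST_PADDING bytes so that guest and
 * host fields never share a cache line; the "- 10" and "- 4" above are
 * the byte sizes of the fields preceding each reserved array.
 */
_Static_assert(sizeof(struct guest) == HOST_GUEST_PADDING,
	       "guest state must fill one padding unit");
_Static_assert(sizeof(struct host) == HOST_GUEST_PADDING,
	       "host state must fill one padding unit");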
/* implemented by ring */
void alloc_ring(void)
{
	int ret;
	int i;
	void *p;

	ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000));
	if (ret) {
		perror("Unable to allocate ring buffer.\n");
		exit(3);
	}
	memset(p, 0, vring_size(ring_size, 0x1000));
	vring_init(&ring, ring_size, p, 0x1000);
	guest.avail_idx = 0;
	guest.kicked_avail_idx = -1;
	guest.last_used_idx = 0;
	/* Put everything in free lists. */
	guest.free_head = 0;
	for (i = 0; i < ring_size - 1; i++)
		ring.desc[i].next = i + 1;
	host.used_idx = 0;
	host.called_used_idx = -1;
	guest.num_free = ring_size;
	data = malloc(ring_size * sizeof *data);
	if (!data) {
		perror("Unable to allocate data buffer.\n");
		exit(3);
	}
	memset(data, 0, ring_size * sizeof *data);
}
/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	unsigned head;
#ifndef INORDER
	unsigned avail;
#endif
	struct vring_desc *desc;

	if (!guest.num_free)
		return -1;

#ifdef INORDER
	head = (ring_size - 1) & (guest.avail_idx++);
#else
	head = guest.free_head;
#endif
	guest.num_free--;

	desc = ring.desc;
	desc[head].flags = VRING_DESC_F_NEXT;
	desc[head].addr = (unsigned long)(void *)buf;
	desc[head].len = len;
	/* We do it like this to simulate the way
	 * we'd have to flip it if we had multiple
	 * descriptors.
	 */
	desc[head].flags &= ~VRING_DESC_F_NEXT;
#ifndef INORDER
	guest.free_head = desc[head].next;
#endif

	data[head].data = datap;

#ifdef RING_POLL
	/* Barrier A (for pairing) */
	smp_release();
	avail = guest.avail_idx++;
	ring.avail->ring[avail & (ring_size - 1)] =
		(head | (avail & ~(ring_size - 1))) ^ 0x8000;
#else
#ifndef INORDER
	/* Barrier A (for pairing) */
	smp_release();
	avail = (ring_size - 1) & (guest.avail_idx++);
	ring.avail->ring[avail] = head;
#endif
	/* Barrier A (for pairing) */
	smp_release();
	ring.avail->idx = guest.avail_idx;
#endif
	return 0;
}
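/* Worked example (added illustration, not in the original): with
 * ring_size = 256, publishing head = 5 at avail = 0x0102 under RING_POLL
 * stores (5 | (0x0102 & ~0xff)) ^ 0x8000 = 0x8105 in the avail ring.
 * A consumer expecting index 0x0102 computes
 * (0x8105 ^ 0x0102 ^ 0x8000) & ~0xff == 0 and accepts the entry; a
 * consumer still on an earlier lap (expecting 0x0002) gets 0x0100 and
 * keeps waiting, all without ever reading ring.avail->idx.
 */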
void *get_buf(unsigned *lenp, void **bufp)
{
	unsigned head;
	unsigned index;
	void *datap;

#ifdef RING_POLL
	head = (ring_size - 1) & guest.last_used_idx;
	index = ring.used->ring[head].id;
	if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
		return NULL;
	/* Barrier B (for pairing) */
	smp_acquire();
	index &= ring_size - 1;
#else
	if (ring.used->idx == guest.last_used_idx)
		return NULL;
	/* Barrier B (for pairing) */
	smp_acquire();
#ifdef INORDER
	head = (ring_size - 1) & guest.last_used_idx;
	index = head;
#else
	head = (ring_size - 1) & guest.last_used_idx;
	index = ring.used->ring[head].id;
#endif
#endif
#ifdef INORDER
	*lenp = ring.desc[index].len;
#else
	*lenp = ring.used->ring[head].len;
#endif
	datap = data[index].data;
	*bufp = (void *)(unsigned long)ring.desc[index].addr;
	data[index].data = NULL;
#ifndef INORDER
	ring.desc[index].next = guest.free_head;
	guest.free_head = index;
#endif
	guest.num_free++;
	guest.last_used_idx++;
	return datap;
}
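/* Illustrative guest-side cycle (a sketch, not part of the original):
 * post one buffer, notify the host, then busy-poll for the completion.
 * Relies only on the interface above plus kick_available(), assumed
 * declared in main.h.
 */
static void example_guest_cycle(void *buf, unsigned len)
{
	void *datap;
	void *used_buf;
	unsigned used_len;

	if (add_inbuf(len, buf, buf) < 0)
		return;			/* ring is full */
	kick_available();		/* notify the host if it asked us to */
	do {
		datap = get_buf(&used_len, &used_buf);
	} while (!datap);		/* spin until the host uses the buffer */
}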
bool used_empty()
{
	unsigned short last_used_idx = guest.last_used_idx;
#ifdef RING_POLL
	unsigned short head = last_used_idx & (ring_size - 1);
	unsigned index = ring.used->ring[head].id;

	return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
#else
	return ring.used->idx == last_used_idx;
#endif
}
void disable_call()
{
	/* Doing nothing to disable calls might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

bool enable_call()
{
	vring_used_event(&ring) = guest.last_used_idx;
	/* Flush call index write */
	/* Barrier D (for pairing) */
	smp_mb();
	return used_empty();
}
void kick_available(void)
{
	bool need;

	/* Flush in previous flags write */
	/* Barrier C (for pairing) */
	smp_mb();
	need = vring_need_event(vring_avail_event(&ring),
				guest.avail_idx,
				guest.kicked_avail_idx);

	guest.kicked_avail_idx = guest.avail_idx;
	if (need)
		kick();
}
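/* Worked example (added for illustration): vring_need_event(event, new, old)
 * is true iff the other side's event index lies in the window (old, new],
 * i.e. this batch of index updates crossed the point where a notification
 * was requested. The hypothetical self-check below is not in the original
 * file.
 */
static void example_check_need_event(void)
{
	/* moving avail_idx from 4 to 6 crosses event index 5: must kick */
	assert(vring_need_event(5, 6, 4));
	/* event index 9 is outside (4, 6]: the kick can be skipped */
	assert(!vring_need_event(9, 6, 4));
}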
/* host side */
void disable_kick()
{
	/* Doing nothing to disable kicks might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

bool enable_kick()
{
	vring_avail_event(&ring) = host.used_idx;
	/* Barrier C (for pairing) */
	smp_mb();
	return avail_empty();
}
bool avail_empty()
{
	unsigned head = host.used_idx;
#ifdef RING_POLL
	unsigned index = ring.avail->ring[head & (ring_size - 1)];

	return ((index ^ head ^ 0x8000) & ~(ring_size - 1));
#else
	return head == ring.avail->idx;
#endif
}
bool use_buf(unsigned *lenp, void **bufp)
{
	unsigned used_idx = host.used_idx;
	struct vring_desc *desc;
	unsigned head;

#ifdef RING_POLL
	head = ring.avail->ring[used_idx & (ring_size - 1)];
	if ((used_idx ^ head ^ 0x8000) & ~(ring_size - 1))
		return false;
	/* Barrier A (for pairing) */
	smp_acquire();

	used_idx &= ring_size - 1;
	desc = &ring.desc[head & (ring_size - 1)];
#else
	if (used_idx == ring.avail->idx)
		return false;

	/* Barrier A (for pairing) */
	smp_acquire();

	used_idx &= ring_size - 1;
#ifdef INORDER
	head = used_idx;
#else
	head = ring.avail->ring[used_idx];
#endif
	desc = &ring.desc[head];
#endif

	*lenp = desc->len;
	*bufp = (void *)(unsigned long)desc->addr;

#ifdef INORDER
	desc->len = desc->len - 1;
#else
	/* now update used ring */
	ring.used->ring[used_idx].id = head;
	ring.used->ring[used_idx].len = desc->len - 1;
#endif
	/* Barrier B (for pairing) */
	smp_release();
	host.used_idx++;
#ifndef INORDER
	ring.used->idx = host.used_idx;
#endif
	return true;
}
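/* Illustrative host-side drain (a sketch, not part of the original):
 * consume every pending buffer, then send one completion notification.
 * Assumes call_used() is declared in main.h.
 */
static void example_host_drain(void)
{
	unsigned len;
	void *buf;

	while (use_buf(&len, &buf))
		;		/* each iteration publishes one used entry */
	call_used();		/* notify the guest if it requested a call */
}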
void call_used(void)
{
	bool need;

	/* Flush in previous flags write */
	/* Barrier D (for pairing) */
	smp_mb();

	need = vring_need_event(vring_used_event(&ring),
				host.used_idx,
				host.called_used_idx);

	host.called_used_idx = host.used_idx;

	if (need)
		call();
}
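/*
 * Barrier pairing summary (added commentary, inferred from the labels
 * above): A pairs the guest's smp_release() before publishing avail
 * entries with the host's smp_acquire() in use_buf(); B pairs the host's
 * smp_release() before publishing used entries with the guest's
 * smp_acquire() in get_buf(); C is the full smp_mb() ordering avail-index
 * writes against avail-event reads (kick_available() vs. enable_kick());
 * D is the full smp_mb() ordering used-index writes against used-event
 * reads (call_used() vs. enable_call()).
 */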