// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"
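
/*
 * Each packet on the ring is followed by an 8-byte trailer carrying the
 * ring indices at the time the packet was written (the prev_indices
 * value appended in hv_ringbuffer_write()); the read iterator skips it
 * via VMBUS_PKT_TRAILER.
 */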
#define VMBUS_PKT_TRAILER	8
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further, once
 *	   the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}
/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}
/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}
/*
 * Get the read and write indices as u64 of the specified ring buffer.
 * Only the write_index is recorded, in the upper 32 bits; the lower
 * 32 bits are left as zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
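/*
 * The single memcpy() below is safe even when the copy runs past the
 * end of the data area, because the ring's data pages are mapped twice
 * back-to-back (see hv_ringbuffer_init()); only the returned offset
 * needs to wrap.
 */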
static u32 hv_copyto_ringbuffer(struct hv_ring_buffer_info *ring_info,
				u32 start_write_offset,
				const void *src,
				u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get number of bytes available to read and to write to
 * for the specified ring buffer
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;
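
	/*
	 * Example: with dsize = 4096, read_loc = 100 and write_loc = 600,
	 * the computation below yields *write = 4096 - 500 = 3596 and
	 * *read = 500.
	 */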
	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}
/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
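
	/*
	 * pages_wraparound now lists the header page followed by two
	 * consecutive runs of the data pages, so the vmap() below makes
	 * the data region appear twice in virtual memory. Reads and
	 * writes that cross the end of the ring therefore need only a
	 * single memcpy (see hv_copyto_ringbuffer() and
	 * hv_ringbuffer_read()).
	 */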
	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);
}
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;
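
	/*
	 * totalbytes_towrite was seeded with sizeof(u64) above to reserve
	 * room for the prev_indices trailer that is appended after the
	 * payload (the VMBUS_PKT_TRAILER the read side skips over).
	 */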
	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
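
/*
 * Note: drivers normally reach hv_ringbuffer_write() through helpers
 * such as vmbus_sendpacket(), which build the kvec list with the packet
 * descriptor first, followed by the payload.
 */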
int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is returned when there is not even a header;
		 * drivers are supposed to check buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
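
/*
 * Layout of a packet on the ring, as assumed by the iterator functions
 * below: a struct vmpacket_descriptor (offset8 and len8 are in units of
 * 8 bytes, hence the "<< 3" conversions), the payload, and the 8-byte
 * prev_indices trailer that VMBUS_PKT_TRAILER accounts for.
 */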
/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
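
	/*
	 * This prefetches the cache line immediately past the packet,
	 * where the prev_indices trailer and any following descriptor
	 * begin (desc->len8 is in 8-byte units).
	 */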
	prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
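
/*
 * Typical read-side usage (a sketch; hv_pkt_iter_next() and the
 * foreach_vmbus_pkt() helper wrapping this pair are declared in
 * include/linux/hyperv.h):
 *
 *	struct vmpacket_descriptor *desc;
 *
 *	foreach_vmbus_pkt(desc, channel)
 *		process_packet(desc);	// hypothetical per-packet handler
 *	hv_pkt_iter_close(channel);
 */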
/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}
/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;
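
	/*
	 * Example: with pending_sz = 2048, curr_write_sz = 2560 and
	 * bytes_read = 1024, the pre-iteration free space was 1536
	 * (blocked) and is now 2560 (unblocked), so the host is signaled.
	 */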
	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);