// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}
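/*
 * A worked illustration of the check above (hypothetical indices):
 * suppose read_index == write_index == 100, i.e. the ring was empty and
 * the host may have stopped draining. A writer whose packet started at
 * old_write == 100 sees old_write == read_index and signals the host;
 * a later writer whose packet started at old_write == 180 does not,
 * because the first signal already covered the empty -> non-empty
 * transition.
 */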
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}
/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}
/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}
/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
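/*
 * For example (hypothetical value): with write_index == 0x1500 the
 * returned value is 0x0000150000000000 -- the write index in the upper
 * 32 bits, zero in the lower 32. hv_ringbuffer_write() appends this u64
 * to each packet as the "previous indices" trailer.
 */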
/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
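/*
 * Wrap-around example (hypothetical sizes): with ring_buffer_size ==
 * 4096, start_write_offset == 4000 and srclen == 200, the single memcpy
 * runs past byte 4095 into the second virtual mapping of the data pages
 * (see hv_ringbuffer_init() below), and the returned offset wraps to
 * 4200 - 4096 == 104.
 */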
/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get number of bytes available to read and to write to
 * for the specified ring buffer
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}
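/*
 * Example (hypothetical indices): with dsize == 4096, read_index == 100
 * and write_index == 300, *write == 4096 - (300 - 100) == 3896 and
 * *read == 200. If the writer has wrapped (write_index == 50,
 * read_index == 100), *write == 100 - 50 == 50 and *read == 4046.
 */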
/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
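/*
 * A sketch of the wraparound mapping for page_cnt == 4 (pages P0..P3,
 * where P0 holds struct hv_ring_buffer and P1..P3 hold data):
 *
 *	pages_wraparound[] = { P0, P1, P2, P3, P1, P2, P3 }
 *
 * vmap() thus maps the data pages twice, back to back, so a packet that
 * wraps around the end of the data area is still virtually contiguous
 * and a single memcpy suffices on both the read and write paths.
 */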
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);
}
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;
	struct vmpacket_descriptor *desc = kv_list[0].iov_base;
	u64 rqst_id = VMBUS_NO_RQSTOR;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/*
	 * Allocate the request ID after the data has been copied into the
	 * ring buffer. Once this request ID is allocated, the completion
	 * path could find the data and free it.
	 */
	if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
		rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
		if (rqst_id == VMBUS_RQST_ERROR) {
			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
			pr_err("No request id available\n");
			return -EAGAIN;
		}
	}
	desc = hv_get_ring_buffer(outring_info) + old_write;
	desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind) {
		if (rqst_id != VMBUS_NO_RQSTOR) {
			/* Reclaim request ID to avoid leak of IDs */
			vmbus_request_addr(&channel->requestor, rqst_id);
		}
		return -ENODEV;
	}

	return 0;
}
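/*
 * Caller sketch (hypothetical, simplified driver code; real callers go
 * through vmbus_sendpacket() and friends in channel.c, which also add a
 * u64-alignment padding element to the kvec array):
 *
 *	struct vmpacket_descriptor desc = {
 *		.type	 = VM_PKT_DATA_INBAND,
 *		.flags	 = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
 *		.offset8 = sizeof(desc) >> 3,
 *		.len8	 = ALIGN(sizeof(desc) + payload_len, 8) >> 3,
 *	};
 *	struct kvec kv[] = {
 *		{ .iov_base = &desc,   .iov_len = sizeof(desc) },
 *		{ .iov_base = payload, .iov_len = payload_len  },
 *	};
 *
 *	ret = hv_ringbuffer_write(channel, kv, ARRAY_SIZE(kv), requestid);
 */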
int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set when there is not even a header; drivers
		 * are supposed to analyze buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
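/*
 * Caller sketch (hypothetical; this is the path behind
 * vmbus_recvpacket() in channel.c):
 *
 *	u32 actual;
 *	u64 req;
 *	u8 buf[256];
 *	int ret;
 *
 *	ret = hv_ringbuffer_read(channel, buf, sizeof(buf), &actual,
 *				 &req, false);
 *	if (ret == 0 && actual == 0)
 *		return;		// ring was empty: no packet, not an error
 */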
/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;

	return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
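/*
 * Example (hypothetical indices): with ring_datasize == 4096,
 * priv_read_index == 4000 and write_index == 100 after the writer has
 * wrapped, (4096 - 4000) + 100 == 196 bytes are available to the
 * iterator.
 */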
/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	hv_debug_delay_test(channel, MESSAGE_DELAY);
	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;

	prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	hv_debug_delay_test(channel, MESSAGE_DELAY);

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
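/*
 * Typical consumer loop (sketch; the foreach_vmbus_pkt() helper in
 * include/linux/hyperv.h wraps this same pattern):
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	for (pkt = hv_pkt_iter_first(channel); pkt;
 *	     pkt = __hv_pkt_iter_next(channel, pkt))
 *		handle_packet(pkt);	// hypothetical per-packet callback
 *
 *	hv_pkt_iter_close(channel);
 */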
/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;

	return rbi->ring_datasize - start_read_index +
		rbi->priv_read_index;
}
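/*
 * Example (hypothetical indices): with ring_datasize == 4096,
 * start_read_index == 4000 and priv_read_index == 120 after wrapping,
 * 4096 - 4000 + 120 == 216 bytes were read in this cycle.
 */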
/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
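/*
 * Worked example of the two signaling checks in the function below
 * (hypothetical numbers): pending_sz == 512, curr_write_sz == 600,
 * bytes_read == 200. Before this iteration the writer saw
 * 600 - 200 == 400 free bytes (<= 512, i.e. blocked); it now sees 600
 * (> 512). That is exactly the blocked -> unblocked transition, so the
 * host is signaled. Had curr_write_sz been 400 (<= pending_sz), the
 * writer would still be blocked and no signal would be sent.
 */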
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);