// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/io.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"
#define VMBUS_PKT_TRAILER	8
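/*
 * The 8-byte trailer is the u64 of previous read/write indices that
 * hv_ringbuffer_write() appends after each packet's payload (see
 * hv_get_ring_bufferindices()); __hv_pkt_iter_next() skips over it.
 */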
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * It looks like Windows hosts have logic to deal with DoS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}
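/*
 * Example (hypothetical values): if the write began at old_write == 0x400
 * and read_index is also 0x400, the reader had drained everything before
 * this write, i.e. the ring transitioned from empty to non-empty, so
 * signal. Any other read_index means the host is still draining and will
 * notice the new data on its own.
 */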
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}
/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}
/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}
/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room; handles wrap-around of the destination
 * offset only.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
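/*
 * Example (hypothetical sizes): with ring_buffer_size == 0x1000, a copy
 * of srclen == 0x300 starting at start_write_offset == 0xf00 is done as
 * a single memcpy. The bytes past the end land in the second virtual
 * mapping of the same pages (see hv_ringbuffer_init()), and the returned
 * offset wraps to 0xf00 + 0x300 - 0x1000 == 0x200.
 */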
/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}
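/*
 * Example (hypothetical values): with dsize == 4096, read_loc == 100 and
 * write_loc == 300, there are 300 - 100 == 200 bytes to read, so *write
 * is 4096 - 200 == 3896. If the writer has wrapped (write_loc == 100,
 * read_loc == 300), *write is simply read_loc - write_loc == 200.
 */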
/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
	struct page **pages_wraparound;
	int i;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1,
				   sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] =
			&pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
			pgprot_decrypted(PAGE_KERNEL));

	kfree(pages_wraparound);
	if (!ring_info->ring_buffer)
		return -ENOMEM;

	/*
	 * Ensure the header page is zeroed since
	 * encryption status may have changed.
	 */
	memset(ring_info->ring_buffer, 0, HV_HYP_PAGE_SIZE);

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	/* Initialize buffer that holds copies of incoming packets */
	if (max_pkt_size) {
		ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
		if (!ring_info->pkt_buffer)
			return -ENOMEM;
		ring_info->pkt_buffer_size = max_pkt_size;
	}

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	kfree(ring_info->pkt_buffer);
	ring_info->pkt_buffer = NULL;
	ring_info->pkt_buffer_size = 0;
}
/*
 * Check if the ring buffer spinlock is available to take or not; used in
 * atomic contexts, like the panic path (see the Hyper-V framebuffer driver).
 */
bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rinfo = &channel->outbound;

	return spin_is_locked(&rinfo->ring_lock);
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);
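/*
 * Caller sketch (hypothetical): a sender running in a context that must
 * not spin, e.g. a panic notifier, can bail out instead of deadlocking
 * on a lock the interrupted CPU may hold:
 *
 *	if (hv_ringbuffer_spinlock_busy(channel))
 *		return;
 *	vmbus_sendpacket(channel, ...);
 */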
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid, u64 *trans_id)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;
	struct vmpacket_descriptor *desc = kv_list[0].iov_base;
	u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/*
	 * Allocate the request ID after the data has been copied into the
	 * ring buffer. Once this request ID is allocated, the completion
	 * path could find the data and free it.
	 */
	if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
		if (channel->next_request_id_callback != NULL) {
			rqst_id = channel->next_request_id_callback(channel, requestid);
			if (rqst_id == VMBUS_RQST_ERROR) {
				spin_unlock_irqrestore(&outring_info->ring_lock, flags);
				return -EAGAIN;
			}
		}
	}
	desc = hv_get_ring_buffer(outring_info) + old_write;
	__trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
	/*
	 * Ensure the compiler doesn't generate code that reads the value of
	 * the transaction ID from the ring buffer, which is shared with the
	 * Hyper-V host and subject to being changed at any time.
	 */
	WRITE_ONCE(desc->trans_id, __trans_id);
	if (trans_id)
		*trans_id = __trans_id;

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind) {
		if (rqst_id != VMBUS_NO_RQSTOR) {
			/* Reclaim request ID to avoid leak of IDs */
			if (channel->request_addr_callback != NULL)
				channel->request_addr_callback(channel, rqst_id);
		}
		return -ENODEV;
	}

	return 0;
}
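/*
 * On-wire layout written above (sketch): the kv_list vectors form the
 * descriptor plus payload, followed by the u64 prev_indices trailer,
 * which is why totalbytes_towrite starts at sizeof(u64):
 *
 *	[vmpacket_descriptor | payload][prev_indices (8 bytes)]
 */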
int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (!desc) {
		/*
		 * No error is set when there is not even a header; drivers
		 * are supposed to analyze buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
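/*
 * Caller sketch (hypothetical): drivers typically reach this through the
 * vmbus_recvpacket() wrapper rather than calling it directly:
 *
 *	u8 buf[256];
 *	u32 len;
 *	u64 req_id;
 *	int ret;
 *
 *	ret = vmbus_recvpacket(channel, buf, sizeof(buf), &len, &req_id);
 *	if (ret == 0 && len == 0)
 *		;	// nothing to read; not an error
 */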
/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc;

	/*
	 * The Hyper-V host writes the packet data, then uses
	 * store_release() to update the write_index. Use load_acquire()
	 * here to prevent loads of the packet data from being re-ordered
	 * before the read of the write_index and potentially getting
	 * stale data.
	 */
	write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
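/*
 * Example (hypothetical values): with ring_datasize == 4096,
 * priv_read_index == 3900 and write_index == 100, the writer has
 * wrapped, so (4096 - 3900) + 100 == 296 bytes are available to the
 * iterator.
 */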
/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc, *desc_copy;
	u32 bytes_avail, pkt_len, pkt_offset;

	hv_debug_delay_test(channel, MESSAGE_DELAY);

	bytes_avail = hv_pkt_iter_avail(rbi);
	if (bytes_avail < sizeof(struct vmpacket_descriptor))
		return NULL;
	bytes_avail = min(rbi->pkt_buffer_size, bytes_avail);

	desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	/*
	 * Ensure the compiler does not use references to incoming Hyper-V
	 * values (which could change at any moment) when reading local
	 * variables later in the code.
	 */
	pkt_len = READ_ONCE(desc->len8) << 3;
	pkt_offset = READ_ONCE(desc->offset8) << 3;

	/*
	 * If pkt_len is invalid, set it to the smaller of
	 * hv_pkt_iter_avail() and rbi->pkt_buffer_size.
	 */
	if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
		pkt_len = bytes_avail;

	/*
	 * If pkt_offset is invalid, arbitrarily set it to
	 * the size of vmpacket_descriptor.
	 */
	if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
		pkt_offset = sizeof(struct vmpacket_descriptor);

	/* Copy the Hyper-V packet out of the ring buffer */
	desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
	memcpy(desc_copy, desc, pkt_len);

	/*
	 * Hyper-V could still change len8 and offset8 after the earlier read.
	 * Ensure that desc_copy has legal values for len8 and offset8 that
	 * are consistent with the copy we just made.
	 */
	desc_copy->len8 = pkt_len >> 3;
	desc_copy->offset8 = pkt_offset >> 3;

	return desc_copy;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
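/*
 * Iterator usage sketch: drivers normally consume packets through the
 * foreach_vmbus_pkt() helper in <linux/hyperv.h>, which is built on
 * hv_pkt_iter_first()/hv_pkt_iter_next():
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	foreach_vmbus_pkt(pkt, channel)
 *		handle_packet(channel, pkt);	// hypothetical handler
 */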
/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	hv_debug_delay_test(channel, MESSAGE_DELAY);

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}
/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
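/*
 * Worked example for the signaling decision above (hypothetical values,
 * pending_sz == 2048): suppose this iteration freed bytes_read == 1024
 * and afterwards curr_write_sz == 2560. Before the iteration the writer
 * had 2560 - 1024 == 1536 <= 2048 bytes, i.e. it was blocked, and now
 * 2560 > 2048, so this call crosses the blocked -> unblocked boundary
 * and signals the host. Had curr_write_sz been 4096, the host was never
 * blocked (4096 - 1024 > 2048) and no signal is sent.
 */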