/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	/* Make sure the mask update is visible before the ring is read. */
	mb();
}
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	/* Make sure clearing the mask is visible before re-checking the ring. */
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}
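
/*
 * Illustrative note: hv_begin_read()/hv_end_read() bracket a batched read
 * pass.  The consumer masks host interrupts, drains the ring, then unmasks
 * and re-checks; a non-zero return from hv_end_read() means new data arrived
 * in the race window and another read pass is needed.
 */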
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}
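
/*
 * Illustration (values are hypothetical): if read_index == write_index == 100
 * (ring empty) and the guest then writes a packet so that write_index moves
 * to 300, old_write (100) equals read_index (100) and the host is signaled.
 * If read_index had already moved past 100, the host is still actively
 * draining the ring and no signal is needed.
 */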
/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer
 * of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */
static bool hv_need_to_signal_on_read(u32 prev_write_sz,
				      struct hv_ring_buffer_info *rbi)
{
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/*
	 * If the other end is not blocked on write don't bother.
	 */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}
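
/*
 * Worked example (illustrative numbers only): with ring_datasize = 4096,
 * read_index = 100 and write_index = 1100, write_loc >= read_loc, so
 * cur_write_sz = 4096 - (1100 - 100) = 3096 bytes of free space.  If the
 * blocked producer advertised pending_send_sz = 2048 and the free space
 * before this read (prev_write_sz) was below 2048, the producer is signaled.
 */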
/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}
/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}
/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}
/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead by @offset bytes (for example,
 * past a packet descriptor it has already consumed).
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}
/*
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}
/*
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}
/*
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}
/*
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices as u64 of the specified ring buffer
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
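
/*
 * Note: the value above packs the write index into the upper 32 bits of a
 * u64 and leaves the lower 32 bits (where the read index would go) as zero.
 * This is the "previous indices" trailer that hv_ringbuffer_write() appends
 * after each packet.
 */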
/*
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into the destination
 * buffer. Assume there is enough room. Handles wrap-around in src case only!!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
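
/*
 * Worked example (illustrative numbers): with ring_buffer_size = 4096,
 * start_read_offset = 4000 and destlen = 200, the copy wraps: frag_len =
 * 96 bytes come from offset 4000, the remaining 104 bytes from offset 0,
 * and the returned offset is (4000 + 200) % 4096 = 104.
 */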
/*
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy data from the source buffer into the ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;

		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
/*
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
/*
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/*
	 * Set the feature bit for enabling flow control.
	 */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
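
/*
 * Illustrative (hypothetical) setup sketch, assuming the caller allocates a
 * page-aligned, page-multiple buffer; the leading struct hv_ring_buffer page
 * is the control region and the remainder is ring data:
 *
 *	struct hv_ring_buffer_info rbi;
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, order);
 *	int ret = hv_ringbuffer_init(&rbi, buf, (1 << order) * PAGE_SIZE);
 */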
/*
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}
/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	/* Reserve room for the 64-bit "previous indices" trailer. */
	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* If there is only room for the packet, assume it is full. */
	/* Otherwise, the next time around, we think the ring buffer */
	/* is empty since the read index == write index */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}
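
/*
 * Illustrative (hypothetical) caller sketch; "hdr", "data" and "datalen"
 * are placeholder variables, and how the caller actually signals the host
 * when *signal comes back true is outside the scope of this file:
 *
 *	struct kvec kv[2] = {
 *		{ .iov_base = &hdr,  .iov_len = sizeof(hdr) },
 *		{ .iov_base = data,  .iov_len = datalen },
 *	};
 *	bool signal = false;
 *	int ret = hv_ringbuffer_write(outring_info, kv, 2, &signal);
 */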
/*
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
		       void *Buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&Inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(Inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(Inring_info);

	next_read_location = hv_copyfrom_ringbuffer(Inring_info,
						    Buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&Inring_info->ring_lock, flags);

	return 0;
}
/*
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	/* Consume the 64-bit "previous indices" trailer written by the producer. */
	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/* Make sure all reads are done before we update the read index since */
	/* the writer may start writing to the read area once the read index */
	/* is updated */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);

	return 0;
}
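
/*
 * Illustrative (hypothetical) consumer sketch: a caller typically peeks the
 * packet descriptor first, derives the payload length and offset from it,
 * and then reads the payload while skipping the descriptor via "offset";
 * "inring" and "buf" are placeholders:
 *
 *	struct vmpacket_descriptor desc;
 *	bool signal = false;
 *
 *	if (hv_ringbuffer_peek(inring, &desc, sizeof(desc)) == 0)
 *		hv_ringbuffer_read(inring, buf,
 *				   (desc.len8 - desc.offset8) << 3,
 *				   desc.offset8 << 3, &signal);
 */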