/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

#include "hyperv_vmbus.h"
/* Amount of space, in bytes, available to write to the ring */
#define BYTES_AVAIL_TO_WRITE(r, w, z) \
        (((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))
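
/*
 * Worked example (illustrative numbers): with z = 4096 data bytes,
 * r = 100 and w = 300, the writer may use 4096 - (300 - 100) = 3896
 * bytes; with r = 300 and w = 100, it may use 300 - 100 = 200 bytes.
 * The result equals z only when r == w (an empty ring), so writers
 * must never let the indices meet again after writing, or a full ring
 * would read back as empty (see the check in hv_ringbuffer_write()).
 */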

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
                             u32 *read, u32 *write)
{
        u32 read_loc, write_loc;

        smp_read_barrier_depends();

        /* Capture the read/write indices before they change */
        read_loc = rbi->ring_buffer->read_index;
        write_loc = rbi->ring_buffer->write_index;

        *write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
        *read = rbi->ring_datasize - *write;
}

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer.
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer.
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer.
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location, advanced by 'offset' bytes and wrapped
 * around the end of the data area, for the specified ring buffer.
 * This allows the caller to skip over data it has already consumed
 * (e.g. a packet descriptor).
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}
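
/*
 * Worked example (illustrative numbers): with read_index = 4000,
 * offset = 200 and ring_datasize = 4096, the function returns
 * (4000 + 200) % 4096 = 104, i.e. the position just past the skipped
 * bytes, wrapped around the end of the data area.
 */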

/*
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer.
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}

/*
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer's data area.
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}

/*
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer's data area.
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices of the specified ring buffer, packed
 * into a single u64 (only the write index is actually recorded).
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
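
/*
 * Layout of the value returned above (the low half, nominally the
 * read index, is left zero):
 *
 *      bits 63..32     write_index when the packet was committed
 *      bits 31..0      0
 *
 * This u64 is appended to every packet as the "previous indices"
 * trailer: hv_ringbuffer_write() stores it and hv_ringbuffer_read()
 * consumes it.
 */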

/*
 * hv_dump_ring_info()
 *
 * Dump the ring buffer info out to the console.
 */
void hv_dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        hv_get_ringbuffer_availbytes(ring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        pr_debug("%s <<ringinfo %p buffer %p avail write %u "
                 "avail read %u read idx %u write idx %u>>\n",
                 prefix,
                 ring_info,
                 ring_info->ring_buffer->buffer,
                 bytes_avail_towrite,
                 bytes_avail_toread,
                 ring_info->ring_buffer->read_index,
                 ring_info->ring_buffer->write_index);
}

/*
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy from the ring buffer into a destination
 * buffer. Assumes there is enough room; handles wrap-around on the
 * source (ring) side only!
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        void                            *dest,
        u32                             destlen,
        u32                             start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;

                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else
                memcpy(dest, ring_buffer + start_read_offset, destlen);

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}
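
/*
 * Worked example (illustrative numbers): with ring_buffer_size = 4096,
 * start_read_offset = 4000 and destlen = 200, the copy wraps around:
 * frag_len = 96 bytes come from offsets 4000..4095, the remaining
 * 104 bytes from offsets 0..103, and the returned offset is
 * (4000 + 200) % 4096 = 104, ready for the next copy.
 */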

/*
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room; handles wrap-around on the
 * destination (ring) side only!
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
        void                            *src,
        u32                             srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else
                memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}
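
/*
 * The wrap-around arithmetic mirrors hv_copyfrom_ringbuffer() above,
 * with the ring as destination rather than source; the same
 * 4096/4000/200 example applies with the two memcpy() directions
 * reversed.
 */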

/*
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer.
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/*
 * hv_get_ringbuffer_interrupt_mask()
 *
 * Get the interrupt mask for the specified ring buffer.
 */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
        return rbi->ring_buffer->interrupt_mask;
}
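
/*
 * Note: interrupt_mask acts as the ring's signaling throttle. While
 * the endpoint reading from the ring holds it non-zero, the writing
 * endpoint may skip the notification it would otherwise raise to
 * announce new data.
 */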

/*
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer.
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        /* The ring buffer header must occupy exactly one page. */
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
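
/*
 * Sizing example (illustrative numbers): the struct hv_ring_buffer
 * header occupies the first PAGE_SIZE bytes of 'buffer', so with
 * 4 KB pages a caller passing buflen = 4 * PAGE_SIZE ends up with
 * ring_datasize = 3 * PAGE_SIZE = 12288 bytes of usable data space.
 */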

/*
 * hv_ringbuffer_cleanup()
 *
 * Clean up the ring buffer (nothing to do at present; the backing
 * memory is owned and freed by the caller).
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 * hv_ringbuffer_write()
 *
 * Write the scatterlist contents to the ring buffer as one packet.
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct scatterlist *sglist, u32 sgcount)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;

        struct scatterlist *sg;
        u32 next_write_location;
        u64 prev_indices = 0;
        unsigned long flags;

        for_each_sg(sglist, sg, sgcount, i)
        {
                totalbytes_towrite += sg->length;
        }

        /* Reserve room for the "previous indices" trailer. */
        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /*
         * If there is only just enough room for the packet, treat the
         * ring as full; otherwise the indices would meet and, the next
         * time around, the ring buffer would look empty since
         * read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        for_each_sg(sglist, sg, sgcount, i)
        {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                next_write_location,
                                                sg_virt(sg),
                                                sg->length);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                        next_write_location,
                                        &prev_indices,
                                        sizeof(u64));

        /* Make sure we flush all writes before updating the write index */
        smp_wmb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
        return 0;
}
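
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a fixed descriptor plus a payload buffer are passed as two
 * scatterlist entries, so both are committed under a single hold of
 * the ring lock.
 *
 *      struct scatterlist sg[2];
 *      int ret;
 *
 *      sg_init_table(sg, 2);
 *      sg_set_buf(&sg[0], &desc, sizeof(desc));
 *      sg_set_buf(&sg[1], payload, payload_len);
 *      ret = hv_ringbuffer_write(ring_info, sg, 2);
 *      if (ret == -EAGAIN)
 *              back off and retry, or signal the remote endpoint
 */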

/*
 * hv_ringbuffer_peek()
 *
 * Read from the ring buffer without advancing the read index.
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
                       void *Buffer, u32 buflen)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        unsigned long flags;

        spin_lock_irqsave(&Inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(Inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Convert to byte offset */
        next_read_location = hv_get_next_read_location(Inring_info);

        next_read_location = hv_copyfrom_ringbuffer(Inring_info,
                                                    Buffer,
                                                    buflen,
                                                    next_read_location);

        spin_unlock_irqrestore(&Inring_info->ring_lock, flags);

        return 0;
}
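
/*
 * Peek and read are meant to be paired: a caller first peeks at the
 * fixed-size packet descriptor to learn how large the packet is, then
 * reads the payload with an 'offset' that skips the header bytes it
 * has already seen. A sketch (hypothetical caller; payload_len and
 * hdr_len would be derived from the descriptor):
 *
 *      struct vmpacket_descriptor desc;
 *
 *      if (hv_ringbuffer_peek(rbi, &desc, sizeof(desc)) == 0)
 *              hv_ringbuffer_read(rbi, buf, payload_len, hdr_len);
 */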

/*
 * hv_ringbuffer_read()
 *
 * Read from the ring buffer and advance the read index.
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                       u32 buflen, u32 offset)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;

        if (buflen == 0)
                return -EINVAL;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        /* Consume the "previous indices" trailer that follows the packet. */
        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index,
         * since the writer may start writing to the read area once the
         * read index is updated.
         */
        smp_mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        return 0;
}
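
/*
 * Note on ordering: the smp_wmb() in hv_ringbuffer_write() makes the
 * packet contents visible before the new write_index, and the smp_mb()
 * above keeps the data reads in hv_ringbuffer_read() ahead of the
 * read_index update, so the endpoint on the other side of the ring
 * always observes a consistent index/data pair.
 */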