/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	virt_mb();
}
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
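/*
 * Explanatory note: hv_begin_read()/hv_end_read() bracket a read-side
 * batch. The mask tells the host not to interrupt while the guest is
 * already draining the ring, and the re-check in hv_end_read() closes
 * the race with data that arrives just as the mask is cleared. A
 * minimal sketch of the intended caller loop (illustrative only;
 * process_pending_packets() is a hypothetical helper, not defined in
 * this file):
 *
 *	for (;;) {
 *		hv_begin_read(rbi);
 *		process_pending_packets(rbi);
 *		if (!hv_end_read(rbi))
 *			break;	// ring drained, mask cleared, done
 *	}
 */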
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
			      enum hv_signal_policy policy)
{
	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return false;

	/*
	 * When the client wants to control signaling,
	 * we only honour the host interrupt mask.
	 */
	if (policy == HV_SIGNAL_POLICY_EXPLICIT)
		return true;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		return true;

	return false;
}
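/*
 * Explanatory note on the barriers above: the leading virt_mb() orders
 * our earlier write_index update before the interrupt_mask load, so the
 * host cannot clear the mask and see an empty ring while we
 * simultaneously skip the signal. The old_write == read_index test
 * detects the empty-to-non-empty transition: old_write is where this
 * packet started, so if the host's read_index still points there, the
 * host had drained the ring and is waiting to be signaled.
 */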
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}
/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}
/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}
/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead (e.g. past a packet's
 * descriptor to its payload).
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}
/* Get the size of the ring buffer's data area. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}
/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
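/*
 * Explanatory note: despite the name, only the write index is packed
 * here, into the upper 32 bits; the lower 32 bits are left zero. This
 * value becomes the trailing u64 that hv_ringbuffer_write() appends to
 * every packet, recording where that packet started.
 */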
/*
 * Helper routine to copy from the ring buffer into a destination buffer.
 * Assume there is enough room. Handles wrap-around of the source only!!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
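/*
 * Explanatory note: a single memcpy() is safe here even when the data
 * wraps past the end of the ring, because hv_ringbuffer_init() below
 * maps the data pages twice, back to back, making the data area appear
 * virtually contiguous beyond its own end. Only the returned offset
 * needs the modulo. The same reasoning applies to hv_copyto_ringbuffer()
 * below.
 */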
/*
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assume there is enough room. Handles wrap-around of the destination only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
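	/*
	 * Layout produced above (illustrative): with page_cnt = n, the
	 * array is [hdr, d1 .. d(n-1), d1 .. d(n-1)], i.e. the header
	 * page once, then every data page twice. After vmap() the data
	 * area is therefore mapped contiguously past its own end, which
	 * is what lets the copy helpers above use a single memcpy()
	 * across the wrap point.
	 */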
	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal,
			bool lock, enum hv_signal_policy policy)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = 0;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	if (lock)
		spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		if (lock)
			spin_unlock_irqrestore(&outring_info->ring_lock,
					       flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	if (lock)
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info, policy);
	return 0;
}
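/*
 * A minimal sketch of a caller (illustrative only; the real in-tree
 * callers live in channel.c): the packet descriptor and payload are
 * gathered into a kvec array, and the trailing u64 of indices is
 * appended by hv_ringbuffer_write() itself.
 *
 *	struct kvec kv[2];
 *	bool signal = false;
 *
 *	kv[0].iov_base = &desc;		// struct vmpacket_descriptor
 *	kv[0].iov_len  = sizeof(desc);
 *	kv[1].iov_base = payload;
 *	kv[1].iov_len  = payload_len;
 *
 *	ret = hv_ringbuffer_write(outring_info, kv, 2, &signal, true,
 *				  HV_SIGNAL_POLICY_DEFAULT);
 *	if (!ret && signal)
 *		vmbus_setevent(channel);
 *
 * HV_SIGNAL_POLICY_DEFAULT and vmbus_setevent() are assumptions about
 * the surrounding VMBus code, not defined in this file.
 */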
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool *signal, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	int ret = 0;

	if (buflen <= 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is set when there is not even a header;
		 * drivers are supposed to analyze buffer_actual_len.
		 */
		return ret;
	}

	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	*signal = hv_need_to_signal_on_read(inring_info);

	return ret;
}
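/*
 * Explanatory note on the read path: each packet on the ring is laid
 * out as [vmpacket_descriptor][payload][u64 prev-indices], which is why
 * hv_ringbuffer_read() pulls the descriptor, then the payload (skipping
 * 'offset' bytes unless reading raw), then a final u64, before
 * publishing the new read index behind a full barrier.
 */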