/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if a host receives interrupts when it is not expecting
 * them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
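
/*
 * Note on the check in hv_signal_on_write() below: old_write is the
 * write_index as it was before the current write began. If the reader's
 * read_index has caught up to that value, the ring was empty at the
 * moment the new data was added, so this write is exactly the
 * empty-to-non-empty transition that must be signaled.
 */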
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;

        virt_mb();
        if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
                return;

        /* check interrupt_mask before read_index */
        virt_rmb();

        /*
         * This is the only case we need to signal when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
                vmbus_setevent(channel);

        return;
}
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}
/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}
/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_buffer->read_index;
}
/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over 'offset' bytes (e.g. a packet
 * header) before reading.
 */
static inline u32
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        if (next >= ring_info->ring_datasize)
                next -= ring_info->ring_datasize;

        return next;
}
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
        /* Keep the private read index (used by the raw packet accessors) in sync. */
        ring_info->priv_read_index = next_read_location;
}
/* Get the size of the ring buffer's data area (ring size minus the header). */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}
/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
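
/*
 * Note: despite the name above, only the write index is packed in (in
 * the upper 32 bits); the lower 32 bits are left as zero. The resulting
 * value is appended after every packet as prev_indices, see
 * hv_ringbuffer_write() below.
 */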
/*
 * Helper routine to copy from the ring buffer into a destination buffer.
 * Assumes there is enough room. Only the returned offset is wrapped
 * around the end of the ring.
 */
static u32 hv_copyfrom_ringbuffer(
        const struct hv_ring_buffer_info *ring_info,
        void                            *dest,
        u32                             destlen,
        u32                             start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(dest, ring_buffer + start_read_offset, destlen);

        start_read_offset += destlen;
        if (start_read_offset >= ring_buffer_size)
                start_read_offset -= ring_buffer_size;

        return start_read_offset;
}
/*
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room. Only the returned offset is wrapped
 * around the end of the ring.
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info      *ring_info,
        u32                             start_write_offset,
        const void                      *src,
        u32                             srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        if (start_write_offset >= ring_buffer_size)
                start_write_offset -= ring_buffer_size;

        return start_write_offset;
}
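
/*
 * Both copy helpers above issue a single linear memcpy() that may run
 * past the nominal end of the data area; this is safe because
 * hv_ringbuffer_init() maps the data pages twice back-to-back, so a
 * wrapping packet is still virtually contiguous. Only the *returned*
 * offset is wrapped back into [0, ring_datasize).
 */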
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 page_cnt)
{
        int i;
        struct page **pages_wraparound;

        BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        /*
         * First page holds struct hv_ring_buffer, do wraparound mapping for
         * the rest.
         */
        pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
                                   GFP_KERNEL);
        if (!pages_wraparound)
                return -ENOMEM;

        pages_wraparound[0] = pages;
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
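
        /*
         * Example: for page_cnt == 3 (one header page h plus data pages
         * d1 and d2), the array built above is { h, d1, d2, d1, d2 }, so
         * the vmap() below makes the data area appear twice in a row and
         * any copy that wraps the ring stays virtually contiguous.
         */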
        ring_info->ring_buffer = (struct hv_ring_buffer *)
                vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

        kfree(pages_wraparound);

        if (!ring_info->ring_buffer)
                return -ENOMEM;

        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = page_cnt << PAGE_SHIFT;
        ring_info->ring_datasize = ring_info->ring_size -
                sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
        vunmap(ring_info->ring_buffer);
}
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
                        const struct kvec *kv_list, u32 kv_count)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 totalbytes_towrite = 0;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags = 0;
        struct hv_ring_buffer_info *outring_info = &channel->outbound;

        if (channel->rescind)
                return -ENODEV;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;
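
        /* Reserve room for the 64-bit prev_indices trailer appended below. */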
        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

        /*
         * If there is exactly enough room for the packet, treat the ring
         * as full: a write that made write_index catch up to read_index
         * would be indistinguishable from an empty ring on the next pass.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }
        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        virt_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        hv_signal_on_write(old_write, channel);

        if (channel->rescind)
                return -ENODEV;

        return 0;
}
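
/*
 * Usage sketch (illustrative only; modeled on vmbus_sendpacket() in
 * channel.c, not part of this file): a sender builds a kvec array of
 * descriptor, payload and 8-byte-alignment padding and hands it to
 * hv_ringbuffer_write():
 *
 *      struct kvec bufferlist[3];
 *
 *      bufferlist[0].iov_base = &desc;         // struct vmpacket_descriptor
 *      bufferlist[0].iov_len  = sizeof(desc);
 *      bufferlist[1].iov_base = buffer;        // payload
 *      bufferlist[1].iov_len  = bufferlen;
 *      bufferlist[2].iov_base = &aligned_data; // zero pad to an 8-byte multiple
 *      bufferlist[2].iov_len  = packetlen_aligned - packetlen;
 *
 *      ret = hv_ringbuffer_write(channel, bufferlist, 3);
 */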
int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw)
{
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        struct vmpacket_descriptor desc;
        u32 offset;
        u32 packetlen;
        int ret = 0;
        struct hv_ring_buffer_info *inring_info = &channel->inbound;

        if (buflen <= 0)
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        bytes_avail_toread = hv_get_bytes_to_read(inring_info);
        /* Make sure there is something to read */
        if (bytes_avail_toread < sizeof(desc)) {
                /*
                 * No error is returned when there is not even a packet
                 * header; drivers are supposed to check buffer_actual_len
                 * instead.
                 */
                return ret;
        }
        init_cached_read_index(channel);

        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
                                                    next_read_location);
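
        /*
         * offset8 and len8 in the descriptor are in units of 8 bytes,
         * hence the << 3 below. In raw mode the caller receives the
         * descriptor itself as well, so no offset is skipped.
         */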
        offset = raw ? 0 : (desc.offset8 << 3);
        packetlen = (desc.len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc.trans_id;

        if (bytes_avail_toread < packetlen + offset)
                return -EAGAIN;

        if (packetlen > buflen)
                return -ENOBUFS;

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    packetlen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        hv_signal_on_read(channel);

        return ret;
}
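
/*
 * The read side mirrors hv_signal_on_write(): hv_signal_on_read() (a
 * helper defined in the Hyper-V headers, not in this file) signals the
 * host only when the read above transitioned the ring from full to
 * non-full, matching the protocol description at the top of this file.
 */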