// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"
/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
 * VMCI page files in the VMX and supporting VM to VM communication and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *            --------------  NEW  -------------
 *            |                                |
 *           \_/                              \_/
 *     CREATED_NO_MEM <-----------------> CREATED_MEM
 *            |    |                           |
 *            |    o-----------------------o   |
 *            |                            |   |
 *           \_/                          \_/ \_/
 *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *            |                            |   |
 *            |     o----------------------o   |
 *            |     |                          |
 *           \_/   \_/                        \_/
 *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *            |                                |
 *            |                                |
 *            -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX, that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
 *       a later point in time. This state can be distinguished from the one
 *       above by the context ID of the creator. A host side is not allowed to
 *       attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *   paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host
 *       side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
 *   VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style
 *   VMX will bring the queue pair into this state. Once
 *   vmci_qp_broker_set_page_store is called to register the user memory,
 *   the VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
 * in which case the queue pair will transition from the *_NO_MEM state at that
 * point back to the *_MEM state. Note that the *_NO_MEM state may have
 * changed, since the peer may have either attached or detached in the
 * meantime. The values are laid out such that ++ on a state will move from a
 * *_NO_MEM to a *_MEM state, and vice versa.
 */
/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	u64 produce_q_size;
	u64 consume_q_size;
	u32 peer;
	u32 flags;
	u32 priv_flags;
	bool guest_endpoint;
	unsigned int blocked;
	unsigned int generation;
	wait_queue_head_t event;
};

enum qp_broker_state {
	VMCIQPB_NEW,
	VMCIQPB_CREATED_NO_MEM,
	VMCIQPB_CREATED_MEM,
	VMCIQPB_ATTACHED_NO_MEM,
	VMCIQPB_ATTACHED_MEM,
	VMCIQPB_SHUTDOWN_NO_MEM,
	VMCIQPB_SHUTDOWN_MEM,
	VMCIQPB_GONE
};
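
/*
 * Illustration only, not part of the original source: the enum above is
 * deliberately laid out so that each *_NO_MEM value is immediately
 * followed by its *_MEM counterpart. Mapping guest memory in or out of a
 * given lifecycle stage therefore reduces to incrementing or
 * decrementing the state value:
 *
 *	entry->state++;	 VMCIQPB_ATTACHED_NO_MEM -> VMCIQPB_ATTACHED_MEM
 *	entry->state--;	 VMCIQPB_ATTACHED_MEM -> VMCIQPB_ATTACHED_NO_MEM
 */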
#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)
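
/*
 * Illustration only: QPBROKERSTATE_HAS_MEM(entry) evaluates to true in
 * exactly the three *_MEM states above, i.e. whenever backing guest
 * memory is currently registered for the queue pair.
 */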
/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
	struct list_head list_item;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u32 ref_count;
};

struct qp_broker_entry {
	struct vmci_resource resource;
	struct qp_entry qp;
	u32 create_id;
	u32 attach_id;
	enum qp_broker_state state;
	bool require_trusted_attach;
	bool created_by_trusted;
	bool vmci_page_files;	/* Created by VMX using VMCI page files */
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;
	struct vmci_queue_header saved_consume_q;
	vmci_event_release_cb wakeup_cb;
	void *client_data;
	void *local_mem;	/* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
	struct vmci_resource resource;
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;
	void *consume_q;
	struct ppn_set ppn_set;
};

struct qp_list {
	struct list_head head;
	struct mutex mutex;	/* Protect queue list. */
};

static struct qp_list qp_broker_list = {
	.head = LIST_HEAD_INIT(qp_broker_list.head),
	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
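
/*
 * Worked example (illustration only, not in the original source): with
 * 4 KiB pages, a queue pair entry with a 3 KiB produce queue and an
 * 8 KiB consume queue needs QPE_NUM_PAGES = 1 + 2 + 2 = 5 pages - one
 * data page for the produce queue, two for the consume queue, plus one
 * header page for each of the two queues.
 */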
/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		u64 i;

		/* Given size does not include header, so add in a page here. */
		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
					  queue->kernel_if->u.g.vas[i],
					  queue->kernel_if->u.g.pas[i]);
		}

		vfree(queue);
	}
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
	u64 i;
	struct vmci_queue *queue;
	size_t pas_size;
	size_t vas_size;
	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
	u64 num_pages;

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages >
		 (SIZE_MAX - queue_size) /
		 (sizeof(*queue->kernel_if->u.g.pas) +
		  sizeof(*queue->kernel_if->u.g.vas)))
		return NULL;

	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
	queue_size += pas_size + vas_size;

	queue = vmalloc(queue_size);
	if (!queue)
		return NULL;

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
	queue->kernel_if->mutex = NULL;
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
	queue->kernel_if->u.g.vas =
		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
	queue->kernel_if->host = false;

	for (i = 0; i < num_pages; i++) {
		queue->kernel_if->u.g.vas[i] =
			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
					   &queue->kernel_if->u.g.pas[i],
					   GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Size excl. the header. */
			qp_free_queue(queue, i * PAGE_SIZE);
			return NULL;
		}
	}

	/* Queue header is the first page. */
	queue->q_header = queue->kernel_if->u.g.vas[0];

	return queue;
}
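
/*
 * Illustration only, not part of the original source: the single
 * vmalloc() in qp_alloc_queue() above yields one contiguous block
 * laid out as
 *
 *	[struct vmci_queue][struct vmci_queue_kern_if][pas[0..n]][vas[0..n]]
 *
 * so kernel_if sits directly behind the queue struct, and the two
 * per-page arrays (DMA addresses and kernel VAs) sit behind that, which
 * is why the code locates them with plain pointer arithmetic instead of
 * separate allocations.
 */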
/*
 * Copies from a given buffer or iovector to a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
				   u64 queue_offset,
				   struct iov_iter *from,
				   size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up from this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
					 from)) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}
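
/*
 * Worked example (illustration only): with 4 KiB pages, a copy starting
 * at queue_offset 5000 lands in data page 1 at byte 904, since
 * 5000 / 4096 = 1 and 5000 & 4095 = 904. For a guest queue, the +1 in
 * vas[page_index + 1] then skips past the header page, which always
 * occupies slot 0 of the vas/pas arrays.
 */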
/*
 * Copies to a given buffer or iovector from a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_from_queue_iter(struct iov_iter *to,
				     const struct vmci_queue *queue,
				     u64 queue_offset, size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;
		int err;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
		if (err != to_copy) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}
/*
 * Allocates two lists of PPNs --- one for the pages in the produce queue,
 * and the other for the pages in the consume queue. Initializes the lists
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
			    u64 num_produce_pages,
			    void *cons_q,
			    u64 num_consume_pages, struct ppn_set *ppn_set)
{
	u64 *produce_ppns;
	u64 *consume_ppns;
	struct vmci_queue *produce_q = prod_q;
	struct vmci_queue *consume_q = cons_q;
	u64 i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return VMCI_ERROR_INVALID_ARGS;

	if (ppn_set->initialized)
		return VMCI_ERROR_ALREADY_EXISTS;

	produce_ppns =
	    kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
			  GFP_KERNEL);
	if (!produce_ppns)
		return VMCI_ERROR_NO_MEM;

	consume_ppns =
	    kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
			  GFP_KERNEL);
	if (!consume_ppns) {
		kfree(produce_ppns);
		return VMCI_ERROR_NO_MEM;
	}

	for (i = 0; i < num_produce_pages; i++)
		produce_ppns[i] =
			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	for (i = 0; i < num_consume_pages; i++)
		consume_ppns[i] =
			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return VMCI_SUCCESS;
}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		kfree(ppn_set->produce_ppns);
		kfree(ppn_set->consume_ppns);
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
	if (vmci_use_ppn64()) {
		memcpy(call_buf, ppn_set->produce_ppns,
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns));
		memcpy(call_buf +
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns),
		       ppn_set->consume_ppns,
		       ppn_set->num_consume_pages *
		       sizeof(*ppn_set->consume_ppns));
	} else {
		int i;
		u32 *ppns = (u32 *) call_buf;

		for (i = 0; i < ppn_set->num_produce_pages; i++)
			ppns[i] = (u32) ppn_set->produce_ppns[i];

		ppns = &ppns[ppn_set->num_produce_pages];

		for (i = 0; i < ppn_set->num_consume_pages; i++)
			ppns[i] = (u32) ppn_set->consume_ppns[i];
	}

	return VMCI_SUCCESS;
}
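
/*
 * Worked example (illustration only): a PPN is simply the physical
 * address of a queue page shifted down by PAGE_SHIFT, so with 4 KiB
 * pages a DMA address of 0x12345000 becomes PPN 0x12345. The hypercall
 * buffer filled in above is just the produce PPNs followed immediately
 * by the consume PPNs, as 64-bit or 32-bit values depending on
 * vmci_use_ppn64().
 */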
/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface. This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
	struct vmci_queue *queue;
	size_t queue_page_size;
	u64 num_pages;
	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages > (SIZE_MAX - queue_size) /
		 sizeof(*queue->kernel_if->u.h.page))
		return NULL;

	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
	if (queue) {
		queue->q_header = NULL;
		queue->saved_header = NULL;
		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
		queue->kernel_if->host = true;
		queue->kernel_if->mutex = NULL;
		queue->kernel_if->num_pages = num_pages;
		queue->kernel_if->u.h.header_page =
		    (struct page **)((u8 *)queue + queue_size);
		queue->kernel_if->u.h.page =
			&queue->kernel_if->u.h.header_page[1];
	}

	return queue;
}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
	kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues. This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue. Of course, it's only any good if the mutexes
 * are actually acquired. Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	/*
	 * Only the host queue has shared state - the guest queues do not
	 * need to synchronize access using a queue mutex.
	 */

	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		mutex_init(produce_q->kernel_if->mutex);
	}
}

/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = NULL;
		consume_q->kernel_if->mutex = NULL;
	}
}
/*
 * Acquire the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_unlock(queue->kernel_if->mutex);
}
/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
			     u64 num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		put_page(pages[i]);
		pages[i] = NULL;
	}
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
				   u64 consume_uva,
				   struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	int retval;
	int err = VMCI_SUCCESS;

	retval = get_user_pages_fast((uintptr_t) produce_uva,
				     produce_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     produce_q->kernel_if->u.h.header_page);
	if (retval < (int)produce_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
			 retval);
		/* retval may be negative on error; only release pinned pages. */
		if (retval > 0)
			qp_release_pages(produce_q->kernel_if->u.h.header_page,
					 retval, false);
		err = VMCI_ERROR_NO_MEM;
		goto out;
	}

	retval = get_user_pages_fast((uintptr_t) consume_uva,
				     consume_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     consume_q->kernel_if->u.h.header_page);
	if (retval < (int)consume_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
			 retval);
		/* retval may be negative on error; only release pinned pages. */
		if (retval > 0)
			qp_release_pages(consume_q->kernel_if->u.h.header_page,
					 retval, false);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 produce_q->kernel_if->num_pages, false);
		err = VMCI_ERROR_NO_MEM;
	}

 out:
	return err;
}
/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
					struct vmci_queue *produce_q,
					struct vmci_queue *consume_q)
{
	u64 produce_uva;
	u64 consume_uva;

	/*
	 * The new style and the old style mapping differ only in that
	 * we either get a single or two UVAs, so we split the single UVA
	 * range at the appropriate spot.
	 */
	produce_uva = page_store->pages;
	consume_uva = page_store->pages +
	    produce_q->kernel_if->num_pages * PAGE_SIZE;
	return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
				       consume_q);
}
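
/*
 * Worked example (illustration only, hypothetical addresses): if the VMX
 * registers a single UVA range starting at page_store->pages =
 * 0x7f0000000000 and the produce queue spans 3 pages (header included),
 * then with 4 KiB pages the produce queue is pinned starting at
 * 0x7f0000000000 and the consume queue starting at 0x7f0000003000 - the
 * split point is simply num_pages * PAGE_SIZE into the range.
 */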
/*
 * Releases and removes the references to user pages stored in the attach
 * struct. Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
					   struct vmci_queue *consume_q)
{
	qp_release_pages(produce_q->kernel_if->u.h.header_page,
			 produce_q->kernel_if->num_pages, true);
	memset(produce_q->kernel_if->u.h.header_page, 0,
	       sizeof(*produce_q->kernel_if->u.h.header_page) *
	       produce_q->kernel_if->num_pages);
	qp_release_pages(consume_q->kernel_if->u.h.header_page,
			 consume_q->kernel_if->num_pages, true);
	memset(consume_q->kernel_if->u.h.header_page, 0,
	       sizeof(*consume_q->kernel_if->u.h.header_page) *
	       consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel. Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
			      struct vmci_queue *consume_q)
{
	int result;

	if (!produce_q->q_header || !consume_q->q_header) {
		struct page *headers[2];

		if (produce_q->q_header != consume_q->q_header)
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;

		if (produce_q->kernel_if->u.h.header_page == NULL ||
		    *produce_q->kernel_if->u.h.header_page == NULL)
			return VMCI_ERROR_UNAVAILABLE;

		headers[0] = *produce_q->kernel_if->u.h.header_page;
		headers[1] = *consume_q->kernel_if->u.h.header_page;

		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
		if (produce_q->q_header != NULL) {
			consume_q->q_header =
			    (struct vmci_queue_header *)((u8 *)
							 produce_q->q_header +
							 PAGE_SIZE);
			result = VMCI_SUCCESS;
		} else {
			pr_warn("vmap failed\n");
			result = VMCI_ERROR_NO_MEM;
		}
	} else {
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
				struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	if (produce_q->q_header) {
		if (produce_q->q_header < consume_q->q_header)
			vunmap(produce_q->q_header);
		else
			vunmap(consume_q->q_header);

		produce_q->q_header = NULL;
		consume_q->q_header = NULL;
	}

	return VMCI_SUCCESS;
}

/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
				     struct vmci_handle handle)
{
	struct qp_entry *entry;

	if (vmci_handle_is_invalid(handle))
		return NULL;

	list_for_each_entry(entry, &qp_list->head, list_item) {
		if (vmci_handle_is_equal(entry->handle, handle))
			return entry;
	}

	return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

	entry = qp ? container_of(
		qp, struct qp_guest_endpoint, qp) : NULL;
	return entry;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
	struct qp_broker_entry *entry;
	struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

	entry = qp ? container_of(
		qp, struct qp_broker_entry, qp) : NULL;
	return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
	u32 context_id = vmci_get_context_id();
	struct vmci_event_qp ev;

	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event =
	    attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.peer_id = context_id;
	ev.payload.handle = handle;

	return vmci_event_dispatch(&ev.msg.hdr);
}
/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles. Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u64 produce_size,
			 u64 consume_size,
			 void *produce_q,
			 void *consume_q)
{
	int result;
	struct qp_guest_endpoint *entry;
	/* One page each for the queue headers. */
	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

	if (vmci_handle_is_invalid(handle)) {
		u32 context_id = vmci_get_context_id();

		handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry) {
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
		INIT_LIST_HEAD(&entry->qp.list_item);

		/* Add resource obj */
		result = vmci_resource_add(&entry->resource,
					   VMCI_RESOURCE_TYPE_QPAIR_GUEST,
					   handle);
		entry->qp.handle = vmci_resource_handle(&entry->resource);
		if ((result != VMCI_SUCCESS) ||
		    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
			pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
				handle.context, handle.resource, result);
			kfree(entry);
			entry = NULL;
		}
	}
	return entry;
}

/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
	qp_free_ppn_set(&entry->ppn_set);
	qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
	qp_free_queue(entry->produce_q, entry->qp.produce_size);
	qp_free_queue(entry->consume_q, entry->qp.consume_size);
	/* Unlink from resource hash table and free callback */
	vmci_resource_remove(&entry->resource);

	kfree(entry);
}

/*
 * Helper to make a queue_pairAlloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_qp_alloc_msg *alloc_msg;
	size_t msg_size;
	size_t ppn_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return VMCI_ERROR_INVALID_ARGS;

	ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
	msg_size = sizeof(*alloc_msg) +
	    (size_t) entry->num_ppns * ppn_size;
	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!alloc_msg)
		return VMCI_ERROR_NO_MEM;

	alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;

	result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
				     &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram(&alloc_msg->hdr);

	kfree(alloc_msg);

	return result;
}

/*
 * Helper to make a queue_pairDetach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
	struct vmci_qp_detach_msg detach_msg;

	detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return vmci_send_datagram(&detach_msg.hdr);
}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
	if (entry)
		list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
				 struct qp_entry *entry)
{
	if (entry)
		list_del(&entry->list_item);
}
/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
	int result;
	struct qp_guest_endpoint *entry;
	u32 ref_count = ~0;	/* To avoid compiler warning below */

	mutex_lock(&qp_guest_endpoints.mutex);

	entry = qp_guest_handle_to_entry(handle);
	if (!entry) {
		mutex_unlock(&qp_guest_endpoints.mutex);
		return VMCI_ERROR_NOT_FOUND;
	}

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = qp_notify_peer_local(false, handle);
			/*
			 * We can fail to notify a local queuepair
			 * because we can't allocate. We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
		}
	} else {
		result = qp_detatch_hypercall(handle);
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair.
			 * That other queuepair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet. It will get cleaned
			 * up by VMCIqueue_pair_Exit() if necessary
			 * (assuming we are going away, otherwise why
			 * did this fail?).
			 */

			mutex_unlock(&qp_guest_endpoints.mutex);
			return result;
		}
	}

	/*
	 * If we get here then we either failed to notify a local queuepair, or
	 * we succeeded in all cases.  Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	if (entry)
		ref_count = entry->qp.ref_count;

	mutex_unlock(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);

	return result;
}
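
/*
 * Illustration only, not in the original source: a local queue pair is
 * created and then attached from the same context, so its ref_count
 * reaches 2, and two detach calls are needed before the ref_count drops
 * to 0 and qp_guest_endpoint_destroy() above frees the endpoint.
 */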
/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
			       struct vmci_queue **produce_q,
			       u64 produce_size,
			       struct vmci_queue **consume_q,
			       u64 consume_size,
			       u32 peer,
			       u32 flags,
			       u32 priv_flags)
{
	const u64 num_produce_pages =
	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
	const u64 num_consume_pages =
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
	void *my_produce_q = NULL;
	void *my_consume_q = NULL;
	int result;
	struct qp_guest_endpoint *queue_pair_entry = NULL;

	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return VMCI_ERROR_NO_ACCESS;

	mutex_lock(&qp_guest_endpoints.mutex);

	queue_pair_entry = qp_guest_handle_to_entry(*handle);
	if (queue_pair_entry) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				pr_devel("Error attempting to attach more than once\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size !=
			    produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				pr_devel("Error mismatched queue pair in local attach\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach. We swap the consume and
			 * produce queues for the attacher and deliver
			 * an attach event.
			 */
			result = qp_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;

			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}

		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = qp_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		pr_warn("Error allocating pages for produce queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = qp_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		pr_warn("Error allocating pages for consume queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
						    produce_size, consume_size,
						    my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		pr_warn("Error allocating memory in %s\n", __func__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
				  num_consume_pages,
				  &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		pr_warn("qp_alloc_ppn_set failed\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		u32 context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we
		 * do for regular ones.  The handle's context must
		 * match the creator or attacher context id (here they
		 * are both the current context id) and the
		 * attach-only flag cannot exist during create.  We
		 * also ensure specified peer is this context or an
		 * invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		     queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = qp_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			pr_warn("qp_alloc_hypercall result = %d\n", result);
			goto error;
		}
	}

	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
			    (struct vmci_queue *)my_consume_q);

	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create.  For non-local queue pairs, the
	 * hypervisor initializes the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_q_header_init((*produce_q)->q_header, *handle);
		vmci_q_header_init((*consume_q)->q_header, *handle);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);

	return VMCI_SUCCESS;

 error:
	mutex_unlock(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		qp_free_queue(my_produce_q, produce_size);
		qp_free_queue(my_consume_q, consume_size);
	}
	return result;

 error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	mutex_unlock(&qp_guest_endpoints.mutex);
	return result;
}
/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en, that would use a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data, struct qp_broker_entry **ent)
{
	struct qp_broker_entry *entry = NULL;
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;
	u64 guest_produce_size;
	u64 guest_consume_size;

	/* Do not create if the caller asked not to. */
	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
		return VMCI_ERROR_NOT_FOUND;

	/*
	 * Creator's context ID should match handle's context ID or the creator
	 * must allow the context in handle's context ID as the "peer".
	 */
	if (handle.context != context_id && handle.context != peer)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * Creator's context ID for local queue pairs should match the
	 * peer, if a peer is specified.
	 */
	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
		return VMCI_ERROR_NO_ACCESS;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return VMCI_ERROR_NO_MEM;

	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so a creating host side endpoint should swap
		 * produce and consume values -- unless it is a local queue
		 * pair, in which case no swapping is necessary, since the local
		 * attacher will swap queues.
		 */

		guest_produce_size = consume_size;
		guest_consume_size = produce_size;
	} else {
		guest_produce_size = produce_size;
		guest_consume_size = consume_size;
	}

	entry->qp.handle = handle;
	entry->qp.peer = peer;
	entry->qp.flags = flags;
	entry->qp.produce_size = guest_produce_size;
	entry->qp.consume_size = guest_consume_size;
	entry->qp.ref_count = 1;
	entry->create_id = context_id;
	entry->attach_id = VMCI_INVALID_ID;
	entry->state = VMCIQPB_NEW;
	entry->require_trusted_attach =
	    !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
	entry->created_by_trusted =
	    !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
	entry->vmci_page_files = false;
	entry->wakeup_cb = wakeup_cb;
	entry->client_data = client_data;
	entry->produce_q = qp_host_alloc_queue(guest_produce_size);
	if (entry->produce_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}
	entry->consume_q = qp_host_alloc_queue(guest_consume_size);
	if (entry->consume_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	qp_init_queue_mutex(entry->produce_q, entry->consume_q);

	INIT_LIST_HEAD(&entry->qp.list_item);

	if (is_local) {
		u8 *tmp;

		entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
					   PAGE_SIZE, GFP_KERNEL);
		if (entry->local_mem == NULL) {
			result = VMCI_ERROR_NO_MEM;
			goto error;
		}
		entry->state = VMCIQPB_CREATED_MEM;
		entry->produce_q->q_header = entry->local_mem;
		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
		    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
	} else if (page_store) {
		/*
		 * The VMX already initialized the queue pair headers, so no
		 * need for the kernel side to do that.
		 */
		result = qp_host_register_user_memory(page_store,
						      entry->produce_q,
						      entry->consume_q);
		if (result < VMCI_SUCCESS)
			goto error;

		entry->state = VMCIQPB_CREATED_MEM;
	} else {
		/*
		 * A create without a page_store may be either a host
		 * side create (in which case we are waiting for the
		 * guest side to supply the memory) or an old style
		 * queue pair create (in which case we will expect a
		 * set page store call as the next step).
		 */
		entry->state = VMCIQPB_CREATED_NO_MEM;
	}

	qp_list_add_entry(&qp_broker_list, &entry->qp);
	if (ent != NULL)
		*ent = entry;

	/* Add to resource obj */
	result = vmci_resource_add(&entry->resource,
				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
				   handle);
	if (result != VMCI_SUCCESS) {
		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
			handle.context, handle.resource, result);
		goto error;
	}

	entry->qp.handle = vmci_resource_handle(&entry->resource);
	if (is_local) {
		vmci_q_header_init(entry->produce_q->q_header,
				   entry->qp.handle);
		vmci_q_header_init(entry->consume_q->q_header,
				   entry->qp.handle);
	}

	vmci_ctx_qp_create(context, entry->qp.handle);

	return VMCI_SUCCESS;

 error:
	if (entry != NULL) {
		qp_host_free_queue(entry->produce_q, guest_produce_size);
		qp_host_free_queue(entry->consume_q, guest_consume_size);
		kfree(entry);
	}

	return result;
}
/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about an attach/detach event by the
 * given VM. Returns the payload size of the datagram enqueued on
 * success, error code otherwise.
 */
static int qp_notify_peer(bool attach,
			  struct vmci_handle handle,
			  u32 my_id,
			  u32 peer_id)
{
	int rv;
	struct vmci_event_qp ev;

	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
	    peer_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
	 * number of pending events from the hypervisor to a given VM
	 * otherwise a rogue VM could do an arbitrary number of attach
	 * and detach operations causing memory pressure in the host
	 * kernel.
	 */

	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event = attach ?
	    VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.handle = handle;
	ev.payload.peer_id = my_id;

	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
				    &ev.msg.hdr, false);
	if (rv < VMCI_SUCCESS)
		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
			attach ? "ATTACH" : "DETACH", peer_id);

	return rv;
}
/*
 * The second endpoint issuing a queue pair allocation will attach to
 * the queue pair registered with the queue pair broker.
 *
 * If the attacher is a guest, it will associate a VMX virtual address
 * range with the queue pair as specified by the page_store. At this
 * point, the already attached host endpoint may start using the queue
 * pair, and an attach event is sent to it. For compatibility with
 * older VMX'en, that used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later
 * using vmci_qp_broker_set_page_store. In that case, a page_store of
 * NULL should be used, and the attach event will be generated once
 * the actual page store has been set.
 *
 * If the attacher is the host, a page_store of NULL should be used as
 * well, since the page store information is already set by the guest.
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data,
			    struct qp_broker_entry **ent)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_CREATED_MEM)
		return VMCI_ERROR_UNAVAILABLE;

	if (is_local) {
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
		    context_id != entry->create_id) {
			return VMCI_ERROR_INVALID_ARGS;
		}
	} else if (context_id == entry->create_id ||
		   context_id == entry->attach_id) {
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (VMCI_CONTEXT_IS_VM(context_id) &&
	    VMCI_CONTEXT_IS_VM(entry->create_id))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * If we are attaching from a restricted context then the queuepair
	 * must have been created by a trusted endpoint.
	 */
	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !entry->created_by_trusted)
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If we are attaching to a queuepair that was created by a restricted
	 * context then we must be trusted.
	 */
	if (entry->require_trusted_attach &&
	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
	 * control check is not performed.
	 */
	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
		return VMCI_ERROR_NO_ACCESS;

	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
		/*
		 * Do not attach if the caller doesn't support Host Queue Pairs
		 * and a host created this queue pair.
		 */
		if (!vmci_ctx_supports_host_qp(context))
			return VMCI_ERROR_INVALID_RESOURCE;

	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
		struct vmci_ctx *create_context;
		bool supports_host_qp;

		/*
		 * Do not attach a host to a user created queue pair if that
		 * user doesn't support host queue pair end points.
		 */
		create_context = vmci_ctx_get(entry->create_id);
		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
		vmci_ctx_put(create_context);

		if (!supports_host_qp)
			return VMCI_ERROR_INVALID_RESOURCE;
	}

	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so an attaching guest should match the values
		 * stored in the entry.
		 */
		if (entry->qp.produce_size != produce_size ||
		    entry->qp.consume_size != consume_size) {
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
		}
	} else if (entry->qp.produce_size != consume_size ||
		   entry->qp.consume_size != produce_size) {
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * If a guest attached to a queue pair, it will supply
		 * the backing memory.  If this is a pre NOVMVM vmx,
		 * the backing memory will be supplied by calling
		 * vmci_qp_broker_set_page_store() following the
		 * return of the vmci_qp_broker_alloc() call. If it is
		 * a vmx of version NOVMVM or later, the page store
		 * must be supplied as part of the
		 * vmci_qp_broker_alloc call.  Under all circumstances,
		 * the initially created queue pair must not have any
		 * memory associated with it already.
		 */
		if (entry->state != VMCIQPB_CREATED_NO_MEM)
			return VMCI_ERROR_INVALID_ARGS;

		if (page_store != NULL) {
			/*
			 * Patch up host state to point to guest
			 * supplied memory. The VMX already
			 * initialized the queue pair headers, so no
			 * need for the kernel side to do that.
			 */
			result = qp_host_register_user_memory(page_store,
							      entry->produce_q,
							      entry->consume_q);
			if (result < VMCI_SUCCESS)
				return result;

			entry->state = VMCIQPB_ATTACHED_MEM;
		} else {
			entry->state = VMCIQPB_ATTACHED_NO_MEM;
		}
	} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
		/*
		 * The host side is attempting to attach to a queue
		 * pair that doesn't have any memory associated with
		 * it. This must be a pre NOVMVM vmx that hasn't set
		 * the page store information yet, or a quiesced VM.
		 */
		return VMCI_ERROR_UNAVAILABLE;
	} else {
		/* The host side has successfully attached to a queue pair. */
		entry->state = VMCIQPB_ATTACHED_MEM;
	}

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
		    qp_notify_peer(true, entry->qp.handle, context_id,
				   entry->create_id);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
	}

	entry->attach_id = context_id;
	entry->qp.ref_count++;
	if (wakeup_cb) {
		entry->wakeup_cb = wakeup_cb;
		entry->client_data = client_data;
	}

	/*
	 * When attaching to local queue pairs, the context already has
	 * an entry tracking the queue pair, so don't add another one.
	 */
	if (!is_local)
		vmci_ctx_qp_create(context, entry->qp.handle);

	if (ent != NULL)
		*ent = entry;

	return VMCI_SUCCESS;
}
/*
 * queue_pair_Alloc for use when setting up queue pair endpoints
 * on the host.
 */
static int qp_broker_alloc(struct vmci_handle handle,
			   u32 peer,
			   u32 flags,
			   u32 priv_flags,
			   u64 produce_size,
			   u64 consume_size,
			   struct vmci_qp_page_store *page_store,
			   struct vmci_ctx *context,
			   vmci_event_release_cb wakeup_cb,
			   void *client_data,
			   struct qp_broker_entry **ent,
			   bool *swap)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool create;
	struct qp_broker_entry *entry = NULL;
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (vmci_handle_is_invalid(handle) ||
	    (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
	    !(produce_size || consume_size) ||
	    !context || context_id == VMCI_INVALID_ID ||
	    handle.context == VMCI_INVALID_ID) {
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In the initial argument check, we ensure that non-vmkernel hosts
	 * are not allowed to create local queue pairs.
	 */

	mutex_lock(&qp_broker_list.mutex);

	if (!is_local && vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		mutex_unlock(&qp_broker_list.mutex);
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (handle.resource != VMCI_INVALID_ID)
		entry = qp_broker_handle_to_entry(handle);

	if (!entry) {
		create = true;
		result =
		    qp_broker_create(handle, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	} else {
		create = false;
		result =
		    qp_broker_attach(entry, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	}

	mutex_unlock(&qp_broker_list.mutex);

	if (swap)
		*swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
		    !(create && is_local);

	return result;
}

/*
 * This function implements the kernel API for allocating a queue
 * pair.
 */
static int qp_alloc_host_work(struct vmci_handle *handle,
			      struct vmci_queue **produce_q,
			      u64 produce_size,
			      struct vmci_queue **consume_q,
			      u64 consume_size,
			      u32 peer,
			      u32 flags,
			      u32 priv_flags,
			      vmci_event_release_cb wakeup_cb,
			      void *client_data)
{
	struct vmci_handle new_handle;
	struct vmci_ctx *context;
	struct qp_broker_entry *entry;
	int result;
	bool swap;

	if (vmci_handle_is_invalid(*handle)) {
		new_handle = vmci_make_handle(
			VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
	} else
		new_handle = *handle;

	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
	entry = NULL;
	result =
	    qp_broker_alloc(new_handle, peer, flags, priv_flags,
			    produce_size, consume_size, NULL, context,
			    wakeup_cb, client_data, &entry, &swap);
	if (result == VMCI_SUCCESS) {
		if (swap) {
			/*
			 * If this is a local queue pair, the attacher
			 * will swap around produce and consume
			 * queues.
			 */
			*produce_q = entry->consume_q;
			*consume_q = entry->produce_q;
		} else {
			*produce_q = entry->produce_q;
			*consume_q = entry->consume_q;
		}

		*handle = vmci_resource_handle(&entry->resource);
	} else {
		*handle = VMCI_INVALID_HANDLE;
		pr_devel("queue pair broker failed to alloc (result=%d)\n",
			 result);
	}
	vmci_ctx_put(context);
	return result;
}

/*
 * Allocates a VMCI queue_pair. Only checks validity of input
 * arguments. The real work is done in the host or guest
 * specific function.
 */
int vmci_qp_alloc(struct vmci_handle *handle,
		  struct vmci_queue **produce_q,
		  u64 produce_size,
		  struct vmci_queue **consume_q,
		  u64 consume_size,
		  u32 peer,
		  u32 flags,
		  u32 priv_flags,
		  bool guest_endpoint,
		  vmci_event_release_cb wakeup_cb,
		  void *client_data)
{
	if (!handle || !produce_q || !consume_q ||
	    (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
		return VMCI_ERROR_INVALID_ARGS;

	if (guest_endpoint) {
		return qp_alloc_guest_work(handle, produce_q,
					   produce_size, consume_q,
					   consume_size, peer,
					   flags, priv_flags);
	} else {
		return qp_alloc_host_work(handle, produce_q,
					  produce_size, consume_q,
					  consume_size, peer, flags,
					  priv_flags, wakeup_cb, client_data);
	}
}
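
/*
 * Usage sketch (illustration only, not part of the driver): a host-side
 * kernel client could allocate a queue pair like this, with the handle
 * initialized to VMCI_INVALID_HANDLE so that the broker picks a
 * resource ID:
 *
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	struct vmci_queue *produce_q, *consume_q;
 *	int rv;
 *
 *	rv = vmci_qp_alloc(&handle, &produce_q, 4096, &consume_q, 4096,
 *			   VMCI_INVALID_ID, 0, VMCI_NO_PRIVILEGE_FLAGS,
 *			   false, NULL, NULL);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;
 *
 * and would later tear it down through the detach path for the same
 * handle.
 */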
1850 * This function implements the host kernel API for detaching from
1851 * a queue pair.
1853 static int qp_detatch_host_work(struct vmci_handle handle)
1855 int result;
1856 struct vmci_ctx *context;
1858 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1860 result = vmci_qp_broker_detach(handle, context);
1862 vmci_ctx_put(context);
1863 return result;
1867 * Detaches from a VMCI queue_pair. Only checks validity of input argument.
1868 * Real work is done in the host or guest specific function.
1870 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1872 if (vmci_handle_is_invalid(handle))
1873 return VMCI_ERROR_INVALID_ARGS;
1875 if (guest_endpoint)
1876 return qp_detatch_guest_work(handle);
1877 else
1878 return qp_detatch_host_work(handle);
/*
 * Returns the entry from the head of the list. Assumes that the list is
 * locked.
 */
static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
{
	if (!list_empty(&qp_list->head)) {
		struct qp_entry *entry =
		    list_first_entry(&qp_list->head, struct qp_entry,
				     list_item);
		return entry;
	}

	return NULL;
}

void vmci_qp_broker_exit(void)
{
	struct qp_entry *entry;
	struct qp_broker_entry *be;

	mutex_lock(&qp_broker_list.mutex);

	while ((entry = qp_list_get_head(&qp_broker_list))) {
		be = (struct qp_broker_entry *)entry;

		qp_list_remove_entry(&qp_broker_list, entry);
		kfree(be);
	}

	mutex_unlock(&qp_broker_list.mutex);
}
/*
 * Requests that a queue pair be allocated with the VMCI queue
 * pair broker. Allocates a queue pair entry if one does not
 * exist. Attaches to one if it exists, and retrieves the page
 * files backing that queue_pair. Assumes that the queue pair
 * broker lock is held.
 */
int vmci_qp_broker_alloc(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u32 priv_flags,
			 u64 produce_size,
			 u64 consume_size,
			 struct vmci_qp_page_store *page_store,
			 struct vmci_ctx *context)
{
	return qp_broker_alloc(handle, peer, flags, priv_flags,
			       produce_size, consume_size,
			       page_store, context, NULL, NULL, NULL, NULL);
}
/*
 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
 * step to add the UVAs of the VMX mapping of the queue pair. This function
 * provides backwards compatibility with such VMX'en, and takes care of
 * registering the page store for a queue pair previously allocated by the
 * VMX during create or attach. This function will move the queue pair state
 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
 * attached state with memory, the queue pair is ready to be used by the
 * host peer, and an attached event will be generated.
 *
 * Assumes that the queue pair broker lock is held.
 *
 * This function is only used by the hosted platform, since there is no
 * issue with backwards compatibility for vmkernel.
 */
int vmci_qp_broker_set_page_store(struct vmci_handle handle,
				  u64 produce_uva,
				  u64 consume_uva,
				  struct vmci_ctx *context)
{
	struct qp_broker_entry *entry;
	int result;
	const u32 context_id = vmci_ctx_get_id(context);

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * We only support guest to host queue pairs, so the VMX must
	 * supply UVAs for the mapped page files.
	 */

	if (produce_uva == 0 || consume_uva == 0)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	/*
	 * If I'm the owner then I can set the page store.
	 *
	 * Or, if a host created the queue_pair and I'm the attached peer
	 * then I can set the page store.
	 */
	if (entry->create_id != context_id &&
	    (entry->create_id != VMCI_HOST_CONTEXT_ID ||
	     entry->attach_id != context_id)) {
		result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
		goto out;
	}

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_ATTACHED_NO_MEM) {
		result = VMCI_ERROR_UNAVAILABLE;
		goto out;
	}

	result = qp_host_get_user_memory(produce_uva, consume_uva,
					 entry->produce_q, entry->consume_q);
	if (result < VMCI_SUCCESS)
		goto out;

	result = qp_host_map_queues(entry->produce_q, entry->consume_q);
	if (result < VMCI_SUCCESS) {
		qp_host_unregister_user_memory(entry->produce_q,
					       entry->consume_q);
		goto out;
	}

	if (entry->state == VMCIQPB_CREATED_NO_MEM)
		entry->state = VMCIQPB_CREATED_MEM;
	else
		entry->state = VMCIQPB_ATTACHED_MEM;

	entry->vmci_page_files = true;

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
		    qp_notify_peer(true, handle, context_id, entry->create_id);
		if (result < VMCI_SUCCESS) {
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
		}
	}

	result = VMCI_SUCCESS;
 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}
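/*
 * Illustrative sequence (a sketch of the flow above, not code from
 * this file): an old-style VMX that created a queue pair without
 * backing memory typically drives the broker in two steps:
 *
 *	vmci_qp_broker_alloc(h, ...);        state: VMCIQPB_CREATED_NO_MEM
 *	vmci_qp_broker_set_page_store(h, produce_uva, consume_uva, ctx);
 *	                                     state: VMCIQPB_CREATED_MEM
 *
 * If the host peer had already attached, the same call instead moves
 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM and notifies the
 * peer, as implemented above.
 */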
/*
 * Resets saved queue headers for the given QP broker
 * entry. Should be used when guest memory becomes available
 * again, or the guest detaches.
 */
static void qp_reset_saved_headers(struct qp_broker_entry *entry)
{
	entry->produce_q->saved_header = NULL;
	entry->consume_q->saved_header = NULL;
}
/*
 * The main entry point for detaching from a queue pair registered with the
 * queue pair broker. If more than one endpoint is attached to the queue
 * pair, the first endpoint will mainly decrement a reference count and
 * generate a notification to its peer. The last endpoint will clean up
 * the queue pair state registered with the broker.
 *
 * When a guest endpoint detaches, it will unmap and unregister the guest
 * memory backing the queue pair. If the host is still attached, it will
 * no longer be able to access the queue pair content.
 *
 * If the queue pair is already in a state where there is no memory
 * registered for the queue pair (any *_NO_MEM state), it will transition to
 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
 * endpoint is the first of two endpoints to detach. If the host endpoint is
 * the first out of two to detach, the queue pair will move to the
 * VMCIQPB_SHUTDOWN_MEM state.
 */
int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	u32 peer_id;
	bool is_local = false;
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID) {
		return VMCI_ERROR_INVALID_ARGS;
	}

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	if (context_id == entry->create_id) {
		peer_id = entry->attach_id;
		entry->create_id = VMCI_INVALID_ID;
	} else {
		peer_id = entry->create_id;
		entry->attach_id = VMCI_INVALID_ID;
	}
	entry->qp.ref_count--;

	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		bool headers_mapped;

		/*
		 * Pre NOVMVM vmx'en may detach from a queue pair
		 * before setting the page store, and in that case
		 * there is no user memory to detach from. Also, more
		 * recent VMX'en may detach from a queue pair in the
		 * quiesced state.
		 */

		qp_acquire_queue_mutex(entry->produce_q);
		headers_mapped = entry->produce_q->q_header ||
		    entry->consume_q->q_header;
		if (QPBROKERSTATE_HAS_MEM(entry)) {
			result =
			    qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
						 entry->produce_q,
						 entry->consume_q);
			if (result < VMCI_SUCCESS)
				pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
					handle.context, handle.resource,
					result);

			qp_host_unregister_user_memory(entry->produce_q,
						       entry->consume_q);
		}

		if (!headers_mapped)
			qp_reset_saved_headers(entry);

		qp_release_queue_mutex(entry->produce_q);

		if (!headers_mapped && entry->wakeup_cb)
			entry->wakeup_cb(entry->client_data);

	} else {
		if (entry->wakeup_cb) {
			entry->wakeup_cb = NULL;
			entry->client_data = NULL;
		}
	}

	if (entry->qp.ref_count == 0) {
		qp_list_remove_entry(&qp_broker_list, &entry->qp);

		if (is_local)
			kfree(entry->local_mem);

		qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
		qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
		qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
		/* Unlink from resource hash table and free callback */
		vmci_resource_remove(&entry->resource);

		kfree(entry);

		vmci_ctx_qp_destroy(context, handle);
	} else {
		qp_notify_peer(false, handle, context_id, peer_id);
		if (context_id == VMCI_HOST_CONTEXT_ID &&
		    QPBROKERSTATE_HAS_MEM(entry)) {
			entry->state = VMCIQPB_SHUTDOWN_MEM;
		} else {
			entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
		}

		if (!is_local)
			vmci_ctx_qp_destroy(context, handle);
	}
	result = VMCI_SUCCESS;
 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}
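/*
 * Illustrative lifecycle (a sketch of the logic above, not new
 * behavior): with two endpoints attached, ref_count is 2 and teardown
 * takes two detach calls:
 *
 *	vmci_qp_broker_detach(h, ctx1);    ref_count 2 -> 1; the peer is
 *	                                   notified and the state becomes
 *	                                   VMCIQPB_SHUTDOWN_MEM or
 *	                                   VMCIQPB_SHUTDOWN_NO_MEM
 *	vmci_qp_broker_detach(h, ctx2);    ref_count 1 -> 0; the queues
 *	                                   are freed and the broker entry
 *	                                   is removed
 */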
/*
 * Establishes the necessary mappings for a queue pair given a
 * reference to the queue pair guest memory. This is usually
 * called when a guest is unquiesced and the VMX is allowed to
 * map guest memory once again.
 */
int vmci_qp_broker_map(struct vmci_handle handle,
		       struct vmci_ctx *context,
		       u64 guest_mem)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	result = VMCI_SUCCESS;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		struct vmci_qp_page_store page_store;

		page_store.pages = guest_mem;
		page_store.len = QPE_NUM_PAGES(entry->qp);

		qp_acquire_queue_mutex(entry->produce_q);
		qp_reset_saved_headers(entry);
		result =
		    qp_host_register_user_memory(&page_store,
						 entry->produce_q,
						 entry->consume_q);
		qp_release_queue_mutex(entry->produce_q);
		if (result == VMCI_SUCCESS) {
			/* Move state from *_NO_MEM to *_MEM */

			entry->state++;

			if (entry->wakeup_cb)
				entry->wakeup_cb(entry->client_data);
		}
	}

 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}
/*
 * Saves a snapshot of the queue headers for the given QP broker
 * entry. Should be used when guest memory is unmapped.
 * Results:
 * VMCI_SUCCESS on success, appropriate error code if guest memory
 * can't be accessed.
 */
static int qp_save_headers(struct qp_broker_entry *entry)
{
	int result;

	if (entry->produce_q->saved_header != NULL &&
	    entry->consume_q->saved_header != NULL) {
		/*
		 * If the headers have already been saved, we don't need to do
		 * it again, and we don't want to map in the headers
		 * unnecessarily.
		 */

		return VMCI_SUCCESS;
	}

	if (NULL == entry->produce_q->q_header ||
	    NULL == entry->consume_q->q_header) {
		result = qp_host_map_queues(entry->produce_q, entry->consume_q);
		if (result < VMCI_SUCCESS)
			return result;
	}

	memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
	       sizeof(entry->saved_produce_q));
	entry->produce_q->saved_header = &entry->saved_produce_q;
	memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
	       sizeof(entry->saved_consume_q));
	entry->consume_q->saved_header = &entry->saved_consume_q;

	return VMCI_SUCCESS;
}
/*
 * Removes all references to the guest memory of a given queue pair, and
 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
 * called when a VM is being quiesced where access to guest memory should
 * be avoided.
 */
int vmci_qp_broker_unmap(struct vmci_handle handle,
			 struct vmci_ctx *context,
			 u32 gid)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		qp_acquire_queue_mutex(entry->produce_q);
		result = qp_save_headers(entry);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
				handle.context, handle.resource, result);

		qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);

		/*
		 * On hosted, when we unmap queue pairs, the VMX will also
		 * unmap the guest memory, so we invalidate the previously
		 * registered memory. If the queue pair is mapped again at a
		 * later point in time, we will need to reregister the user
		 * memory with a possibly new user VA.
		 */
		qp_host_unregister_user_memory(entry->produce_q,
					       entry->consume_q);

		/*
		 * Move state from *_MEM to *_NO_MEM.
		 */
		entry->state--;

		qp_release_queue_mutex(entry->produce_q);
	}

	result = VMCI_SUCCESS;

 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}
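/*
 * Note on the state arithmetic (an observation about the code above,
 * not a behavior change): vmci_qp_broker_map() does entry->state++ and
 * vmci_qp_broker_unmap() does entry->state--, which relies on each
 * *_NO_MEM enum value being immediately followed by its *_MEM
 * counterpart, e.g.:
 *
 *	VMCIQPB_ATTACHED_NO_MEM + 1 == VMCIQPB_ATTACHED_MEM
 *
 * A quiesce/unquiesce cycle is therefore unmap (*_MEM -> *_NO_MEM)
 * followed by map (*_NO_MEM -> *_MEM).
 */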
/*
 * Destroys all guest queue pair endpoints. If active guest queue
 * pairs still exist, hypercalls to attempt detach from these
 * queue pairs will be made. Any failure to detach is silently
 * ignored.
 */
void vmci_qp_guest_endpoints_exit(void)
{
	struct qp_entry *entry;
	struct qp_guest_endpoint *ep;

	mutex_lock(&qp_guest_endpoints.mutex);

	while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
		ep = (struct qp_guest_endpoint *)entry;

		/* Don't make a hypercall for local queue_pairs. */
		if (!(entry->flags & VMCI_QPFLAG_LOCAL))
			qp_detatch_hypercall(entry->handle);

		/* We cannot fail the exit, so let's reset ref_count. */
		entry->ref_count = 0;
		qp_list_remove_entry(&qp_guest_endpoints, entry);

		qp_guest_endpoint_destroy(ep);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);
}
/*
 * Helper routine that will lock the queue pair before subsequent
 * operations.
 * Note: Non-blocking on the host side is currently only implemented in ESX.
 * Since non-blocking isn't yet implemented on the host personality we
 * have no reason to acquire a spin lock. So to avoid the use of an
 * unnecessary lock only acquire the mutex if we can block.
 */
static void qp_lock(const struct vmci_qp *qpair)
{
	qp_acquire_queue_mutex(qpair->produce_q);
}

/*
 * Helper routine that unlocks the queue pair after calling
 * qp_lock.
 */
static void qp_unlock(const struct vmci_qp *qpair)
{
	qp_release_queue_mutex(qpair->produce_q);
}
/*
 * The queue headers may not be mapped at all times. If a queue is
 * currently not mapped, an attempt will be made to map it.
 */
static int qp_map_queue_headers(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	int result;

	if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
		result = qp_host_map_queues(produce_q, consume_q);
		if (result < VMCI_SUCCESS)
			return (produce_q->saved_header &&
				consume_q->saved_header) ?
			    VMCI_ERROR_QUEUEPAIR_NOT_READY :
			    VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
	}

	return VMCI_SUCCESS;
}
/*
 * Helper routine that will retrieve the produce and consume
 * headers of a given queue pair. If the guest memory of the
 * queue pair is currently not available, the saved queue headers
 * will be returned, if these are available.
 */
static int qp_get_queue_headers(const struct vmci_qp *qpair,
				struct vmci_queue_header **produce_q_header,
				struct vmci_queue_header **consume_q_header)
{
	int result;

	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
	if (result == VMCI_SUCCESS) {
		*produce_q_header = qpair->produce_q->q_header;
		*consume_q_header = qpair->consume_q->q_header;
	} else if (qpair->produce_q->saved_header &&
		   qpair->consume_q->saved_header) {
		*produce_q_header = qpair->produce_q->saved_header;
		*consume_q_header = qpair->consume_q->saved_header;
		result = VMCI_SUCCESS;
	}

	return result;
}
/*
 * Callback from VMCI queue pair broker indicating that a queue
 * pair that was previously not ready, now either is ready or
 * gone forever.
 */
static int qp_wakeup_cb(void *client_data)
{
	struct vmci_qp *qpair = (struct vmci_qp *)client_data;

	qp_lock(qpair);
	while (qpair->blocked > 0) {
		qpair->blocked--;
		qpair->generation++;
		wake_up(&qpair->event);
	}
	qp_unlock(qpair);

	return VMCI_SUCCESS;
}
/*
 * Makes the calling thread wait for the queue pair to become
 * ready for host side access. Returns true when thread is
 * woken up after queue pair state change, false otherwise.
 */
static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
{
	unsigned int generation;

	qpair->blocked++;
	generation = qpair->generation;
	qp_unlock(qpair);
	wait_event(qpair->event, generation != qpair->generation);
	qp_lock(qpair);

	return true;
}
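/*
 * Illustrative note: blocked/generation form a simple wakeup protocol.
 * A waiter increments qpair->blocked and samples qpair->generation
 * under the lock, then drops the lock and sleeps until the generation
 * changes. qp_wakeup_cb() bumps the generation once per blocked waiter
 * while waking them, and wait_event() re-checks its condition before
 * sleeping, so a wakeup that lands between qp_unlock() and
 * wait_event() is not lost.
 */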
/*
 * Enqueues a given buffer to the produce queue using the provided
 * function. As many bytes as possible (space available in the queue)
 * are enqueued. Assumes the queue->mutex has been acquired. Returns
 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
 * an error occurred when accessing the buffer,
 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
 * available. Otherwise, the number of bytes written to the queue is
 * returned. Updates the tail pointer of the produce queue.
 */
static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 produce_q_size,
				 struct iov_iter *from)
{
	s64 free_space;
	u64 tail;
	size_t buf_size = iov_iter_count(from);
	size_t written;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	free_space = vmci_q_header_free_space(produce_q->q_header,
					      consume_q->q_header,
					      produce_q_size);
	if (free_space == 0)
		return VMCI_ERROR_QUEUEPAIR_NOSPACE;

	if (free_space < VMCI_SUCCESS)
		return (ssize_t) free_space;

	written = (size_t) (free_space > buf_size ? buf_size : free_space);
	tail = vmci_q_header_producer_tail(produce_q->q_header);
	if (likely(tail + written < produce_q_size)) {
		result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
	} else {
		/* Tail pointer wraps around. */

		const size_t tmp = (size_t) (produce_q_size - tail);

		result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
		if (result >= VMCI_SUCCESS)
			result = qp_memcpy_to_queue_iter(produce_q, 0, from,
							 written - tmp);
	}

	if (result < VMCI_SUCCESS)
		return result;

	vmci_q_header_add_producer_tail(produce_q->q_header, written,
					produce_q_size);
	return written;
}
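/*
 * Worked example of the wrap-around above (hypothetical numbers): with
 * produce_q_size == 100, tail == 90 and 30 bytes to write, the copy is
 * split into tmp == 10 bytes at offset 90 and the remaining 20 bytes
 * at offset 0; vmci_q_header_add_producer_tail() then advances the
 * tail to (90 + 30) mod 100 == 20.
 */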
/*
 * Dequeues data (if available) from the given consume queue. Writes data
 * to the user provided buffer using the provided function.
 * Assumes the queue->mutex has been acquired.
 * Results:
 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
 * (as defined by the queue size).
 * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
 * Otherwise the number of bytes dequeued is returned.
 * Side effects:
 * Updates the head pointer of the consume queue.
 */
static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 consume_q_size,
				 struct iov_iter *to,
				 bool update_consumer)
{
	size_t buf_size = iov_iter_count(to);
	s64 buf_ready;
	u64 head;
	size_t read;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
					    produce_q->q_header,
					    consume_q_size);
	if (buf_ready == 0)
		return VMCI_ERROR_QUEUEPAIR_NODATA;

	if (buf_ready < VMCI_SUCCESS)
		return (ssize_t) buf_ready;

	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
	head = vmci_q_header_consumer_head(produce_q->q_header);
	if (likely(head + read < consume_q_size)) {
		result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
	} else {
		/* Head pointer wraps around. */

		const size_t tmp = (size_t) (consume_q_size - head);

		result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
		if (result >= VMCI_SUCCESS)
			result = qp_memcpy_from_queue_iter(to, consume_q, 0,
							   read - tmp);
	}

	if (result < VMCI_SUCCESS)
		return result;

	if (update_consumer)
		vmci_q_header_add_consumer_head(produce_q->q_header,
						read, consume_q_size);

	return read;
}
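/*
 * Illustrative note: the update_consumer flag is what distinguishes a
 * destructive read from a peek. vmci_qpair_dequeue() below passes
 * true, so the consumer head advances past the bytes read;
 * vmci_qpair_peek() passes false, so the same bytes remain ready and a
 * subsequent dequeue returns them again.
 */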
/*
 * vmci_qpair_alloc() - Allocates a queue pair.
 * @qpair:      Pointer for the new vmci_qp struct.
 * @handle:     Handle to track the resource.
 * @produce_qsize:      Desired size of the producer queue.
 * @consume_qsize:      Desired size of the consumer queue.
 * @peer:       ContextID of the peer.
 * @flags:      VMCI flags.
 * @priv_flags: VMCI privilege flags.
 *
 * This is the client interface for allocating the memory for a
 * vmci_qp structure and then attaching to the underlying
 * queue. If an error occurs allocating the memory for the
 * vmci_qp structure, no attempt is made to attach. If an
 * error occurs attaching, then the structure is freed.
 */
int vmci_qpair_alloc(struct vmci_qp **qpair,
		     struct vmci_handle *handle,
		     u64 produce_qsize,
		     u64 consume_qsize,
		     u32 peer,
		     u32 flags,
		     u32 priv_flags)
{
	struct vmci_qp *my_qpair;
	int retval;
	struct vmci_handle src = VMCI_INVALID_HANDLE;
	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
	enum vmci_route route;
	vmci_event_release_cb wakeup_cb;
	void *client_data;

	/*
	 * Restrict the size of a queuepair. The device already
	 * enforces a limit on the total amount of memory that can be
	 * allocated to queuepairs for a guest. However, we try to
	 * allocate this memory before we make the queuepair
	 * allocation hypercall. On Linux, we allocate each page
	 * separately, which means rather than fail, the guest will
	 * thrash while it tries to allocate, and will become
	 * increasingly unresponsive to the point where it appears to
	 * be hung. So we place a limit on the size of an individual
	 * queuepair here, and leave the device to enforce the
	 * restriction on total queuepair memory. (Note that this
	 * doesn't prevent all cases; a user with only this much
	 * physical memory could still get into trouble.) The error
	 * used by the device is NO_RESOURCES, so use that here too.
	 */

	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
		return VMCI_ERROR_NO_RESOURCES;

	retval = vmci_route(&src, &dst, false, &route);
	if (retval < VMCI_SUCCESS)
		route = vmci_guest_code_active() ?
		    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;

	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
		pr_devel("NONBLOCK OR PINNED set");
		return VMCI_ERROR_INVALID_ARGS;
	}

	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
	if (!my_qpair)
		return VMCI_ERROR_NO_MEM;

	my_qpair->produce_q_size = produce_qsize;
	my_qpair->consume_q_size = consume_qsize;
	my_qpair->peer = peer;
	my_qpair->flags = flags;
	my_qpair->priv_flags = priv_flags;

	wakeup_cb = NULL;
	client_data = NULL;

	if (VMCI_ROUTE_AS_HOST == route) {
		my_qpair->guest_endpoint = false;
		if (!(flags & VMCI_QPFLAG_LOCAL)) {
			my_qpair->blocked = 0;
			my_qpair->generation = 0;
			init_waitqueue_head(&my_qpair->event);
			wakeup_cb = qp_wakeup_cb;
			client_data = (void *)my_qpair;
		}
	} else {
		my_qpair->guest_endpoint = true;
	}

	retval = vmci_qp_alloc(handle,
			       &my_qpair->produce_q,
			       my_qpair->produce_q_size,
			       &my_qpair->consume_q,
			       my_qpair->consume_q_size,
			       my_qpair->peer,
			       my_qpair->flags,
			       my_qpair->priv_flags,
			       my_qpair->guest_endpoint,
			       wakeup_cb, client_data);

	if (retval < VMCI_SUCCESS) {
		kfree(my_qpair);
		return retval;
	}

	*qpair = my_qpair;
	my_qpair->handle = *handle;

	return retval;
}
EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
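/*
 * Example usage (an illustrative sketch for a hypothetical client of
 * this API, not code from this driver; `peer_cid' and the 4 KiB queue
 * sizes are assumptions):
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	int rv;
 *
 *	rv = vmci_qpair_alloc(&qpair, &handle, 4096, 4096, peer_cid,
 *			      0, VMCI_NO_PRIVILEGE_FLAGS);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;
 *	...
 *	vmci_qpair_detach(&qpair);	(also frees the vmci_qp struct)
 */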
/*
 * vmci_qpair_detach() - Detaches the client from a queue pair.
 * @qpair:      Reference of a pointer to the qpair struct.
 *
 * This is the client interface for detaching from a VMCIQPair.
 * Note that this routine will free the memory allocated for the
 * vmci_qp structure too.
 */
int vmci_qpair_detach(struct vmci_qp **qpair)
{
	int result;
	struct vmci_qp *old_qpair;

	if (!qpair || !(*qpair))
		return VMCI_ERROR_INVALID_ARGS;

	old_qpair = *qpair;
	result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);

	/*
	 * The guest can fail to detach for a number of reasons, and
	 * if it does so, it will cleanup the entry (if there is one).
	 * The host can fail too, but it won't cleanup the entry
	 * immediately, it will do that later when the context is
	 * freed. Either way, we need to release the qpair struct
	 * here; there isn't much the caller can do, and we don't want
	 * to leak.
	 */

	memset(old_qpair, 0, sizeof(*old_qpair));
	old_qpair->handle = VMCI_INVALID_HANDLE;
	old_qpair->peer = VMCI_INVALID_ID;
	kfree(old_qpair);
	*qpair = NULL;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_detach);
/*
 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
 * @qpair:      Pointer to the queue pair struct.
 * @producer_tail:      Reference used for storing producer tail index.
 * @consumer_head:      Reference used for storing the consumer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the caller as the producer.
 */
int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
				   u64 *producer_tail,
				   u64 *consumer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(produce_q_header, consume_q_header,
					   producer_tail, consumer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
	     (consumer_head && *consumer_head >= qpair->produce_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
/*
 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
 * @qpair:      Pointer to the queue pair struct.
 * @consumer_tail:      Reference used for storing consumer tail index.
 * @producer_head:      Reference used for storing the producer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the caller as the consumer.
 */
int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
				   u64 *consumer_tail,
				   u64 *producer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(consume_q_header, produce_q_header,
					   consumer_tail, producer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
	     (producer_head && *producer_head >= qpair->consume_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
/*
 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free
 * space in the QPair from the point of view of the caller as
 * the producer, which is the common case. Returns < 0 on error;
 * otherwise the number of bytes into which data can be enqueued.
 */
s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(produce_q_header,
						  consume_q_header,
						  qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
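/*
 * Illustrative arithmetic (hypothetical numbers): the underlying
 * vmci_q_header_free_space() keeps one byte unused so a full ring can
 * be distinguished from an empty one (head == tail means empty), so a
 * freshly allocated queue pair with produce_q_size == 4096 reports at
 * most 4095 bytes of free space.
 */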
/*
 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free
 * space in the QPair from the point of view of the caller as
 * the consumer, which is not the common case. Returns < 0 on error;
 * otherwise the number of bytes into which data can be enqueued.
 */
s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(consume_q_header,
						  produce_q_header,
						  qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
/*
 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
 * producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of
 * enqueued data in the QPair from the point of view of the
 * caller as the producer, which is not the common case. Returns < 0 on
 * error; otherwise the number of bytes that may be read.
 */
s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(produce_q_header,
						 consume_q_header,
						 qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
/*
 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
 * consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of
 * enqueued data in the QPair from the point of view of the
 * caller as the consumer, which is the normal case. Returns < 0 on
 * error; otherwise the number of bytes that may be read.
 */
s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(consume_q_header,
						 produce_q_header,
						 qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
/*
 * vmci_qpair_enqueue() - Throw data on the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer containing data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * Returns number of bytes enqueued or < 0 on error.
 */
ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
			   const void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;
	struct iov_iter from;
	struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	iov_iter_kvec(&from, WRITE, &v, 1, buf_size);

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   &from);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
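/*
 * Example usage (illustrative sketch; `qpair' and `buf' are assumed to
 * come from a successful vmci_qpair_alloc()): a producer can size its
 * write to the available space before enqueueing:
 *
 *	s64 space = vmci_qpair_produce_free_space(qpair);
 *	if (space > 0) {
 *		size_t len = min_t(size_t, sizeof(buf), (size_t)space);
 *		ssize_t sent = vmci_qpair_enqueue(qpair, buf, len, 0);
 *	}
 *
 * A VMCI_ERROR_QUEUEPAIR_NOSPACE return simply means the peer has not
 * consumed enough data yet; the call can be retried later.
 */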
/*
 * vmci_qpair_dequeue() - Get data from the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * Returns number of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
			   void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;
	struct iov_iter to;
	struct kvec v = {.iov_base = buf, .iov_len = buf_size};

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	iov_iter_kvec(&to, READ, &v, 1, buf_size);

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &to, true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
/*
 * vmci_qpair_peek() - Peek at the data in the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused on Linux).
 *
 * This is the client interface for peeking into a queue. (I.e.,
 * copy data from the queue without updating the head pointer.)
 * Returns number of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
			void *buf,
			size_t buf_size,
			int buf_type)
{
	struct iov_iter to;
	struct kvec v = {.iov_base = buf, .iov_len = buf_size};
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	iov_iter_kvec(&to, READ, &v, 1, buf_size);

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &to, false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peek);
/*
 * vmci_qpair_enquev() - Throw data on the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to the msghdr containing the data.
 * @iov_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * This function uses IO vectors to handle the work. Returns number
 * of bytes enqueued or < 0 on error.
 */
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   &msg->msg_iter);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
/*
 * vmci_qpair_dequev() - Get data from the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to the msghdr taking the data.
 * @iov_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * This function uses IO vectors to handle the work. Returns number
 * of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &msg->msg_iter, true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
/*
 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to the msghdr taking the data.
 * @iov_size:   Length of buffer.
 * @buf_type:   Buffer type (Unused on Linux).
 *
 * This is the client interface for peeking into a queue. (I.e.,
 * copy data from the queue without updating the head pointer.)
 * This function uses IO vectors to handle the work. Returns number
 * of bytes peeked or < 0 on error.
 */
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
			 struct msghdr *msg,
			 size_t iov_size,
			 int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &msg->msg_iter, false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);
	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peekv);