/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#ifndef _VMW_VMCI_DEF_H_
#define _VMW_VMCI_DEF_H_

#include <linux/atomic.h>
/* Register offsets. */
#define VMCI_STATUS_ADDR        0x00
#define VMCI_CONTROL_ADDR       0x04
#define VMCI_ICR_ADDR           0x08
#define VMCI_IMR_ADDR           0x0c
#define VMCI_DATA_OUT_ADDR      0x10
#define VMCI_DATA_IN_ADDR       0x14
#define VMCI_CAPS_ADDR          0x18
#define VMCI_RESULT_LOW_ADDR    0x1c
#define VMCI_RESULT_HIGH_ADDR   0x20

/* Max number of devices. */
#define VMCI_MAX_DEVICES 1

/* Status register bits. */
#define VMCI_STATUS_INT_ON      0x1

/* Control register bits. */
#define VMCI_CONTROL_RESET        0x1
#define VMCI_CONTROL_INT_ENABLE   0x2
#define VMCI_CONTROL_INT_DISABLE  0x4

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL       0x1
#define VMCI_CAPS_GUESTCALL       0x2
#define VMCI_CAPS_DATAGRAM        0x4
#define VMCI_CAPS_NOTIFICATIONS   0x8

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM       0x1
#define VMCI_ICR_NOTIFICATION   0x2

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM       0x1
#define VMCI_IMR_NOTIFICATION   0x2

/* Maximum MSI/MSI-X interrupt vectors in the device. */
#define VMCI_MAX_INTRS 2
/*
 * Supported interrupt vectors. There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
enum {
        VMCI_INTR_DATAGRAM = 0,
        VMCI_INTR_NOTIFICATION = 1,
};
/*
 * A single VMCI device has an upper limit of 128MB on the amount of
 * memory that can be used for queue pairs.
 */
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)

/*
 * Queues with pre-mapped data pages must be small, so that we don't pin
 * too much kernel memory (especially on vmkernel). We limit a queuepair to
 * 32 KB, or 16 KB per queue for symmetrical pairs.
 */
#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many will create datagram handles. If a new caller arrives and
 * we have run out of slots we can manually increment the maximum size of
 * available resource IDs.
 *
 * VMCI reserved hypervisor datagram resource IDs.
 */
enum {
        VMCI_RESOURCES_QUERY = 0,
        VMCI_GET_CONTEXT_ID = 1,
        VMCI_SET_NOTIFY_BITMAP = 2,
        VMCI_DOORBELL_LINK = 3,
        VMCI_DOORBELL_UNLINK = 4,
        VMCI_DOORBELL_NOTIFY = 5,
        /*
         * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
         * obsoleted by the removal of VM to VM communication.
         */
        VMCI_DATAGRAM_REQUEST_MAP = 6,
        VMCI_DATAGRAM_REMOVE_MAP = 7,
        VMCI_EVENT_SUBSCRIBE = 8,
        VMCI_EVENT_UNSUBSCRIBE = 9,
        VMCI_QUEUEPAIR_ALLOC = 10,
        VMCI_QUEUEPAIR_DETACH = 11,

        /*
         * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
         * WS 7.0/7.1 and ESX 4.1
         */
        VMCI_HGFS_TRANSPORT = 13,
        VMCI_UNITY_PBRPC_REGISTER = 14,
        VMCI_RPC_PRIVILEGED = 15,
        VMCI_RPC_UNPRIVILEGED = 16,
        VMCI_RESOURCE_MAX = 17,
};
/*
 * struct vmci_handle - Ownership information structure
 * @context:    The VMX context ID.
 * @resource:   The resource ID (used for locating in resource hash).
 *
 * The vmci_handle structure is used to track resources used within
 * vmw_vmci.
 */
struct vmci_handle {
        u32 context;
        u32 resource;
};

#define vmci_make_handle(_cid, _rid) \
        (struct vmci_handle){ .context = _cid, .resource = _rid }

static inline bool vmci_handle_is_equal(struct vmci_handle h1,
                                        struct vmci_handle h2)
{
        return h1.context == h2.context && h1.resource == h2.resource;
}

#define VMCI_INVALID_ID ~0
static const struct vmci_handle VMCI_INVALID_HANDLE = {
        .context = VMCI_INVALID_ID,
        .resource = VMCI_INVALID_ID
};

static inline bool vmci_handle_is_invalid(struct vmci_handle h)
{
        return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
}
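
/*
 * Usage sketch (illustrative only, not part of the VMCI API): build a
 * handle from a context ID and resource ID and reject the reserved
 * invalid handle.  The helper name below is hypothetical.
 */
static inline bool vmci_example_handle_is_usable(u32 cid, u32 rid)
{
        struct vmci_handle h = vmci_make_handle(cid, rid);

        return !vmci_handle_is_invalid(h);
}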
/*
 * The below defines can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID   VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID  VMCI_INVALID_ID
static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
        .context = VMCI_ANON_SRC_CONTEXT_ID,
        .resource = VMCI_ANON_SRC_RESOURCE_ID
};
/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT ((u32) 16)

/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID 0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services. This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID 1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID 2

#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
                                  (_cid) > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID 0
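
/*
 * Illustrative sketch only: a handle that refers to a whole context is
 * built by pairing that context's ID with VMCI_CONTEXT_RESOURCE_ID, e.g.
 *
 *      struct vmci_handle ctx_handle =
 *              vmci_make_handle(cid, VMCI_CONTEXT_RESOURCE_ID);
 */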
/*
 * VMCI error codes.
 */
enum {
        VMCI_SUCCESS_QUEUEPAIR_ATTACH   = 5,
        VMCI_SUCCESS_QUEUEPAIR_CREATE   = 4,
        VMCI_SUCCESS_LAST_DETACH        = 3,
        VMCI_SUCCESS_ACCESS_GRANTED     = 2,
        VMCI_SUCCESS_ENTRY_DEAD         = 1,
        VMCI_SUCCESS                    = 0,
        VMCI_ERROR_INVALID_RESOURCE     = (-1),
        VMCI_ERROR_INVALID_ARGS         = (-2),
        VMCI_ERROR_NO_MEM               = (-3),
        VMCI_ERROR_DATAGRAM_FAILED      = (-4),
        VMCI_ERROR_MORE_DATA            = (-5),
        VMCI_ERROR_NO_MORE_DATAGRAMS    = (-6),
        VMCI_ERROR_NO_ACCESS            = (-7),
        VMCI_ERROR_NO_HANDLE            = (-8),
        VMCI_ERROR_DUPLICATE_ENTRY      = (-9),
        VMCI_ERROR_DST_UNREACHABLE      = (-10),
        VMCI_ERROR_PAYLOAD_TOO_LARGE    = (-11),
        VMCI_ERROR_INVALID_PRIV         = (-12),
        VMCI_ERROR_GENERIC              = (-13),
        VMCI_ERROR_PAGE_ALREADY_SHARED  = (-14),
        VMCI_ERROR_CANNOT_SHARE_PAGE    = (-15),
        VMCI_ERROR_CANNOT_UNSHARE_PAGE  = (-16),
        VMCI_ERROR_NO_PROCESS           = (-17),
        VMCI_ERROR_NO_DATAGRAM          = (-18),
        VMCI_ERROR_NO_RESOURCES         = (-19),
        VMCI_ERROR_UNAVAILABLE          = (-20),
        VMCI_ERROR_NOT_FOUND            = (-21),
        VMCI_ERROR_ALREADY_EXISTS       = (-22),
        VMCI_ERROR_NOT_PAGE_ALIGNED     = (-23),
        VMCI_ERROR_INVALID_SIZE         = (-24),
        VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
        VMCI_ERROR_TIMEOUT              = (-26),
        VMCI_ERROR_DATAGRAM_INCOMPLETE  = (-27),
        VMCI_ERROR_INCORRECT_IRQL       = (-28),
        VMCI_ERROR_EVENT_UNKNOWN        = (-29),
        VMCI_ERROR_OBSOLETE             = (-30),
        VMCI_ERROR_QUEUEPAIR_MISMATCH   = (-31),
        VMCI_ERROR_QUEUEPAIR_NOTSET     = (-32),
        VMCI_ERROR_QUEUEPAIR_NOTOWNER   = (-33),
        VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
        VMCI_ERROR_QUEUEPAIR_NOSPACE    = (-35),
        VMCI_ERROR_QUEUEPAIR_NODATA     = (-36),
        VMCI_ERROR_BUSMEM_INVALIDATION  = (-37),
        VMCI_ERROR_MODULE_NOT_LOADED    = (-38),
        VMCI_ERROR_DEVICE_NOT_FOUND     = (-39),
        VMCI_ERROR_QUEUEPAIR_NOT_READY  = (-40),
        VMCI_ERROR_WOULD_BLOCK          = (-41),

        /* VMCI clients should return error code within this range */
        VMCI_ERROR_CLIENT_MIN           = (-500),
        VMCI_ERROR_CLIENT_MAX           = (-550),

        /* Internal error codes. */
        VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
};
/* VMCI reserved events. */
enum {
        /* Only applicable to guest endpoints */
        VMCI_EVENT_CTX_ID_UPDATE  = 0,

        /* Applicable to guest and host */
        VMCI_EVENT_CTX_REMOVED    = 1,

        /* Only applicable to guest endpoints */
        VMCI_EVENT_QP_RESUMED     = 2,

        /* Applicable to guest and host */
        VMCI_EVENT_QP_PEER_ATTACH = 3,

        /* Applicable to guest and host */
        VMCI_EVENT_QP_PEER_DETACH = 4,

        /*
         * Applicable to VMX and vmk. On vmk,
         * this event has the Context payload type.
         */
        VMCI_EVENT_MEM_ACCESS_ON  = 5,

        /*
         * Applicable to VMX and vmk. Same as
         * above for the payload type.
         */
        VMCI_EVENT_MEM_ACCESS_OFF = 6,
        VMCI_EVENT_MAX            = 7,
};

/*
 * Of the above events, a few are reserved for use in the VMX, and
 * other endpoints (guest and host kernel) should not use them. For
 * the rest of the events, we allow both host and guest endpoints to
 * subscribe to them, to maintain the same API for host and guest
 * endpoints.
 */
#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
                                      (_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
                                  !VMCI_EVENT_VALID_VMX(_event))
/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0
/*
 * VMCI coarse-grained privileges (per context or host
 * process/endpoint). An entity with the restricted flag is only
 * allowed to interact with the hypervisor and trusted entities.
 */
enum {
        VMCI_NO_PRIVILEGE_FLAGS = 0,
        VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
        VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
        VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
                                    VMCI_PRIVILEGE_FLAG_TRUSTED),
        VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
        VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
        VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
};

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023
/*
 * Increment major version when you make an incompatible change.
 * Compatibility goes both ways (old driver with new executable
 * as well as new driver with old executable).
 */

/* Never change VMCI_VERSION_SHIFT_WIDTH */
#define VMCI_VERSION_SHIFT_WIDTH 16
#define VMCI_MAKE_VERSION(_major, _minor) \
        ((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))

#define VMCI_VERSION_MAJOR(v)  ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
#define VMCI_VERSION_MINOR(v)  ((u16) (v))

/*
 * VMCI_VERSION is always the current version. Subsequently listed
 * versions are ways of detecting previous versions of the connecting
 * application (i.e., VMX).
 *
 * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
 * communication.
 *
 * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
 * support.
 *
 * VMCI_VERSION_HOSTQP: This version introduced host end point support
 * for hosted products.
 *
 * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
 * support for host end-points.
 *
 * VMCI_VERSION_PREVERS2: This fictional version number is intended to
 * represent the version of a VMX which doesn't call into the driver
 * with ioctl VERSION2 and thus doesn't establish its version with the
 * driver.
 */
#define VMCI_VERSION                VMCI_VERSION_NOVMVM
#define VMCI_VERSION_NOVMVM         VMCI_MAKE_VERSION(11, 0)
#define VMCI_VERSION_NOTIFY         VMCI_MAKE_VERSION(10, 0)
#define VMCI_VERSION_HOSTQP         VMCI_MAKE_VERSION(9, 0)
#define VMCI_VERSION_PREHOSTQP      VMCI_MAKE_VERSION(8, 0)
#define VMCI_VERSION_PREVERS2       VMCI_MAKE_VERSION(1, 0)

#define VMCI_SOCKETS_MAKE_VERSION(_p) \
        ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
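
/*
 * Illustrative check only (not part of the VMCI API): VMCI_MAKE_VERSION
 * packs major/minor into a single u32, and VMCI_VERSION_MAJOR/MINOR
 * recover them; e.g. VMCI_VERSION_NOVMVM has major 11 and minor 0.
 * The helper name below is hypothetical.
 */
static inline bool vmci_example_version_roundtrip(void)
{
        u32 v = VMCI_MAKE_VERSION(11, 0);

        return VMCI_VERSION_MAJOR(v) == 11 && VMCI_VERSION_MINOR(v) == 0;
}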
/*
 * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and
 * we start at sequence 9f. This gives us the same values that our shipping
 * products use, starting at 1951, provided we leave out the direction and
 * structure size. Note that VMMon occupies the block following us, starting
 * at 2001.
 */
#define IOCTL_VMCI_VERSION                      _IO(7, 0x9f)    /* 1951 */
#define IOCTL_VMCI_INIT_CONTEXT                 _IO(7, 0xa0)
#define IOCTL_VMCI_QUEUEPAIR_SETVA              _IO(7, 0xa4)
#define IOCTL_VMCI_NOTIFY_RESOURCE              _IO(7, 0xa5)
#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE        _IO(7, 0xa6)
#define IOCTL_VMCI_VERSION2                     _IO(7, 0xa7)
#define IOCTL_VMCI_QUEUEPAIR_ALLOC              _IO(7, 0xa8)
#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE        _IO(7, 0xa9)
#define IOCTL_VMCI_QUEUEPAIR_DETACH             _IO(7, 0xaa)
#define IOCTL_VMCI_DATAGRAM_SEND                _IO(7, 0xab)
#define IOCTL_VMCI_DATAGRAM_RECEIVE             _IO(7, 0xac)
#define IOCTL_VMCI_CTX_ADD_NOTIFICATION         _IO(7, 0xaf)
#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION      _IO(7, 0xb0)
#define IOCTL_VMCI_CTX_GET_CPT_STATE            _IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE            _IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID               _IO(7, 0xb3)
#define IOCTL_VMCI_SOCKETS_VERSION              _IO(7, 0xb4)
#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE         _IO(7, 0xb8)
#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID        _IO(7, 0xb9)
#define IOCTL_VMCI_SET_NOTIFY                   _IO(7, 0xcb)    /* 1995 */
/*IOCTL_VMMON_START                             _IO(7, 0xd1)*/  /* 2001 */
/*
 * struct vmci_queue_header - VMCI Queue Header information.
 *
 * A Queue cannot stand by itself as designed. Each Queue's header
 * contains a pointer into itself (the producer_tail) and into its peer
 * (consumer_head). The reason for the separation is one of
 * accessibility: Each end-point can modify two things: where the next
 * location to enqueue is within its produce_q (producer_tail); and
 * where the next dequeue location is in its consume_q (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to
 * guest; NOTE that in the host both queue headers are mapped r/w).
 * But, each end-point needs read access to both Queue header
 * structures in order to determine how much space is used (or left)
 * in the Queue. This is because for an end-point to know how full
 * its produce_q is, it needs to use the consumer_head that points into
 * the produce_q but -that- consumer_head is in the Queue header for
 * that end-point's consume_q.
 *
 * Thoroughly confused? Sorry.
 *
 * producer_tail: the point to enqueue new entrants. When you approach
 * a line in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued. In other words, who is next in line is he who is at the
 * head of the line.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head in which case consumer_head does not point to a valid
 * byte of data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head
 * then the produce_q is empty.
 */
struct vmci_queue_header {
        /* All fields are 64bit and aligned. */
        struct vmci_handle handle;      /* Identifier. */
        atomic64_t producer_tail;       /* Offset in this queue. */
        atomic64_t consumer_head;       /* Offset in peer queue. */
};
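
/*
 * Worked example (illustrative only): with a 16 byte produce_q, a
 * producer_tail of 5 and a consumer_head of 2, offsets 2..4 hold data the
 * peer may consume (3 bytes used), and the producer may still write
 * 16 - 3 - 1 = 12 bytes (offsets 5..15 and 0).  One byte is always left
 * unused so that tail == head unambiguously means "empty".
 */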
/*
 * struct vmci_datagram - Base struct for vmci datagrams.
 * @dst:        A vmci_handle that tracks the destination of the datagram.
 * @src:        A vmci_handle that tracks the source of the datagram.
 * @payload_size: The size of the payload.
 *
 * vmci_datagram structs are used when sending vmci datagrams. They include
 * the necessary source and destination information to properly route
 * the information along with the size of the package.
 */
struct vmci_datagram {
        struct vmci_handle dst;
        struct vmci_handle src;
        u64 payload_size;
};
/*
 * Second flag is for creating a well-known handle instead of a per context
 * handle. Next flag is for deferring datagram delivery, so that the
 * datagram callback is invoked in a delayed context (not interrupt context).
 */
#define VMCI_FLAG_DG_NONE          0
#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
#define VMCI_FLAG_ANYCID_DG_HND    0x2
#define VMCI_FLAG_DG_DELAYED_CB    0x4

/*
 * Maximum supported size of a VMCI datagram for routable datagrams.
 * Datagrams going to the hypervisor are allowed to be larger.
 */
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
                                  sizeof(struct vmci_datagram))
#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
                                      sizeof(struct vmci_datagram))
#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
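
/*
 * Illustrative helper only (not part of the VMCI API): a datagram's total
 * size is the fixed header plus its payload, rounded up to the next 8 byte
 * boundary by VMCI_DG_SIZE_ALIGNED.  For example, a 13 byte payload gives
 * VMCI_DG_SIZE of 24 + 13 = 37 bytes and an aligned size of 40 bytes.
 */
static inline size_t vmci_example_dg_wire_size(const struct vmci_datagram *dg)
{
        return VMCI_DG_SIZE_ALIGNED(dg);
}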
struct vmci_event_payload_qp {
        struct vmci_handle handle;  /* queue_pair handle. */
        u32 peer_id;                /* Context id of attaching/detaching VM. */
        u32 _pad;
};
/* Flags for VMCI queue_pair API. */
enum {
        /* Fail alloc if QP not created by peer. */
        VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,

        /* Only allow attaches from local context. */
        VMCI_QPFLAG_LOCAL = 1 << 1,

        /* Host won't block when guest is quiesced. */
        VMCI_QPFLAG_NONBLOCK = 1 << 2,

        /* Pin data pages in ESX. Used with NONBLOCK */
        VMCI_QPFLAG_PINNED = 1 << 3,

        /* Update the following flag when adding new flags. */
        VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
                             VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),

        /* Convenience flags */
        VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
        VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
};
/*
 * We allow at least 1024 more event datagrams from the hypervisor past the
 * normally allowed datagrams pending for a given context. We define this
 * limit on event datagrams from the hypervisor to guard against DoS attack
 * from a malicious VM which could repeatedly attach to and detach from a queue
 * pair, causing events to be queued at the destination VM. However, the rate
 * at which such events can be generated is small since it requires a VM exit
 * and handling of queue pair attach/detach call at the hypervisor. Event
 * datagrams may be queued up at the destination VM if it has interrupts
 * disabled or if it is not draining events for some other reason. 1024
 * datagrams is a grossly conservative estimate of the time for which
 * interrupts may be disabled in the destination VM, but at the same time does
 * not exacerbate the memory pressure problem on the host by much (size of each
 * event datagram is small).
 */
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
        (VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
         1024 * (sizeof(struct vmci_datagram) + \
                 sizeof(struct vmci_event_data_max)))
/*
 * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
 * hypervisor resources. Struct size is 16 bytes. All fields in struct are
 * aligned to their natural alignment.
 */
struct vmci_resource_query_hdr {
        struct vmci_datagram hdr;
        u32 num_resources;
        u32 _padding;
};

/*
 * Convenience struct for negotiating vectors. Must match layout of
 * VMCIResourceQueryHdr minus the struct vmci_datagram header.
 */
struct vmci_resource_query_msg {
        u32 num_resources;
        u32 _padding;
        u32 resources[1];
};

/*
 * The maximum number of resources that can be queried using
 * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
 * bits of a positive return value. Negative values are reserved for
 * errors.
 */
#define VMCI_RESOURCE_QUERY_MAX_NUM 31

/* Maximum size for the VMCI_RESOURCE_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE \
        (sizeof(struct vmci_resource_query_hdr) + \
         sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
/*
 * Struct used for setting the notification bitmap. All fields in
 * struct are aligned to their natural alignment.
 */
struct vmci_notify_bm_set_msg {
        struct vmci_datagram hdr;
        u32 bitmap_ppn;
        u32 _pad;
};
/*
 * Struct used for linking a doorbell handle with an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_link_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
        u64 notify_idx;
};
/*
 * Struct used for unlinking a doorbell handle from an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_unlink_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};
/*
 * Struct used for generating a notification on a doorbell handle. All
 * fields in struct are aligned to their natural alignment.
 */
struct vmci_doorbell_notify_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};
/*
 * This struct is used to contain data for events. Size of this struct is a
 * multiple of 8 bytes, and all fields are aligned to their natural alignment.
 */
struct vmci_event_data {
        u32 event;      /* 4 bytes. */
        u32 _pad;
        /* Event payload is put here. */
};
/*
 * Define the different VMCI_EVENT payload data types here. All structs must
 * be a multiple of 8 bytes, and fields must be aligned to their natural
 * alignment.
 */
struct vmci_event_payld_ctx {
        u32 context_id; /* 4 bytes. */
        u32 _pad;
};

struct vmci_event_payld_qp {
        struct vmci_handle handle;  /* queue_pair handle. */
        u32 peer_id;                /* Context id of attaching/detaching VM. */
        u32 _pad;
};
/*
 * We define the following struct to get the size of the maximum event
 * data the hypervisor may send to the guest. If adding a new event
 * payload type above, add it to the following struct too (inside the
 * union).
 */
struct vmci_event_data_max {
        struct vmci_event_data event_data;
        union {
                struct vmci_event_payld_ctx context_payload;
                struct vmci_event_payld_qp qp_payload;
        } ev_payload;
};
/*
 * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
 * VMCI_EVENT_HANDLER messages. Struct size is 32 bytes. All fields
 * in struct are aligned to their natural alignment.
 */
struct vmci_event_msg {
        struct vmci_datagram hdr;

        /* Has event type and payload. */
        struct vmci_event_data event_data;

        /* Payload gets put here. */
};
/* Event with context payload. */
struct vmci_event_ctx {
        struct vmci_event_msg msg;
        struct vmci_event_payld_ctx payload;
};

/* Event with QP payload. */
struct vmci_event_qp {
        struct vmci_event_msg msg;
        struct vmci_event_payld_qp payload;
};
/*
 * Structs used for queue_pair alloc and detach messages. We align fields of
 * these structs to 64bit boundaries.
 */
struct vmci_qp_alloc_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
        u32 peer;
        u32 flags;
        u64 produce_size;
        u64 consume_size;
        u64 num_ppns;

        /* List of PPNs placed here. */
};

struct vmci_qp_detach_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};
/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB 0x01

typedef void (*vmci_callback) (void *client_data);

/*
 * struct vmci_qp - A vmw_vmci queue pair handle.
 *
 * This structure is used as a handle to a queue pair created by
 * VMCI. It is intentionally left opaque to clients.
 */
struct vmci_qp;

/* Callback needed for correctly waiting on events. */
typedef int (*vmci_datagram_recv_cb) (void *client_data,
                                      struct vmci_datagram *msg);

/* VMCI Event API. */
typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
                               void *client_data);
/*
 * We use the following inline function to access the payload data
 * associated with an event data.
 */
static inline const void *
vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
{
        return (const char *)ev_data + sizeof(*ev_data);
}

static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
{
        return (void *)vmci_event_data_const_payload(ev_data);
}
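
/*
 * Illustrative sketch only (not part of the VMCI API): given an incoming
 * struct vmci_event_msg, the accessor above yields the event-specific
 * payload, here viewed as a queue pair peer attach/detach payload.  The
 * helper name below is hypothetical.
 */
static inline u32
vmci_example_qp_event_peer(const struct vmci_event_msg *e_msg)
{
        const struct vmci_event_payld_qp *payload =
                vmci_event_data_const_payload(&e_msg->event_data);

        return payload->peer_id;
}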
/*
 * Helper to read a value from a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. Also, doing an
 * atomic64_read on X86_32 uniprocessor systems may be implemented
 * as a non-locked cmpxchg8b, which may end up overwriting updates done
 * by the VMCI device to the memory location. On 32bit SMP, the lock
 * prefix will be used, so correctness isn't an issue, but using a
 * 64bit operation still adds unnecessary overhead.
 */
static inline u64 vmci_q_read_pointer(atomic64_t *var)
{
#if defined(CONFIG_X86_32)
        return atomic_read((atomic_t *)var);
#else
        return atomic64_read(var);
#endif
}
/*
 * Helper to set the value of a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. On 32bit SMP, using a
 * locked cmpxchg8b adds unnecessary overhead.
 */
static inline void vmci_q_set_pointer(atomic64_t *var,
                                      u64 new_val)
{
#if defined(CONFIG_X86_32)
        return atomic_set((atomic_t *)var, (u32)new_val);
#else
        return atomic64_set(var, new_val);
#endif
}
/*
 * Helper to add a given offset to a head or tail pointer. Wraps the
 * value of the pointer around the max size of the queue.
 */
static inline void vmci_qp_add_pointer(atomic64_t *var,
                                        size_t add,
                                        u64 size)
{
        u64 new_val = vmci_q_read_pointer(var);

        if (new_val >= size - add)
                new_val -= size;

        new_val += add;

        vmci_q_set_pointer(var, new_val);
}
/*
 * Helper routine to get the Producer Tail from the supplied queue.
 */
static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return vmci_q_read_pointer(&qh->producer_tail);
}
/*
 * Helper routine to get the Consumer Head from the supplied queue.
 */
static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return vmci_q_read_pointer(&qh->consumer_head);
}
/*
 * Helper routine to increment the Producer Tail. Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the tail itself.
 */
static inline void
vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
                                size_t add,
                                u64 queue_size)
{
        vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
}
/*
 * Helper routine to increment the Consumer Head. Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the head itself.
 */
static inline void
vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
                                size_t add,
                                u64 queue_size)
{
        vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
}
/*
 * Helper routine for getting the head and the tail pointer for a queue.
 * Both the VMCIQueues are needed to get both the pointers for one queue.
 */
static inline void
vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
                           const struct vmci_queue_header *consume_q_header,
                           u64 *producer_tail,
                           u64 *consumer_head)
{
        if (producer_tail)
                *producer_tail = vmci_q_header_producer_tail(produce_q_header);

        if (consumer_head)
                *consumer_head = vmci_q_header_consumer_head(consume_q_header);
}
static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
                                      const struct vmci_handle handle)
{
        q_header->handle = handle;
        atomic64_set(&q_header->producer_tail, 0);
        atomic64_set(&q_header->consumer_head, 0);
}
/*
 * Finds available free space in a produce queue to enqueue more
 * data or reports an error if queue pair corruption is detected.
 */
static inline s64
vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
                         const struct vmci_queue_header *consume_q_header,
                         const u64 produce_q_size)
{
        u64 tail;
        u64 head;
        u64 free_space;

        tail = vmci_q_header_producer_tail(produce_q_header);
        head = vmci_q_header_consumer_head(consume_q_header);

        if (tail >= produce_q_size || head >= produce_q_size)
                return VMCI_ERROR_INVALID_SIZE;

        /*
         * Deduct 1 to avoid tail becoming equal to head which causes
         * ambiguity. If head and tail are equal it means that the
         * queue is empty.
         */
        if (tail >= head)
                free_space = produce_q_size - (tail - head) - 1;
        else
                free_space = head - tail - 1;

        return free_space;
}
/*
 * vmci_q_header_free_space() does all the heavy lifting of
 * determining the number of free bytes in a Queue. This routine
 * then subtracts that size from the full size of the Queue so
 * the caller knows how many bytes are ready to be dequeued.
 * Results:
 * On success, available data size in bytes (up to MAX_INT64).
 * On failure, appropriate error code.
 */
static inline s64
vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
                        const struct vmci_queue_header *produce_q_header,
                        const u64 consume_q_size)
{
        s64 free_space;

        free_space = vmci_q_header_free_space(consume_q_header,
                                              produce_q_header, consume_q_size);
        if (free_space < VMCI_SUCCESS)
                return free_space;

        return consume_q_size - free_space - 1;
}
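
/*
 * Usage sketch (illustrative only, not part of the VMCI API): a producer
 * typically checks free space on its produce queue before enqueueing;
 * negative return values are VMCI_ERROR_* codes and must be checked.  The
 * helper name below is hypothetical.
 */
static inline bool
vmci_example_can_enqueue(const struct vmci_queue_header *produce_q_header,
                         const struct vmci_queue_header *consume_q_header,
                         u64 produce_q_size, u64 bytes)
{
        s64 free_space = vmci_q_header_free_space(produce_q_header,
                                                  consume_q_header,
                                                  produce_q_size);

        return free_space >= 0 && (u64)free_space >= bytes;
}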
#endif /* _VMW_VMCI_DEF_H_ */