// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/processor.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740

#define VMCI_UTIL_NUM_RESOURCES 1

/*
 * Datagram buffers for DMA send/receive must accommodate at least
 * a maximum sized datagram and the header.
 */
#define VMCI_DMA_DG_BUFFER_SIZE (VMCI_MAX_DG_SIZE + PAGE_SIZE)

static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;

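/*
 * Per-device state for a VMCI guest PCI device. Registers are reached
 * through either the BAR0 I/O port window (iobase) or, on newer devices,
 * the BAR1 MMIO window (mmio_base). data_buffer receives incoming datagrams
 * and tx_buffer holds outgoing ones; both are DMA-coherent allocations when
 * the DMA datagram capability is in use.
 */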
struct vmci_guest_device {
	struct device *dev;	/* PCI device we are attached to */
	void __iomem *iobase;
	void __iomem *mmio_base;

	bool exclusive_vectors;

	struct wait_queue_head inout_wq;

	void *data_buffer;
	dma_addr_t data_buffer_base;
	void *tx_buffer;
	dma_addr_t tx_buffer_base;
	void *notification_bitmap;
	dma_addr_t notification_base;
};

static bool use_ppn64;

bool vmci_use_ppn64(void)
{
	return use_ppn64;
}

/* vmci_dev singleton device and supporting data */
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);

bool vmci_guest_code_active(void)
{
	return atomic_read(&vmci_num_guest_devices) != 0;
}

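/*
 * Returns the VM context ID; on first use it is queried from the hypervisor
 * with a VMCI_GET_CONTEXT_ID datagram and cached in vm_context_id.
 */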
u32 vmci_get_vm_context_id(void)
{
	if (vm_context_id == VMCI_INVALID_ID) {
		struct vmci_datagram get_cid_msg;

		get_cid_msg.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
						   VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		vm_context_id = vmci_send_datagram(&get_cid_msg);
	}
	return vm_context_id;
}

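/* Register accessors: use the MMIO window when present, port I/O otherwise. */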
static unsigned int vmci_read_reg(struct vmci_guest_device *dev, u32 reg)
{
	if (dev->mmio_base != NULL)
		return readl(dev->mmio_base + reg);
	return ioread32(dev->iobase + reg);
}

static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg)
{
	if (dev->mmio_base != NULL)
		writel(val, dev->mmio_base + reg);
	else
		iowrite32(val, dev->iobase + reg);
}

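/*
 * Reads incoming datagram data from the device: a "rep in" over the I/O
 * port for older devices, or a device-driven DMA into the data buffer,
 * completion of which is signalled through inout_wq, for MMIO devices.
 */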
static void vmci_read_data(struct vmci_guest_device *vmci_dev,
			   void *dest, size_t size)
{
	if (vmci_dev->mmio_base == NULL)
		ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
			    dest, size);
	else {
		/*
		 * For DMA datagrams, the data_buffer will contain the header on the
		 * first page, followed by the incoming datagram(s) on the following
		 * pages. The header uses an S/G element immediately following the
		 * header on the first page to point to the data area.
		 */
		struct vmci_data_in_out_header *buffer_header = vmci_dev->data_buffer;
		struct vmci_sg_elem *sg_array = (struct vmci_sg_elem *)(buffer_header + 1);
		size_t buffer_offset = dest - vmci_dev->data_buffer;

		buffer_header->opcode = 1;
		buffer_header->size = 1;
		buffer_header->busy = 0;
		sg_array[0].addr = vmci_dev->data_buffer_base + buffer_offset;
		sg_array[0].size = size;

		vmci_write_reg(vmci_dev, lower_32_bits(vmci_dev->data_buffer_base),
			       VMCI_DATA_IN_LOW_ADDR);

		wait_event(vmci_dev->inout_wq, buffer_header->busy == 1);
	}
}

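/*
 * Hands an outgoing datagram to the device: for MMIO devices it is copied
 * into the DMA tx buffer and kicked off via VMCI_DATA_OUT_LOW_ADDR; for
 * older devices it is written with a "rep out" to the I/O port.
 */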
static int vmci_write_data(struct vmci_guest_device *dev,
			   struct vmci_datagram *dg)
{
	int result;

	if (dev->mmio_base != NULL) {
		struct vmci_data_in_out_header *buffer_header = dev->tx_buffer;
		u8 *dg_out_buffer = (u8 *)(buffer_header + 1);

		if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE)
			return VMCI_ERROR_INVALID_ARGS;

		/*
		 * Initialize send buffer with outgoing datagram
		 * and set up header for inline data. Device will
		 * not access buffer asynchronously - only after
		 * the write to VMCI_DATA_OUT_LOW_ADDR.
		 */
		memcpy(dg_out_buffer, dg, VMCI_DG_SIZE(dg));
		buffer_header->opcode = 0;
		buffer_header->size = VMCI_DG_SIZE(dg);
		buffer_header->busy = 1;

		vmci_write_reg(dev, lower_32_bits(dev->tx_buffer_base),
			       VMCI_DATA_OUT_LOW_ADDR);

		/* Caller holds a spinlock, so cannot block. */
		spin_until_cond(buffer_header->busy == 0);

		result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
		if (result == VMCI_SUCCESS)
			result = (int)buffer_header->result;
	} else {
		iowrite8_rep(dev->iobase + VMCI_DATA_OUT_ADDR,
			     dg, VMCI_DG_SIZE(dg));
		result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
	}

	return result;
}

/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention since shared code is calling this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
	unsigned long flags;
	int result;

	/* Check args. */
	if (dg == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * Need to acquire spinlock on the device because the datagram
	 * data may be spread over multiple pages and the monitor may
	 * interleave device user rpc calls from multiple
	 * VCPUs. Acquiring the spinlock precludes that
	 * possibility. Disabling interrupts to avoid incoming
	 * datagrams during a "rep out" and possibly landing up in
	 * this function.
	 */
	spin_lock_irqsave(&vmci_dev_spinlock, flags);

	if (vmci_dev_g) {
		vmci_write_data(vmci_dev_g, dg);
		result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
	} else {
		result = VMCI_ERROR_UNAVAILABLE;
	}

	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);

/*
 * Gets called with the new context id if updated or resumed.
 */
static void vmci_guest_cid_update(u32 sub_id,
				  const struct vmci_event_data *event_data,
				  void *client_data)
{
	const struct vmci_event_payld_ctx *ev_payload =
			vmci_event_data_const_payload(event_data);

	if (sub_id != ctx_update_sub_id) {
		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
		return;
	}

	if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
		pr_devel("Invalid event data\n");
		return;
	}

	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
		 vm_context_id, ev_payload->context_id, event_data->event);

	vm_context_id = ev_payload->context_id;
}

/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead. Returns 0 if
 * required hypercalls (or fallback hypercalls) are supported by the host,
 * an error code otherwise.
 */
static int vmci_check_host_caps(struct pci_dev *pdev)
{
	bool result;
	struct vmci_resource_query_msg *msg;
	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
			VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
	struct vmci_datagram *check_msg;

	check_msg = kzalloc(msg_size, GFP_KERNEL);
	if (!check_msg) {
		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
		return -ENOMEM;
	}

	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	/* Checks that hypercalls are supported */
	result = vmci_send_datagram(check_msg) == 0x01;
	kfree(check_msg);

	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
		__func__, result ? "PASSED" : "FAILED");

	/* We need the vector. There are no fallbacks. */
	return result ? 0 : -ENXIO;
}

/*
 * Reads datagrams from the device and dispatches them. For IO port
 * based access to the device, we always start reading datagrams into
 * only the first page of the datagram buffer. If the datagrams don't
 * fit into one page, we use the maximum datagram buffer size for the
 * remainder of the invocation. This is a simple heuristic for not
 * penalizing small datagrams. For DMA-based datagrams, we always
 * use the maximum datagram buffer size, since there is no performance
 * penalty for doing so.
 *
 * This function assumes that it has exclusive access to the data
 * in register(s) for the duration of the call.
 */
static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev)
{
	u8 *dg_in_buffer = vmci_dev->data_buffer;
	struct vmci_datagram *dg;
	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	size_t current_dg_in_buffer_size;
	size_t remaining_bytes;
	bool is_io_port = vmci_dev->mmio_base == NULL;

	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

	if (!is_io_port) {
		/* For mmio, the first page is used for the header. */
		dg_in_buffer += PAGE_SIZE;

		/*
		 * For DMA-based datagram operations, there is no performance
		 * penalty for reading the maximum buffer size.
		 */
		current_dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	} else {
		current_dg_in_buffer_size = PAGE_SIZE;
	}
	vmci_read_data(vmci_dev, dg_in_buffer, current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;

	/*
	 * Read through the buffer until an invalid datagram header is
	 * encountered. The exit condition for datagrams read through
	 * VMCI_DATA_IN_ADDR is a bit more complicated, since a datagram
	 * can start on any page boundary in the buffer.
	 */
	while (dg->dst.resource != VMCI_INVALID_ID ||
	       (is_io_port && remaining_bytes > PAGE_SIZE)) {
		u64 dg_in_size;

		/*
		 * If using VMCI_DATA_IN_ADDR, skip to the next page
		 * as a datagram can start on any page boundary.
		 */
		if (dg->dst.resource == VMCI_INVALID_ID) {
			dg = (struct vmci_datagram *)roundup(
				(uintptr_t)dg + 1, PAGE_SIZE);
			remaining_bytes =
				(size_t)(dg_in_buffer +
					 current_dg_in_buffer_size -
					 (u8 *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

			/*
			 * If the remaining bytes in the datagram
			 * buffer don't contain the complete
			 * datagram, we first make sure we have enough
			 * room for it and then we read the remainder
			 * of the datagram and possibly any following
			 * datagrams.
			 */
			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {

					/*
					 * We move the partial
					 * datagram to the front and
					 * read the remainder of the
					 * datagram and possibly
					 * following calls into the
					 * following pages.
					 */
					memmove(dg_in_buffer, dg_in_buffer +
						current_dg_in_buffer_size -
						remaining_bytes,
						remaining_bytes);
					dg = (struct vmci_datagram *)
					    dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
					    dg_in_buffer_size;

				vmci_read_data(vmci_dev,
					       dg_in_buffer +
					       remaining_bytes,
					       current_dg_in_buffer_size -
					       remaining_bytes);
			}

			/*
			 * We special case event datagrams from the
			 * hypervisor.
			 */
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER) {
				result = vmci_event_dispatch(dg);
			} else {
				result = vmci_datagram_invoke_guest_handler(dg);
			}
			if (result < VMCI_SUCCESS)
				dev_dbg(vmci_dev->dev,
					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
					dg->dst.resource, result);

			/* On to the next datagram. */
			dg = (struct vmci_datagram *)((u8 *)dg +
						      dg_in_size);
		} else {
			size_t bytes_to_skip;

			/*
			 * Datagram doesn't fit in datagram buffer of maximal
			 * size. We drop it.
			 */
			dev_dbg(vmci_dev->dev,
				"Failed to receive datagram (size=%u bytes)\n",
				(unsigned int)dg_in_size);

			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;

			for (;;) {
				vmci_read_data(vmci_dev, dg_in_buffer,
					       current_dg_in_buffer_size);
				if (bytes_to_skip <= current_dg_in_buffer_size)
					break;

				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
						      bytes_to_skip);
		}

		remaining_bytes =
		    (size_t) (dg_in_buffer + current_dg_in_buffer_size -
			      (u8 *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			/* Get the next batch of datagrams. */

			vmci_read_data(vmci_dev, dg_in_buffer,
				       current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}

/*
 * Scans the notification bitmap for raised flags, clears them
 * and handles the notifications.
 */
static void vmci_process_bitmap(struct vmci_guest_device *dev)
{
	if (!dev->notification_bitmap) {
		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
		return;
	}

	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}

/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
 */
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/*
	 * If we are using MSI-X with exclusive vectors then we simply call
	 * vmci_dispatch_dgs(), since we know the interrupt was meant for us.
	 * Otherwise we must read the ICR to determine what to do.
	 */

	if (dev->exclusive_vectors) {
		vmci_dispatch_dgs(dev);
	} else {
		unsigned int icr;

		/* Acknowledge interrupt and determine what needs doing. */
		icr = vmci_read_reg(dev, VMCI_ICR_ADDR);
		if (icr == 0 || icr == ~0)
			return IRQ_NONE;

		if (icr & VMCI_ICR_DATAGRAM) {
			vmci_dispatch_dgs(dev);
			icr &= ~VMCI_ICR_DATAGRAM;
		}

		if (icr & VMCI_ICR_NOTIFICATION) {
			vmci_process_bitmap(dev);
			icr &= ~VMCI_ICR_NOTIFICATION;
		}

		if (icr & VMCI_ICR_DMA_DATAGRAM) {
			wake_up_all(&dev->inout_wq);
			icr &= ~VMCI_ICR_DMA_DATAGRAM;
		}

		if (icr != 0)
			dev_warn(dev->dev,
				 "Ignoring unknown interrupt cause (%d)\n",
				 icr);
	}

	return IRQ_HANDLED;
}

/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
 * which is for the notification bitmap. Will only get called if we are
 * using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/* For MSI-X we can just assume it was meant for us. */
	vmci_process_bitmap(dev);

	return IRQ_HANDLED;
}

/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_DMA_DATAGRAM,
 * which is for the completion of a DMA datagram send or receive operation.
 * Will only get called if we are using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	wake_up_all(&dev->inout_wq);

	return IRQ_HANDLED;
}

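/*
 * Releases the datagram buffers: DMA-coherent allocations when the MMIO/DMA
 * datagram path is in use, a vmalloc'd buffer otherwise.
 */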
static void vmci_free_dg_buffers(struct vmci_guest_device *vmci_dev)
{
	if (vmci_dev->mmio_base != NULL) {
		if (vmci_dev->tx_buffer != NULL)
			dma_free_coherent(vmci_dev->dev,
					  VMCI_DMA_DG_BUFFER_SIZE,
					  vmci_dev->tx_buffer,
					  vmci_dev->tx_buffer_base);
		if (vmci_dev->data_buffer != NULL)
			dma_free_coherent(vmci_dev->dev,
					  VMCI_DMA_DG_BUFFER_SIZE,
					  vmci_dev->data_buffer,
					  vmci_dev->data_buffer_base);
	} else {
		vfree(vmci_dev->data_buffer);
	}
}

/*
 * Most of the initialization at module load time is done here.
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase = NULL;
	void __iomem *mmio_base = NULL;
	unsigned int num_irq_vectors;
	unsigned int capabilities;
	unsigned int caps_in_use;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}

	/*
	 * The VMCI device with mmio access to registers requests 256KB
	 * for BAR1. If present, driver will use new VMCI device
	 * functionality for register access and datagram send/recv.
	 */
	if (pci_resource_len(pdev, 1) == VMCI_WITH_MMIO_ACCESS_BAR_SIZE) {
		dev_info(&pdev->dev, "MMIO register access is available\n");
		mmio_base = pci_iomap_range(pdev, 1, VMCI_MMIO_ACCESS_OFFSET,
					    VMCI_MMIO_ACCESS_SIZE);
		/* If the map fails, we fall back to IOIO access. */
		if (!mmio_base)
			dev_warn(&pdev->dev, "Failed to map MMIO register access\n");
	}

	if (!mmio_base) {
		if (IS_ENABLED(CONFIG_ARM64)) {
			dev_err(&pdev->dev, "MMIO base is invalid\n");
			return -ENXIO;
		}
		error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
		if (error) {
			dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
			return error;
		}
		iobase = pcim_iomap_table(pdev)[0];
	}

	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		error = -ENOMEM;
		goto err_unmap_mmio_base;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;
	vmci_dev->mmio_base = mmio_base;

	init_waitqueue_head(&vmci_dev->inout_wq);

	if (mmio_base != NULL) {
		vmci_dev->tx_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
							 &vmci_dev->tx_buffer_base,
							 GFP_KERNEL);
		if (!vmci_dev->tx_buffer) {
			dev_err(&pdev->dev,
				"Can't allocate memory for datagram tx buffer\n");
			error = -ENOMEM;
			goto err_unmap_mmio_base;
		}

		vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
							   &vmci_dev->data_buffer_base,
							   GFP_KERNEL);
	} else {
		vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	}
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		error = -ENOMEM;
		goto err_free_data_buffers;
	}

	pci_set_master(pdev);	/* To enable queue_pair functionality. */

	/*
	 * Verify that the VMCI Device supports the capabilities that
	 * we need. If the device is missing capabilities that we would
	 * like to use, check for fallback capabilities and use those
	 * instead (so we can run a new VM on old hosts). Fail the load if
	 * a required capability is missing and there is no fallback.
	 *
	 * Right now, we need datagrams. There are no fallbacks.
	 */
	capabilities = vmci_read_reg(vmci_dev, VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffers;
	}
	caps_in_use = VMCI_CAPS_DATAGRAM;

	/*
	 * Use 64-bit PPNs if the device supports them.
	 *
	 * There is no check for the return value of dma_set_mask_and_coherent
	 * since this driver can handle the default mask values if
	 * dma_set_mask_and_coherent fails.
	 */
	if (capabilities & VMCI_CAPS_PPN64) {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		use_ppn64 = true;
		caps_in_use |= VMCI_CAPS_PPN64;
	} else {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
		use_ppn64 = false;
	}

	/*
	 * If the hardware supports notifications, we will use that as
	 * well.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = dma_alloc_coherent(
			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
			GFP_KERNEL);
		if (!vmci_dev->notification_bitmap)
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
		else
			caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
	}

	if (mmio_base != NULL) {
		if (capabilities & VMCI_CAPS_DMA_DATAGRAM) {
			caps_in_use |= VMCI_CAPS_DMA_DATAGRAM;
		} else {
			dev_err(&pdev->dev,
				"Missing capability: VMCI_CAPS_DMA_DATAGRAM\n");
			error = -ENXIO;
			goto err_free_notification_bitmap;
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);

	/* Let the host know which capabilities we intend to use. */
	vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR);

	if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
		/* Let the device know the size for pages passed down. */
		vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT);

		/* Configure the high order parts of the data in/out buffers. */
		vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->data_buffer_base),
			       VMCI_DATA_IN_HIGH_ADDR);
		vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->tx_buffer_base),
			       VMCI_DATA_OUT_HIGH_ADDR);
	}

	/* Set up global device so that we can start sending datagrams */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	vmci_pdev = pdev;
	spin_unlock_irq(&vmci_dev_spinlock);

	/*
	 * Register notification bitmap with device if that capability is
	 * used.
	 */
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%lx\n",
				 bitmap_ppn);
			error = -ENXIO;
			goto err_remove_vmci_dev_g;
		}
	}

	/* Check host capabilities. */
	error = vmci_check_host_caps(pdev);
	if (error)
		goto err_remove_vmci_dev_g;

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
	 * update the internal context id when needed.
	 */
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

	/*
	 * Enable interrupts. Try MSI-X first, then MSI, and then fall back on
	 * legacy interrupts.
	 */
	if (vmci_dev->mmio_base != NULL)
		num_irq_vectors = VMCI_MAX_INTRS;
	else
		num_irq_vectors = VMCI_MAX_INTRS_NOTIFICATION;
	error = pci_alloc_irq_vectors(pdev, num_irq_vectors, num_irq_vectors,
				      PCI_IRQ_MSIX);
	if (error < 0) {
		error = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
		if (error < 0)
			goto err_unsubscribe_event;
	} else {
		vmci_dev->exclusive_vectors = true;
	}

	/*
	 * Request IRQ for legacy or MSI interrupts, or for first
	 * MSI-X vector.
	 */
	error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
				     vmci_interrupt, IRQF_SHARED,
				     KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			pci_irq_vector(pdev, 0), error);
		goto err_disable_msi;
	}

	/*
	 * For MSI-X with exclusive vectors we need to request an
	 * interrupt for each vector so that we get a separate
	 * interrupt handler routine. This allows us to distinguish
	 * between the vectors.
	 */
	if (vmci_dev->exclusive_vectors) {
		error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL,
					     vmci_interrupt_bm, 0,
					     KBUILD_MODNAME, vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				pci_irq_vector(pdev, 1), error);
			goto err_free_irq;
		}
		if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
			error = request_threaded_irq(pci_irq_vector(pdev, 2),
						     NULL,
						     vmci_interrupt_dma_datagram,
						     0, KBUILD_MODNAME,
						     vmci_dev);
			if (error) {
				dev_err(&pdev->dev,
					"Failed to allocate irq %u: %d\n",
					pci_irq_vector(pdev, 2), error);
				goto err_free_bm_irq;
			}
		}
	}

	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);

	/* Enable specific interrupt bits. */
	cmd = VMCI_IMR_DATAGRAM;
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM)
		cmd |= VMCI_IMR_DMA_DATAGRAM;
	vmci_write_reg(vmci_dev, cmd, VMCI_IMR_ADDR);

	/* Enable interrupts. */
	vmci_write_reg(vmci_dev, VMCI_CONTROL_INT_ENABLE, VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);

	vmci_call_vsock_callback(false);
	return 0;

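	/*
	 * Error unwind: release resources in the reverse order of their
	 * acquisition above.
	 */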
err_free_bm_irq:
	if (vmci_dev->exclusive_vectors)
		free_irq(pci_irq_vector(pdev, 1), vmci_dev);

err_free_irq:
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);

err_disable_msi:
	pci_free_irq_vectors(pdev);

err_unsubscribe_event:
	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_vmci_dev_g:
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_pdev = NULL;
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_notification_bitmap:
	if (vmci_dev->notification_bitmap) {
		vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

err_free_data_buffers:
	vmci_free_dg_buffers(vmci_dev);

err_unmap_mmio_base:
	if (mmio_base != NULL)
		pci_iounmap(pdev, mmio_base);

	/* The rest are managed resources and will be freed by PCI core */
	return error;
}

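/*
 * Tears down what vmci_guest_probe_device() set up: resets the device,
 * releases IRQ vectors, the notification bitmap and datagram buffers, and
 * unmaps the MMIO window.
 */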
static void vmci_guest_remove_device(struct pci_dev *pdev)
{
	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
	int vmci_err;

	dev_dbg(&pdev->dev, "Removing device\n");

	atomic_dec(&vmci_num_guest_devices);

	vmci_qp_guest_endpoints_exit();

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	vmci_pdev = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

	dev_dbg(&pdev->dev, "Resetting vmci device\n");
	vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);

	/*
	 * Free IRQ and then disable MSI/MSI-X as appropriate. For
	 * MSI-X, we might have multiple vectors, each with their own
	 * IRQ, which we must free too.
	 */
	if (vmci_dev->exclusive_vectors) {
		free_irq(pci_irq_vector(pdev, 1), vmci_dev);
		if (vmci_dev->mmio_base != NULL)
			free_irq(pci_irq_vector(pdev, 2), vmci_dev);
	}
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	pci_free_irq_vectors(pdev);

	if (vmci_dev->notification_bitmap) {
		/*
		 * The device reset above cleared the bitmap state of the
		 * device, so we can safely free it here.
		 */
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

	vmci_free_dg_buffers(vmci_dev);

	if (vmci_dev->mmio_base != NULL)
		pci_iounmap(pdev, vmci_dev->mmio_base);

	/* The rest are managed resources and will be freed by PCI core */
}

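/* PCI device IDs served by this driver; the zero entry terminates the table. */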
static const struct pci_device_id vmci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= vmci_ids,
	.probe		= vmci_guest_probe_device,
	.remove		= vmci_guest_remove_device,
};

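/*
 * Registration entry points for the guest PCI driver. There is no
 * module_init()/module_exit() here, so these are expected to be called
 * from the common VMCI driver code.
 */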
int __init vmci_guest_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}

void __exit vmci_guest_exit(void)
{
	pci_unregister_driver(&vmci_guest_driver);
}