// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
                                   gpa_t addr, int len)
{
        /* is it in a batchable area?
         * (addr,len) is fully included in
         * (zone->addr, zone->size)
         */
        if (addr + len < addr)          /* reject wrap-around of addr + len */
                return 0;
        if (addr < dev->zone.addr)
                return 0;
        if (addr + len > dev->zone.addr + dev->zone.size)
                return 0;
        return 1;
}

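/*
 * A quick worked example of the wrap-around check above: with a 64-bit
 * gpa_t, addr = 0xfffffffffffffff8 and len = 0x10 make addr + len wrap
 * to 0x8, so "addr + len < addr" rejects the access before the zone
 * bounds are even consulted.
 */
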
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
                                struct kvm_io_device *this, gpa_t addr,
                                int len, const void *val)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
        __u32 insert;

        if (!coalesced_mmio_in_range(dev, addr, len))
                return -EOPNOTSUPP;

        spin_lock(&dev->kvm->ring_lock);

        /*
         * last is the index of the entry to fill.  Verify userspace hasn't
         * set last to be out of range, and that there is room in the ring.
         * Leave one entry free in the ring so that userspace can differentiate
         * between an empty ring and a full ring.
         */
        insert = READ_ONCE(ring->last);
        if (insert >= KVM_COALESCED_MMIO_MAX ||
            (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;
        }

        /* copy data in first free entry of the ring */
        ring->coalesced_mmio[insert].phys_addr = addr;
        ring->coalesced_mmio[insert].len = len;
        memcpy(ring->coalesced_mmio[insert].data, val, len);
        ring->coalesced_mmio[insert].pio = dev->zone.pio;
        smp_wmb();      /* publish the entry before advancing 'last' */
        ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
        spin_unlock(&dev->kvm->ring_lock);
        return 0;
}

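/*
 * The consumer side lives in userspace.  A minimal sketch of how a VMM
 * might drain the ring (not part of this file; handle_mmio() is a
 * hypothetical handler, and 'ring' is the mapped kvm_coalesced_mmio_ring):
 *
 *      while (ring->first != ring->last) {
 *              struct kvm_coalesced_mmio *ent =
 *                      &ring->coalesced_mmio[ring->first];
 *              handle_mmio(ent->phys_addr, ent->data, ent->len);
 *              ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *      }
 */
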
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

        list_del(&dev->list);

        kfree(dev);
}

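/*
 * Only a write handler is wired up: reads cannot be coalesced, so a read
 * falling inside a registered zone is simply not claimed by this device
 * and takes the normal MMIO exit path to userspace.
 */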
static const struct kvm_io_device_ops coalesced_mmio_ops = {
        .write      = coalesced_mmio_write,
        .destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
        struct page *page;

        page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!page)
                return -ENOMEM;

        kvm->coalesced_mmio_ring = page_address(page);

        /*
         * We're using this spinlock to sync access to the coalesced ring.
         * The list doesn't need its own lock since device registration and
         * unregistration should only happen when kvm->slots_lock is held.
         */
        spin_lock_init(&kvm->ring_lock);
        INIT_LIST_HEAD(&kvm->coalesced_zones);

        return 0;
}

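/*
 * The ring page allocated above is shared with userspace: per the KVM API,
 * the VMM maps it from the vcpu fd at offset
 * KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE, which is where it obtains
 * the kvm_coalesced_mmio_ring pointer used in the drain sketch above.
 */
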
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
        if (kvm->coalesced_mmio_ring)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
                                         struct kvm_coalesced_mmio_zone *zone)
{
        int ret;
        struct kvm_coalesced_mmio_dev *dev;

        if (zone->pio != 1 && zone->pio != 0)
                return -EINVAL;

        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
                      GFP_KERNEL_ACCOUNT);
        if (!dev)
                return -ENOMEM;

        kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
        dev->kvm = kvm;
        dev->zone = *zone;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm,
                                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
                                zone->addr, zone->size, &dev->dev);
        if (ret < 0)
                goto out_free_dev;
        list_add_tail(&dev->list, &kvm->coalesced_zones);
        mutex_unlock(&kvm->slots_lock);

        return 0;

out_free_dev:
        mutex_unlock(&kvm->slots_lock);
        kfree(dev);

        return ret;
}

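/*
 * Userspace reaches this function through the KVM_REGISTER_COALESCED_MMIO
 * vm ioctl.  A minimal usage sketch (addr and size are made-up example
 * values; error handling omitted):
 *
 *      struct kvm_coalesced_mmio_zone zone = {
 *              .addr = 0xfed00000,
 *              .size = 0x1000,
 *              .pio  = 0,
 *      };
 *      ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */
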
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                           struct kvm_coalesced_mmio_zone *zone)
{
        struct kvm_coalesced_mmio_dev *dev, *tmp;
        int r;

        if (zone->pio != 1 && zone->pio != 0)
                return -EINVAL;

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
                if (zone->pio == dev->zone.pio &&
                    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
                        r = kvm_io_bus_unregister_dev(kvm,
                                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);

                        /*
                         * On failure, unregister destroys all devices on the
                         * bus, including the target device.  There's no need
                         * to restart the walk as there aren't any zones left.
                         */
                        if (r)
                                break;
                }
        }

        mutex_unlock(&kvm->slots_lock);

        /*
         * Ignore the result of kvm_io_bus_unregister_dev(); from userspace's
         * perspective, the coalesced MMIO is most definitely unregistered.
         */
        return 0;
}