/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* is it in a batchable area ?
	 * (addr,len) is fully included in
	 * (zone->addr, zone->size)
	 */
	if (addr + len < addr)		/* gpa_t wrap-around: reject */
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it ? */

	/* last is the first free entry
	 * check if we don't meet the first used entry
	 * there is always one unused entry in the buffer
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

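/*
 * Worked example for the check above (the numbers are purely illustrative):
 * with first == 7 and last == 3, avail == (7 - 3 - 1) == 3, so up to three
 * more entries can be queued; slot 6, the one just below 'first', is the
 * entry deliberately kept unused, which is what lets first == last always
 * mean "empty" rather than "full".
 */
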
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data in first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();	/* publish the entry before advancing 'last' */
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

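/*
 * The entries queued above are never consumed in the kernel: userspace maps
 * the ring page (allocated in kvm_coalesced_mmio_init() below), and typically
 * after KVM_RUN returns it replays the buffered writes against its device
 * model and advances 'first'.  A rough consumer-side sketch (userspace
 * pseudo-code; handle_mmio_write() is an illustrative name, not a real API):
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *e = &ring->coalesced_mmio[ring->first];
 *		handle_mmio_write(e->phys_addr, e->data, e->len);
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */
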
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

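/*
 * These ops are invoked through the KVM_MMIO_BUS: a guest MMIO write reaches
 * coalesced_mmio_write() via kvm_io_bus_write(); the write op either queues
 * the access and returns 0, or returns -EOPNOTSUPP so that the access falls
 * through to other devices or, ultimately, to a KVM_EXIT_MMIO exit to
 * userspace.
 */
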
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

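/*
 * The page allocated above is the one userspace sees: kvm_main.c maps
 * kvm->coalesced_mmio_ring into the vcpu mmap region at
 * KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE, which is how the consumer
 * sketched near coalesced_mmio_write() reads 'first', 'last' and the entries
 * without any extra ioctl.
 */
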
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return ret;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);

	kfree(dev);

	return ret;
}

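/*
 * Registration is driven by the KVM_REGISTER_COALESCED_MMIO vm ioctl.  A
 * minimal userspace sketch (the address and size are made-up example values,
 * not anything this file requires):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xf0000000,
 *		.size = 0x1000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *
 * Guest writes inside that range are then batched in the ring instead of
 * each one forcing an exit to userspace.
 */
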
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}

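/*
 * This is the handler behind the KVM_UNREGISTER_COALESCED_MMIO vm ioctl.
 * Note that the range test is reused in the opposite direction: every
 * registered device whose zone fully contains the (zone->addr, zone->size)
 * passed in is taken off the bus and freed via coalesced_mmio_destructor().
 */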