virt/kvm/coalesced_mmio.c
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* is it in a batchable area ?
	 * (addr,len) is fully included in
	 * (zone->addr, zone->size)
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;	/* gpa_t overflow */
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it ? */

	/* last is the first free entry
	 * check if we don't meet the first used entry
	 * there is always one unused entry in the buffer
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}
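
/*
 * Illustration of the invariant above (the numbers are chosen for the
 * example only): "last" names the slot the next entry will go into and
 * "first" the oldest entry userspace has not consumed yet, so slots
 * first .. last-1 (modulo KVM_COALESCED_MMIO_MAX) are in use.  With
 * first == 6 and last == 2, avail == (6 - 2 - 1) % KVM_COALESCED_MMIO_MAX
 * == 3: slots 2, 3 and 4 may still be filled, but slot 5 must stay unused,
 * because advancing "last" to 6 would make last == first, which the
 * consumer treats as an empty ring.
 */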
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	/*
	 * The ring page is shared with (and writable by) userspace, so read
	 * ring->last once and range check it before using it as an index.
	 */
	insert = READ_ONCE(ring->last);
	if (!coalesced_mmio_has_room(dev, insert) ||
	    insert >= KVM_COALESCED_MMIO_MAX) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data in first free entry of the ring */

	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
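
/*
 * For illustration, a userspace consumer drains the ring roughly as in the
 * sketch below.  replay_mmio_write() is a hypothetical stand-in for the
 * VMM's device-emulation path, and the barrier is whatever write barrier
 * the VMM uses in place of the kernel's smp_wmb():
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *
 *		replay_mmio_write(ent->phys_addr, ent->data, ent->len);
 *		write_barrier();
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 *
 * See the note after kvm_coalesced_mmio_init() for how userspace obtains
 * the "ring" pointer in the first place.
 */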
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}
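
/*
 * For illustration, userspace reaches the page allocated above through the
 * vcpu mmap region: on architectures that define
 * KVM_COALESCED_MMIO_PAGE_OFFSET, the ring sits that many pages past
 * struct kvm_run.  A rough sketch (kvm_fd and vcpu_fd are the usual
 * /dev/kvm and KVM_CREATE_VCPU file descriptors, error handling omitted):
 *
 *	long psize = sysconf(_SC_PAGESIZE);
 *	int msize = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, msize, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	struct kvm_coalesced_mmio_ring *ring =
 *		(void *)run + KVM_COALESCED_MMIO_PAGE_OFFSET * psize;
 */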
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}
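
/*
 * For illustration, userspace registers and removes zones through the
 * KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO ioctls on the
 * VM fd; the address below is a made-up example region:
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfebf0000,
 *		.size = 0x1000,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
 *		perror("KVM_REGISTER_COALESCED_MMIO");
 *	...
 *	ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 */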
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}