/*
 * kvm tools: virtio PCI transport — tools/kvm/virtio/pci.c
 */
1 #include "kvm/virtio-pci.h"
3 #include "kvm/ioport.h"
4 #include "kvm/kvm.h"
5 #include "kvm/virtio-pci-dev.h"
6 #include "kvm/irq.h"
7 #include "kvm/virtio.h"
8 #include "kvm/ioeventfd.h"
10 #include <linux/virtio_pci.h>
11 #include <string.h>
13 static void virtio_pci__ioevent_callback(struct kvm *kvm, void *param)
15 struct virtio_pci_ioevent_param *ioeventfd = param;
17 ioeventfd->vpci->ops.notify_vq(kvm, ioeventfd->vpci->dev, ioeventfd->vq);
20 static int virtio_pci__init_ioeventfd(struct kvm *kvm, struct virtio_pci *vpci, u32 vq)
22 struct ioevent ioevent;
24 vpci->ioeventfds[vq] = (struct virtio_pci_ioevent_param) {
25 .vpci = vpci,
26 .vq = vq,
29 ioevent = (struct ioevent) {
30 .io_addr = vpci->base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
31 .io_len = sizeof(u16),
32 .fn = virtio_pci__ioevent_callback,
33 .fn_ptr = &vpci->ioeventfds[vq],
34 .datamatch = vq,
35 .fn_kvm = kvm,
36 .fd = eventfd(0, 0),
39 ioeventfd__add_event(&ioevent);
41 return 0;
44 static inline bool virtio_pci__msix_enabled(struct virtio_pci *vpci)
46 return vpci->pci_hdr.msix.ctrl & PCI_MSIX_FLAGS_ENABLE;
49 static bool virtio_pci__specific_io_in(struct kvm *kvm, struct virtio_pci *vpci, u16 port,
50 void *data, int size, int offset)
52 u32 config_offset;
53 int type = virtio__get_dev_specific_field(offset - 20,
54 virtio_pci__msix_enabled(vpci),
55 0, &config_offset);
56 if (type == VIRTIO_PCI_O_MSIX) {
57 switch (offset) {
58 case VIRTIO_MSI_CONFIG_VECTOR:
59 ioport__write16(data, vpci->config_vector);
60 break;
61 case VIRTIO_MSI_QUEUE_VECTOR:
62 ioport__write16(data, vpci->vq_vector[vpci->queue_selector]);
63 break;
66 return true;
67 } else if (type == VIRTIO_PCI_O_CONFIG) {
68 u8 cfg;
70 cfg = vpci->ops.get_config(kvm, vpci->dev, config_offset);
71 ioport__write8(data, cfg);
72 return true;
75 return false;
78 static bool virtio_pci__io_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
80 unsigned long offset;
81 bool ret = true;
82 struct virtio_pci *vpci;
83 u32 val;
85 vpci = ioport->priv;
86 offset = port - vpci->base_addr;
88 switch (offset) {
89 case VIRTIO_PCI_HOST_FEATURES:
90 val = vpci->ops.get_host_features(kvm, vpci->dev);
91 ioport__write32(data, val);
92 break;
93 case VIRTIO_PCI_QUEUE_PFN:
94 val = vpci->ops.get_pfn_vq(kvm, vpci->dev, vpci->queue_selector);
95 ioport__write32(data, val);
96 break;
97 case VIRTIO_PCI_QUEUE_NUM:
98 val = vpci->ops.get_size_vq(kvm, vpci->dev, vpci->queue_selector);
99 ioport__write32(data, val);
100 break;
101 break;
102 case VIRTIO_PCI_STATUS:
103 ioport__write8(data, vpci->status);
104 break;
105 case VIRTIO_PCI_ISR:
106 ioport__write8(data, vpci->isr);
107 kvm__irq_line(kvm, vpci->pci_hdr.irq_line, VIRTIO_IRQ_LOW);
108 vpci->isr = VIRTIO_IRQ_LOW;
109 break;
110 default:
111 ret = virtio_pci__specific_io_in(kvm, vpci, port, data, size, offset);
112 break;
115 return ret;
118 static bool virtio_pci__specific_io_out(struct kvm *kvm, struct virtio_pci *vpci, u16 port,
119 void *data, int size, int offset)
121 u32 config_offset, gsi, vec;
122 int type = virtio__get_dev_specific_field(offset - 20, virtio_pci__msix_enabled(vpci),
123 0, &config_offset);
124 if (type == VIRTIO_PCI_O_MSIX) {
125 switch (offset) {
126 case VIRTIO_MSI_CONFIG_VECTOR:
127 vec = vpci->config_vector = ioport__read16(data);
129 gsi = irq__add_msix_route(kvm,
130 vpci->msix_table[vec].low,
131 vpci->msix_table[vec].high,
132 vpci->msix_table[vec].data);
134 vpci->config_gsi = gsi;
135 break;
136 case VIRTIO_MSI_QUEUE_VECTOR: {
137 vec = vpci->vq_vector[vpci->queue_selector] = ioport__read16(data);
139 gsi = irq__add_msix_route(kvm,
140 vpci->msix_table[vec].low,
141 vpci->msix_table[vec].high,
142 vpci->msix_table[vec].data);
143 vpci->gsis[vpci->queue_selector] = gsi;
144 break;
148 return true;
149 } else if (type == VIRTIO_PCI_O_CONFIG) {
150 vpci->ops.set_config(kvm, vpci->dev, *(u8 *)data, config_offset);
152 return true;
155 return false;
158 static bool virtio_pci__io_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
160 unsigned long offset;
161 bool ret = true;
162 struct virtio_pci *vpci;
163 u32 val;
165 vpci = ioport->priv;
166 offset = port - vpci->base_addr;
168 switch (offset) {
169 case VIRTIO_PCI_GUEST_FEATURES:
170 val = ioport__read32(data);
171 vpci->ops.set_guest_features(kvm, vpci, val);
172 break;
173 case VIRTIO_PCI_QUEUE_PFN:
174 val = ioport__read32(data);
175 virtio_pci__init_ioeventfd(kvm, vpci, vpci->queue_selector);
176 vpci->ops.init_vq(kvm, vpci->dev, vpci->queue_selector, val);
177 break;
178 case VIRTIO_PCI_QUEUE_SEL:
179 vpci->queue_selector = ioport__read16(data);
180 break;
181 case VIRTIO_PCI_QUEUE_NOTIFY:
182 val = ioport__read16(data);
183 vpci->ops.notify_vq(kvm, vpci->dev, val);
184 break;
185 case VIRTIO_PCI_STATUS:
186 vpci->status = ioport__read8(data);
187 break;
188 default:
189 ret = virtio_pci__specific_io_out(kvm, vpci, port, data, size, offset);
190 break;
193 return ret;
196 static struct ioport_operations virtio_pci__io_ops = {
197 .io_in = virtio_pci__io_in,
198 .io_out = virtio_pci__io_out,
201 static void callback_mmio_table(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr)
203 struct virtio_pci *vpci = ptr;
204 void *table = &vpci->msix_table;
206 if (is_write)
207 memcpy(table + addr - vpci->msix_io_block, data, len);
208 else
209 memcpy(data, table + addr - vpci->msix_io_block, len);
212 static void callback_mmio_pba(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr)
214 struct virtio_pci *vpci = ptr;
215 void *pba = &vpci->msix_pba;
217 if (is_write)
218 memcpy(pba + addr - vpci->msix_pba_block, data, len);
219 else
220 memcpy(data, pba + addr - vpci->msix_pba_block, len);
223 int virtio_pci__signal_vq(struct kvm *kvm, struct virtio_pci *vpci, u32 vq)
225 int tbl = vpci->vq_vector[vq];
227 if (virtio_pci__msix_enabled(vpci)) {
228 if (vpci->pci_hdr.msix.ctrl & PCI_MSIX_FLAGS_MASKALL ||
229 vpci->msix_table[tbl].ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
231 vpci->msix_pba |= 1 << tbl;
232 return 0;
235 kvm__irq_trigger(kvm, vpci->gsis[vq]);
236 } else {
237 kvm__irq_trigger(kvm, vpci->pci_hdr.irq_line);
239 return 0;
242 int virtio_pci__signal_config(struct kvm *kvm, struct virtio_pci *vpci)
244 int tbl = vpci->config_vector;
246 if (virtio_pci__msix_enabled(vpci)) {
247 if (vpci->pci_hdr.msix.ctrl & PCI_MSIX_FLAGS_MASKALL ||
248 vpci->msix_table[tbl].ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
250 vpci->msix_pba |= 1 << tbl;
251 return 0;
254 kvm__irq_trigger(kvm, vpci->config_gsi);
255 } else {
256 vpci->isr = VIRTIO_PCI_ISR_CONFIG;
257 kvm__irq_trigger(kvm, vpci->pci_hdr.irq_line);
260 return 0;
263 int virtio_pci__init(struct kvm *kvm, struct virtio_pci *vpci, void *dev,
264 int device_id, int subsys_id)
266 u8 pin, line, ndev;
268 vpci->dev = dev;
269 vpci->msix_io_block = pci_get_io_space_block(PCI_IO_SIZE);
270 vpci->msix_pba_block = pci_get_io_space_block(PCI_IO_SIZE);
272 vpci->base_addr = ioport__register(IOPORT_EMPTY, &virtio_pci__io_ops, IOPORT_SIZE, vpci);
273 kvm__register_mmio(kvm, vpci->msix_io_block, 0x100, callback_mmio_table, vpci);
274 kvm__register_mmio(kvm, vpci->msix_pba_block, 0x100, callback_mmio_pba, vpci);
276 vpci->pci_hdr = (struct pci_device_header) {
277 .vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET,
278 .device_id = device_id,
279 .header_type = PCI_HEADER_TYPE_NORMAL,
280 .revision_id = 0,
281 .class = 0x010000,
282 .subsys_vendor_id = PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET,
283 .subsys_id = subsys_id,
284 .bar[0] = vpci->base_addr | PCI_BASE_ADDRESS_SPACE_IO,
285 .bar[1] = vpci->msix_io_block | PCI_BASE_ADDRESS_SPACE_MEMORY
286 | PCI_BASE_ADDRESS_MEM_TYPE_64,
287 .bar[3] = vpci->msix_pba_block | PCI_BASE_ADDRESS_SPACE_MEMORY
288 | PCI_BASE_ADDRESS_MEM_TYPE_64,
289 .status = PCI_STATUS_CAP_LIST,
290 .capabilities = (void *)&vpci->pci_hdr.msix - (void *)&vpci->pci_hdr,
293 vpci->pci_hdr.msix.cap = PCI_CAP_ID_MSIX;
294 vpci->pci_hdr.msix.next = 0;
295 vpci->pci_hdr.msix.ctrl = (VIRTIO_PCI_MAX_VQ + 1);
298 * Both table and PBA could be mapped on the same BAR, but for now
299 * we're not in short of BARs
301 vpci->pci_hdr.msix.table_offset = 1; /* Use BAR 1 */
302 vpci->pci_hdr.msix.pba_offset = 3; /* Use BAR 3 */
303 vpci->config_vector = 0;
305 if (irq__register_device(VIRTIO_ID_RNG, &ndev, &pin, &line) < 0)
306 return -1;
308 vpci->pci_hdr.irq_pin = pin;
309 vpci->pci_hdr.irq_line = line;
310 pci__register(&vpci->pci_hdr, ndev);
312 return 0;