1 #include "kvm/virtio-pci.h"
3 #include "kvm/ioport.h"
5 #include "kvm/virtio-pci-dev.h"
7 #include "kvm/virtio.h"
8 #include "kvm/ioeventfd.h"
10 #include <linux/virtio_pci.h>
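/*
 * Legacy virtio-over-PCI transport: BAR 0 is an I/O port window for the
 * virtio configuration space, BARs 1 and 3 back the MSI-X table and PBA,
 * and queue notifications are short-circuited through ioeventfds.
 */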
static void virtio_pci__ioevent_callback(struct kvm *kvm, void *param)
{
	struct virtio_pci_ioevent_param *ioeventfd = param;

	ioeventfd->vpci->ops.notify_vq(kvm, ioeventfd->vpci->dev, ioeventfd->vq);
}

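/*
 * Register an ioeventfd on the queue notify port so that guest writes to
 * VIRTIO_PCI_QUEUE_NOTIFY kick the device through
 * virtio_pci__ioevent_callback() instead of a synchronous port-I/O exit.
 */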
static int virtio_pci__init_ioeventfd(struct kvm *kvm, struct virtio_pci *vpci, u32 vq)
{
	struct ioevent ioevent;

	vpci->ioeventfds[vq] = (struct virtio_pci_ioevent_param) {
		.vpci	= vpci,
		.vq	= vq,
	};

	ioevent = (struct ioevent) {
		.io_addr	= vpci->base_addr + VIRTIO_PCI_QUEUE_NOTIFY,
		.io_len		= sizeof(u16),
		.fn		= virtio_pci__ioevent_callback,
		.fn_ptr		= &vpci->ioeventfds[vq],
	};

	ioeventfd__add_event(&ioevent);

	return 0;
}

static inline bool virtio_pci__msix_enabled(struct virtio_pci *vpci)
{
	return vpci->pci_hdr.msix.ctrl & PCI_MSIX_FLAGS_ENABLE;
}

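/*
 * Handle reads from the device-specific region of the configuration window:
 * either the MSI-X vector registers or the device's own config bytes.
 */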
static bool virtio_pci__specific_io_in(struct kvm *kvm, struct virtio_pci *vpci, u16 port,
					void *data, int size, int offset)
{
	u32 config_offset;
	int type = virtio__get_dev_specific_field(offset - 20,
						  virtio_pci__msix_enabled(vpci),
						  &config_offset);

	if (type == VIRTIO_PCI_O_MSIX) {
		switch (offset) {
		case VIRTIO_MSI_CONFIG_VECTOR:
			ioport__write16(data, vpci->config_vector);
			break;
		case VIRTIO_MSI_QUEUE_VECTOR:
			ioport__write16(data, vpci->vq_vector[vpci->queue_selector]);
			break;
		}

		return true;
	} else if (type == VIRTIO_PCI_O_CONFIG) {
		u8 cfg;

		cfg = vpci->ops.get_config(kvm, vpci->dev, config_offset);
		ioport__write8(data, cfg);
		return true;
	}

	return false;
}

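/*
 * I/O-port read handler for the virtio configuration window; the common
 * registers are handled here and everything else is forwarded to
 * virtio_pci__specific_io_in().
 */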
static bool virtio_pci__io_in(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	unsigned long offset;
	bool ret = true;
	struct virtio_pci *vpci;
	u32 val;

	vpci = ioport->priv;
	offset = port - vpci->base_addr;

	switch (offset) {
	case VIRTIO_PCI_HOST_FEATURES:
		val = vpci->ops.get_host_features(kvm, vpci->dev);
		ioport__write32(data, val);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		val = vpci->ops.get_pfn_vq(kvm, vpci->dev, vpci->queue_selector);
		ioport__write32(data, val);
		break;
	case VIRTIO_PCI_QUEUE_NUM:
		val = vpci->ops.get_size_vq(kvm, vpci->dev, vpci->queue_selector);
		ioport__write32(data, val);
		break;
	case VIRTIO_PCI_STATUS:
		ioport__write8(data, vpci->status);
		break;
	case VIRTIO_PCI_ISR:
		/* Reading ISR returns the pending bits and deasserts the interrupt line. */
		ioport__write8(data, vpci->isr);
		kvm__irq_line(kvm, vpci->pci_hdr.irq_line, VIRTIO_IRQ_LOW);
		vpci->isr = VIRTIO_IRQ_LOW;
		break;
	default:
		ret = virtio_pci__specific_io_in(kvm, vpci, port, data, size, offset);
		break;
	}

	return ret;
}

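/*
 * Handle writes to the device-specific region: program the MSI-X vectors
 * (building an MSI routing entry for each one) or pass config bytes through
 * to the device.
 */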
static bool virtio_pci__specific_io_out(struct kvm *kvm, struct virtio_pci *vpci, u16 port,
					void *data, int size, int offset)
{
	u32 config_offset, gsi, vec;
	int type = virtio__get_dev_specific_field(offset - 20, virtio_pci__msix_enabled(vpci),
						  &config_offset);

	if (type == VIRTIO_PCI_O_MSIX) {
		switch (offset) {
		case VIRTIO_MSI_CONFIG_VECTOR:
			vec = vpci->config_vector = ioport__read16(data);

			gsi = irq__add_msix_route(kvm,
						  vpci->msix_table[vec].low,
						  vpci->msix_table[vec].high,
						  vpci->msix_table[vec].data);

			vpci->config_gsi = gsi;
			break;
		case VIRTIO_MSI_QUEUE_VECTOR: {
			vec = vpci->vq_vector[vpci->queue_selector] = ioport__read16(data);

			gsi = irq__add_msix_route(kvm,
						  vpci->msix_table[vec].low,
						  vpci->msix_table[vec].high,
						  vpci->msix_table[vec].data);
			vpci->gsis[vpci->queue_selector] = gsi;
			break;
		}
		}

		return true;
	} else if (type == VIRTIO_PCI_O_CONFIG) {
		vpci->ops.set_config(kvm, vpci->dev, *(u8 *)data, config_offset);
		return true;
	}

	return false;
}

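/*
 * I/O-port write handler: queue setup, notification, selector and status
 * writes are handled here, everything else goes to
 * virtio_pci__specific_io_out().
 */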
static bool virtio_pci__io_out(struct ioport *ioport, struct kvm *kvm, u16 port, void *data, int size)
{
	unsigned long offset;
	bool ret = true;
	struct virtio_pci *vpci;
	u32 val;

	vpci = ioport->priv;
	offset = port - vpci->base_addr;

	switch (offset) {
	case VIRTIO_PCI_GUEST_FEATURES:
		val = ioport__read32(data);
		vpci->ops.set_guest_features(kvm, vpci->dev, val);
		break;
	case VIRTIO_PCI_QUEUE_PFN:
		val = ioport__read32(data);
		virtio_pci__init_ioeventfd(kvm, vpci, vpci->queue_selector);
		vpci->ops.init_vq(kvm, vpci->dev, vpci->queue_selector, val);
		break;
	case VIRTIO_PCI_QUEUE_SEL:
		vpci->queue_selector = ioport__read16(data);
		break;
	case VIRTIO_PCI_QUEUE_NOTIFY:
		val = ioport__read16(data);
		vpci->ops.notify_vq(kvm, vpci->dev, val);
		break;
	case VIRTIO_PCI_STATUS:
		vpci->status = ioport__read8(data);
		break;
	default:
		ret = virtio_pci__specific_io_out(kvm, vpci, port, data, size, offset);
		break;
	}

	return ret;
}

static struct ioport_operations virtio_pci__io_ops = {
	.io_in	= virtio_pci__io_in,
	.io_out	= virtio_pci__io_out,
};

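/*
 * MMIO accessors for the MSI-X table and PBA BARs: both simply mirror the
 * guest access into the in-memory copies kept in struct virtio_pci.
 */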
static void callback_mmio_table(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr)
{
	struct virtio_pci *vpci = ptr;
	void *table = &vpci->msix_table;

	if (is_write)
		memcpy(table + addr - vpci->msix_io_block, data, len);
	else
		memcpy(data, table + addr - vpci->msix_io_block, len);
}

static void callback_mmio_pba(u64 addr, u8 *data, u32 len, u8 is_write, void *ptr)
{
	struct virtio_pci *vpci = ptr;
	void *pba = &vpci->msix_pba;

	if (is_write)
		memcpy(pba + addr - vpci->msix_pba_block, data, len);
	else
		memcpy(data, pba + addr - vpci->msix_pba_block, len);
}

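/*
 * Inject a queue interrupt into the guest: through the per-queue MSI-X GSI
 * when MSI-X is enabled (only setting the pending bit if the vector is
 * masked), otherwise through the legacy INTx line.
 */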
int virtio_pci__signal_vq(struct kvm *kvm, struct virtio_pci *vpci, u32 vq)
{
	int tbl = vpci->vq_vector[vq];

	if (virtio_pci__msix_enabled(vpci)) {
		if (vpci->pci_hdr.msix.ctrl & PCI_MSIX_FLAGS_MASKALL ||
		    vpci->msix_table[tbl].ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
			vpci->msix_pba |= 1 << tbl;
			return 0;
		}

		kvm__irq_trigger(kvm, vpci->gsis[vq]);
	} else {
		kvm__irq_trigger(kvm, vpci->pci_hdr.irq_line);
	}

	return 0;
}

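/*
 * Inject a configuration-change interrupt, using the config vector's GSI
 * under MSI-X or raising VIRTIO_PCI_ISR_CONFIG on the legacy line otherwise.
 */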
int virtio_pci__signal_config(struct kvm *kvm, struct virtio_pci *vpci)
{
	int tbl = vpci->config_vector;

	if (virtio_pci__msix_enabled(vpci)) {
		if (vpci->pci_hdr.msix.ctrl & PCI_MSIX_FLAGS_MASKALL ||
		    vpci->msix_table[tbl].ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
			vpci->msix_pba |= 1 << tbl;
			return 0;
		}

		kvm__irq_trigger(kvm, vpci->config_gsi);
	} else {
		vpci->isr = VIRTIO_PCI_ISR_CONFIG;
		kvm__irq_trigger(kvm, vpci->pci_hdr.irq_line);
	}

	return 0;
}

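/*
 * Set up the PCI presence of a virtio device: allocate the I/O port and
 * MSI-X memory regions, fill in the PCI configuration header, and register
 * the device's interrupt line.
 */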
int virtio_pci__init(struct kvm *kvm, struct virtio_pci *vpci, void *dev,
		     int device_id, int subsys_id)
{
	u8 pin, line, ndev;

	vpci->dev = dev;
	vpci->msix_io_block = pci_get_io_space_block(PCI_IO_SIZE);
	vpci->msix_pba_block = pci_get_io_space_block(PCI_IO_SIZE);

	vpci->base_addr = ioport__register(IOPORT_EMPTY, &virtio_pci__io_ops, IOPORT_SIZE, vpci);
	kvm__register_mmio(kvm, vpci->msix_io_block, 0x100, callback_mmio_table, vpci);
	kvm__register_mmio(kvm, vpci->msix_pba_block, 0x100, callback_mmio_pba, vpci);

	vpci->pci_hdr = (struct pci_device_header) {
		.vendor_id		= PCI_VENDOR_ID_REDHAT_QUMRANET,
		.device_id		= device_id,
		.header_type		= PCI_HEADER_TYPE_NORMAL,
		.subsys_vendor_id	= PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET,
		.subsys_id		= subsys_id,
		.bar[0]			= vpci->base_addr | PCI_BASE_ADDRESS_SPACE_IO,
		.bar[1]			= vpci->msix_io_block | PCI_BASE_ADDRESS_SPACE_MEMORY
					| PCI_BASE_ADDRESS_MEM_TYPE_64,
		.bar[3]			= vpci->msix_pba_block | PCI_BASE_ADDRESS_SPACE_MEMORY
					| PCI_BASE_ADDRESS_MEM_TYPE_64,
		.status			= PCI_STATUS_CAP_LIST,
		.capabilities		= (void *)&vpci->pci_hdr.msix - (void *)&vpci->pci_hdr,
	};

	vpci->pci_hdr.msix.cap = PCI_CAP_ID_MSIX;
	vpci->pci_hdr.msix.next = 0;
	vpci->pci_hdr.msix.ctrl = (VIRTIO_PCI_MAX_VQ + 1);

	/*
	 * Both the table and the PBA could be mapped on the same BAR, but
	 * for now we're not short of BARs, so use a separate one for each.
	 */
	vpci->pci_hdr.msix.table_offset = 1; /* Use BAR 1 */
	vpci->pci_hdr.msix.pba_offset = 3; /* Use BAR 3 */
	vpci->config_vector = 0;

	if (irq__register_device(subsys_id, &ndev, &pin, &line) < 0)
		return -1;

	vpci->pci_hdr.irq_pin = pin;
	vpci->pci_hdr.irq_line = line;
	pci__register(&vpci->pci_hdr, ndev);

	return 0;
}