/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */

#include "config-host.h"
#include "qemu-common.h"

#define EXPECTED_KVM_API_VERSION 12

#if EXPECTED_KVM_API_VERSION != KVM_API_VERSION
#error libkvm: userspace and kernel version mismatch
#endif

int kvm_pit_reinject = 1;

#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))

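/*
 * GSI allocation bookkeeping: every global system interrupt that currently
 * has a routing entry is tracked as one bit in s->used_gsi_bitmap (bit
 * gsi % 32 of word gsi / 32).  set_gsi() and clear_gsi() below maintain
 * that bitmap and only log out-of-range GSIs.
 */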
static inline void set_gsi(KVMState *s, unsigned int gsi)
{
    uint32_t *bitmap = s->used_gsi_bitmap;

    if (gsi < s->max_gsi) {
        bitmap[gsi / 32] |= 1U << (gsi % 32);
    } else {
        DPRINTF("Invalid GSI %u\n", gsi);
    }
}

static inline void clear_gsi(KVMState *s, unsigned int gsi)
{
    uint32_t *bitmap = s->used_gsi_bitmap;

    if (gsi < s->max_gsi) {
        bitmap[gsi / 32] &= ~(1U << (gsi % 32));
    } else {
        DPRINTF("Invalid GSI %u\n", gsi);
    }
}

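/*
 * Query how many GSIs the kernel supports (KVM_CAP_IRQ_ROUTING reports the
 * GSI count when available), size the used-GSI bitmap accordingly, and
 * allocate an empty user-space copy of the routing table that the
 * kvm_add/del_routing_entry() helpers below edit before it is pushed to the
 * kernel by kvm_commit_irq_routes().
 */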
static int kvm_init_irq_routing(KVMState *s)
{
    int r = 0;

#ifdef KVM_CAP_IRQ_ROUTING
    int i, gsi_count, gsi_bits;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = qemu_mallocz(gsi_bits / 8);
        s->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = qemu_mallocz(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    r = kvm_arch_init_irq_routing();
#endif

    return r;
}

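/*
 * Create the in-kernel interrupt controller models if both the user asked
 * for them (kvm_irqchip) and the kernel advertises KVM_CAP_IRQCHIP.  When
 * KVM_CAP_IRQ_INJECT_STATUS is present, the KVM_IRQ_LINE_STATUS ioctl is
 * preferred so that injections can report whether an interrupt was
 * coalesced.
 */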
int kvm_create_irqchip(KVMState *s)
{
    int r = 0;

#ifdef KVM_CAP_IRQCHIP
    if (!kvm_irqchip || !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    r = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (r < 0) {
        fprintf(stderr, "Create kernel PIC irqchip failed\n");
        return r;
    }

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
#if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
#endif
    s->irqchip_in_kernel = 1;

    r = kvm_init_irq_routing(s);
#endif

    return r;
}

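/*
 * Raise or lower an interrupt line of the in-kernel irqchip.  With
 * KVM_CAP_IRQ_INJECT_STATUS the optional *status out-parameter reports the
 * injection result (e.g. whether the interrupt was coalesced); otherwise it
 * is simply set to 1.
 */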
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq(int irq, int level, int *status)
{
    struct kvm_irq_level event;
    int r;

    if (!kvm_state->irqchip_in_kernel) {
        return 0;
    }

    event.level = level;
    event.irq = irq;
    r = kvm_vm_ioctl(kvm_state, kvm_state->irqchip_inject_ioctl,
                     &event);
    if (r < 0) {
        perror("kvm_set_irq");
    }

    if (status) {
#ifdef KVM_CAP_IRQ_INJECT_STATUS
        *status = (kvm_state->irqchip_inject_ioctl == KVM_IRQ_LINE) ?
            1 : event.status;
#else
        *status = 1;
#endif
    }

    return 1;
}

int kvm_get_irqchip(KVMState *s, struct kvm_irqchip *chip)
{
    int r;

    if (!s->irqchip_in_kernel) {
        return 0;
    }
    r = kvm_vm_ioctl(s, KVM_GET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_get_irqchip\n");
    }
    return r;
}

int kvm_set_irqchip(KVMState *s, struct kvm_irqchip *chip)
{
    int r;

    if (!s->irqchip_in_kernel) {
        return 0;
    }
    r = kvm_vm_ioctl(s, KVM_SET_IRQCHIP, chip);
    if (r < 0) {
        perror("kvm_set_irqchip\n");
    }
    return r;
}

#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
int kvm_assign_pci_device(KVMState *s,
                          struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, assigned_dev);
}

static int kvm_old_assign_irq(KVMState *s,
                              struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, assigned_irq);
}

#ifdef KVM_CAP_ASSIGN_DEV_IRQ
int kvm_assign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
    if (ret > 0) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, assigned_irq);
    }

    return kvm_old_assign_irq(s, assigned_irq);
}

int kvm_deassign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
{
    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, assigned_irq);
}
#else
int kvm_assign_irq(KVMState *s, struct kvm_assigned_irq *assigned_irq)
{
    return kvm_old_assign_irq(s, assigned_irq);
}
#endif
#endif

#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
int kvm_deassign_pci_device(KVMState *s,
                            struct kvm_assigned_pci_dev *assigned_dev)
{
    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, assigned_dev);
}
#endif

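/*
 * Tell the in-kernel PIT whether to re-inject timer ticks that were lost
 * while interrupts were masked (KVM_REINJECT_CONTROL).  Falls back to
 * -ENOSYS when the capability is not available.
 */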
int kvm_reinject_control(KVMState *s, int pit_reinject)
{
#ifdef KVM_CAP_REINJECT_CONTROL
    int r;
    struct kvm_reinject_control control;

    control.pit_reinject = pit_reinject;

    r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
    if (r > 0) {
        return kvm_vm_ioctl(s, KVM_REINJECT_CONTROL, &control);
    }
#endif
    return -ENOSYS;
}

int kvm_has_gsi_routing(void)
{
    int r = 0;

#ifdef KVM_CAP_IRQ_ROUTING
    r = kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#endif
    return r;
}

int kvm_clear_gsi_routes(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    kvm_state->irq_routes->nr = 0;
    return 0;
#else
    return -ENOSYS;
#endif
}

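/*
 * Append one entry to the user-space copy of the GSI routing table, growing
 * the table geometrically (doubling) when it is full, and mark the entry's
 * GSI as used in the bitmap.  The change only reaches the kernel once
 * kvm_commit_irq_routes() is called.
 */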
int kvm_add_routing_entry(struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing *z;
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        z = realloc(s->irq_routes, size);
        if (!z) {
            return -ENOMEM;
        }
        s->nr_allocated_irq_routes = n;
        s->irq_routes = z;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    return 0;
#else
    return -ENOSYS;
#endif
}

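/* Convenience wrapper: route GSI 'gsi' to pin 'pin' of irqchip 'irqchip'. */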
int kvm_add_irq_route(int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_add_routing_entry(&e);
#else
    return -ENOSYS;
#endif
}

int kvm_del_routing_entry(struct kvm_irq_routing_entry *entry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *e, *p;
    int i, gsi, found = 0;

    gsi = entry->gsi;

    for (i = 0; i < s->irq_routes->nr; ++i) {
        e = &s->irq_routes->entries[i];
        if (e->type == entry->type && e->gsi == gsi) {
            switch (e->type) {
            case KVM_IRQ_ROUTING_IRQCHIP: {
                if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip
                    && e->u.irqchip.pin == entry->u.irqchip.pin) {
                    p = &s->irq_routes->entries[--s->irq_routes->nr];
                    *e = *p;
                    found = 1;
                }
                break;
            }
            case KVM_IRQ_ROUTING_MSI: {
                if (e->u.msi.address_lo == entry->u.msi.address_lo
                    && e->u.msi.address_hi == entry->u.msi.address_hi
                    && e->u.msi.data == entry->u.msi.data) {
                    p = &s->irq_routes->entries[--s->irq_routes->nr];
                    *e = *p;
                    found = 1;
                }
                break;
            }
            default:
                break;
            }
            if (found) {
                /* If there are no other users of this GSI
                 * mark it available in the bitmap */
                for (i = 0; i < s->irq_routes->nr; i++) {
                    e = &s->irq_routes->entries[i];
                    if (e->gsi == gsi) {
                        break;
                    }
                }
                if (i == s->irq_routes->nr) {
                    clear_gsi(s, gsi);
                }

                return 0;
            }
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}

int kvm_update_routing_entry(struct kvm_irq_routing_entry *entry,
                             struct kvm_irq_routing_entry *newentry)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;
    struct kvm_irq_routing_entry *e;
    int i;

    if (entry->gsi != newentry->gsi || entry->type != newentry->type) {
        return -EINVAL;
    }

    for (i = 0; i < s->irq_routes->nr; ++i) {
        e = &s->irq_routes->entries[i];
        if (e->type != entry->type || e->gsi != entry->gsi) {
            continue;
        }
        switch (e->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
            if (e->u.irqchip.irqchip == entry->u.irqchip.irqchip &&
                e->u.irqchip.pin == entry->u.irqchip.pin) {
                memcpy(&e->u.irqchip, &newentry->u.irqchip,
                       sizeof e->u.irqchip);
                return 0;
            }
            break;
        case KVM_IRQ_ROUTING_MSI:
            if (e->u.msi.address_lo == entry->u.msi.address_lo &&
                e->u.msi.address_hi == entry->u.msi.address_hi &&
                e->u.msi.data == entry->u.msi.data) {
                memcpy(&e->u.msi, &newentry->u.msi, sizeof e->u.msi);
                return 0;
            }
            break;
        default:
            break;
        }
    }
    return -ESRCH;
#else
    return -ENOSYS;
#endif
}

int kvm_del_irq_route(int gsi, int irqchip, int pin)
{
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing_entry e;

    e.gsi = gsi;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    return kvm_del_routing_entry(&e);
#else
    return -ENOSYS;
#endif
}

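/*
 * Push the complete user-space routing table to the kernel in a single
 * KVM_SET_GSI_ROUTING call; the add/del/update operations above only edit
 * the local copy.
 */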
int kvm_commit_irq_routes(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    KVMState *s = kvm_state;

    s->irq_routes->flags = 0;
    return kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
#else
    return -ENOSYS;
#endif
}

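/*
 * Find a free GSI for a new (typically MSI) route: scan the used-GSI bitmap
 * one 32-bit word at a time and return the lowest clear bit.
 */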
int kvm_get_irq_route_gsi(void)
{
    KVMState *s = kvm_state;
    int i, bit;
    uint32_t *buf = s->used_gsi_bitmap;

    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < s->max_gsi / 32; i++) {
        bit = ffs(~buf[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
    }

    return -ENOSPC;
}

static void kvm_msi_routing_entry(struct kvm_irq_routing_entry *e,
                                  KVMMsiMessage *msg)
{
    e->gsi = msg->gsi;
    e->type = KVM_IRQ_ROUTING_MSI;
    e->flags = 0;
    e->u.msi.address_lo = msg->addr_lo;
    e->u.msi.address_hi = msg->addr_hi;
    e->u.msi.data = msg->data;
}

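/*
 * KVMMsiMessage helpers: convert an MSI address/data pair into a routing
 * entry, allocate a GSI for it on add, drop it on delete, and rewrite the
 * existing entry in place on update.
 */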
int kvm_msi_message_add(KVMMsiMessage *msg)
{
    struct kvm_irq_routing_entry e;
    int ret;

    ret = kvm_get_irq_route_gsi();
    if (ret < 0) {
        return ret;
    }
    msg->gsi = ret;

    kvm_msi_routing_entry(&e, msg);
    return kvm_add_routing_entry(&e);
}

int kvm_msi_message_del(KVMMsiMessage *msg)
{
    struct kvm_irq_routing_entry e;

    kvm_msi_routing_entry(&e, msg);
    return kvm_del_routing_entry(&e);
}

int kvm_msi_message_update(KVMMsiMessage *old, KVMMsiMessage *new)
{
    struct kvm_irq_routing_entry e1, e2;
    int ret;

    new->gsi = old->gsi;
    if (memcmp(old, new, sizeof(KVMMsiMessage)) == 0) {
        return 0;
    }

    kvm_msi_routing_entry(&e1, old);
    kvm_msi_routing_entry(&e2, new);

    ret = kvm_update_routing_entry(&e1, &e2);
    if (ret < 0) {
        return ret;
    }

    return 1;
}

#ifdef KVM_CAP_DEVICE_MSIX
int kvm_assign_set_msix_nr(KVMState *s, struct kvm_assigned_msix_nr *msix_nr)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, msix_nr);
}

int kvm_assign_set_msix_entry(KVMState *s,
                              struct kvm_assigned_msix_entry *entry)
{
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, entry);
}
#endif

void kvm_hpet_disable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    kvm_get_pit2(kvm_state, &ps2);
    ps2.flags |= KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_state, &ps2);
}

void kvm_hpet_enable_kpit(void)
{
    struct kvm_pit_state2 ps2;

    kvm_get_pit2(kvm_state, &ps2);
    ps2.flags &= ~KVM_PIT_FLAGS_HPET_LEGACY;
    kvm_set_pit2(kvm_state, &ps2);
}

#if !defined(TARGET_I386)
int kvm_arch_init_irq_routing(void)
{
    return 0;
}
#endif

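/*
 * Device assignment I/O port pass-through: every guest I/O port range that
 * should be handled directly is kept on ioport_regions.  Access rights are
 * per VCPU, so changes are applied by running do_set_ioport_access() on
 * each CPU via run_on_cpu(), and kvm_update_ioport_access() replays the
 * whole list on a single VCPU.
 */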
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
typedef struct KVMIOPortRegion {
    unsigned long start;
    unsigned long size;
    int status;
    QLIST_ENTRY(KVMIOPortRegion) entry;
} KVMIOPortRegion;

static QLIST_HEAD(, KVMIOPortRegion) ioport_regions;

static void do_set_ioport_access(void *data)
{
    KVMIOPortRegion *region = data;
    bool enable = region->status > 0;
    int r;

    r = kvm_arch_set_ioport_access(region->start, region->size, enable);
    if (r < 0) {
        region->status = r;
    } else {
        region->status = 1;
    }
}

int kvm_add_ioport_region(unsigned long start, unsigned long size)
{
    KVMIOPortRegion *region = qemu_mallocz(sizeof(KVMIOPortRegion));
    CPUState *env;
    int r = 0;

    region->start = start;
    region->size = size;
    region->status = 1;
    QLIST_INSERT_HEAD(&ioport_regions, region, entry);

    if (qemu_system_is_ready()) {
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            run_on_cpu(env, do_set_ioport_access, region);
            if (region->status < 0) {
                r = region->status;
                kvm_remove_ioport_region(start, size);
                break;
            }
        }
    }
    return r;
}

int kvm_remove_ioport_region(unsigned long start, unsigned long size)
{
    KVMIOPortRegion *region, *tmp;
    CPUState *env;
    int r = -ENOENT;

    QLIST_FOREACH_SAFE(region, &ioport_regions, entry, tmp) {
        if (region->start == start && region->size == size) {
            region->status = 0;
            if (qemu_system_is_ready()) {
                for (env = first_cpu; env != NULL; env = env->next_cpu) {
                    run_on_cpu(env, do_set_ioport_access, region);
                }
            }
            QLIST_REMOVE(region, entry);
            qemu_free(region);
            r = 0;
        }
    }
    return r;
}
#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */

int kvm_update_ioport_access(CPUState *env)
{
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
    KVMIOPortRegion *region;
    int r;

    assert(qemu_cpu_is_self(env));

    QLIST_FOREACH(region, &ioport_regions, entry) {
        bool enable = region->status > 0;

        r = kvm_arch_set_ioport_access(region->start, region->size, enable);
        if (r < 0) {
            return r;
        }
    }
#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
    return 0;
}

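/*
 * Select which VCPU the kernel treats as the boot CPU (BSP).  Only has an
 * effect when the kernel exports KVM_CAP_SET_BOOT_CPU_ID.
 */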
int kvm_set_boot_cpu_id(KVMState *s, uint32_t id)
{
#ifdef KVM_CAP_SET_BOOT_CPU_ID
    int r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);

    if (r > 0) {
        return kvm_vm_ioctl(s, KVM_SET_BOOT_CPU_ID, id);
    }
    return -ENOSYS;
#else
    return -ENOSYS;
#endif
}
);