/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */
21 #ifndef __ARCH_X86_KVM_HYPERV_H__
22 #define __ARCH_X86_KVM_HYPERV_H__
24 #include <linux/kvm_host.h>
26 static inline struct kvm_vcpu_hv
*vcpu_to_hv_vcpu(struct kvm_vcpu
*vcpu
)
28 return &vcpu
->arch
.hyperv
;
31 static inline struct kvm_vcpu
*hv_vcpu_to_vcpu(struct kvm_vcpu_hv
*hv_vcpu
)
33 struct kvm_vcpu_arch
*arch
;
35 arch
= container_of(hv_vcpu
, struct kvm_vcpu_arch
, hyperv
);
36 return container_of(arch
, struct kvm_vcpu
, arch
);
39 static inline struct kvm_vcpu_hv_synic
*vcpu_to_synic(struct kvm_vcpu
*vcpu
)
41 return &vcpu
->arch
.hyperv
.synic
;
44 static inline struct kvm_vcpu
*synic_to_vcpu(struct kvm_vcpu_hv_synic
*synic
)
46 return hv_vcpu_to_vcpu(container_of(synic
, struct kvm_vcpu_hv
, synic
));
49 int kvm_hv_set_msr_common(struct kvm_vcpu
*vcpu
, u32 msr
, u64 data
, bool host
);
50 int kvm_hv_get_msr_common(struct kvm_vcpu
*vcpu
, u32 msr
, u64
*pdata
, bool host
);
52 bool kvm_hv_hypercall_enabled(struct kvm
*kvm
);
53 int kvm_hv_hypercall(struct kvm_vcpu
*vcpu
);
55 void kvm_hv_irq_routing_update(struct kvm
*kvm
);
56 int kvm_hv_synic_set_irq(struct kvm
*kvm
, u32 vcpu_id
, u32 sint
);
57 void kvm_hv_synic_send_eoi(struct kvm_vcpu
*vcpu
, int vector
);
58 int kvm_hv_activate_synic(struct kvm_vcpu
*vcpu
, bool dont_zero_synic_pages
);
60 void kvm_hv_vcpu_init(struct kvm_vcpu
*vcpu
);
61 void kvm_hv_vcpu_postcreate(struct kvm_vcpu
*vcpu
);
62 void kvm_hv_vcpu_uninit(struct kvm_vcpu
*vcpu
);
64 bool kvm_hv_assist_page_enabled(struct kvm_vcpu
*vcpu
);
65 bool kvm_hv_get_assist_page(struct kvm_vcpu
*vcpu
,
66 struct hv_vp_assist_page
*assist_page
);
68 static inline struct kvm_vcpu_hv_stimer
*vcpu_to_stimer(struct kvm_vcpu
*vcpu
,
71 return &vcpu_to_hv_vcpu(vcpu
)->stimer
[timer_index
];
74 static inline struct kvm_vcpu
*stimer_to_vcpu(struct kvm_vcpu_hv_stimer
*stimer
)
76 struct kvm_vcpu_hv
*hv_vcpu
;
78 hv_vcpu
= container_of(stimer
- stimer
->index
, struct kvm_vcpu_hv
,
80 return hv_vcpu_to_vcpu(hv_vcpu
);
83 static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu
*vcpu
)
85 return !bitmap_empty(vcpu
->arch
.hyperv
.stimer_pending_bitmap
,
86 HV_SYNIC_STIMER_COUNT
);
89 void kvm_hv_process_stimers(struct kvm_vcpu
*vcpu
);
91 void kvm_hv_setup_tsc_page(struct kvm
*kvm
,
92 struct pvclock_vcpu_time_info
*hv_clock
);
94 void kvm_hv_init_vm(struct kvm
*kvm
);
95 void kvm_hv_destroy_vm(struct kvm
*kvm
);
96 int kvm_vm_ioctl_hv_eventfd(struct kvm
*kvm
, struct kvm_hyperv_eventfd
*args
);
97 int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu
*vcpu
, struct kvm_cpuid2
*cpuid
,
98 struct kvm_cpuid_entry2 __user
*entries
);