arch/x86/kvm/vmx/vmx_onhyperv.h
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ARCH_X86_KVM_VMX_ONHYPERV_H__
#define __ARCH_X86_KVM_VMX_ONHYPERV_H__

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>

#include <linux/jump_label.h>

#include "capabilities.h"
#include "hyperv_evmcs.h"
#include "vmcs12.h"
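
/*
 * When the enlightened VMCS is in use, the per-CPU current_vmcs pointer
 * actually refers to a struct hv_enlightened_vmcs; current_evmcs simply
 * reinterprets it as such.
 */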
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#if IS_ENABLED(CONFIG_HYPERV)

DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
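
/*
 * The static key is enabled once KVM decides to use eVMCS, so this check
 * compiles down to a patched branch rather than a memory load on hot paths.
 */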
static __always_inline bool kvm_is_using_evmcs(void)
{
        return static_branch_unlikely(&__kvm_is_using_evmcs);
}
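
/*
 * Translate a VMCS field encoding into a byte offset within struct
 * hv_enlightened_vmcs. Returns a negative value (and warns once) for fields
 * that have no eVMCS equivalent; *clean_field, when non-NULL, receives the
 * dirty-tracking bit covering the field.
 */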
static __always_inline int get_evmcs_offset(unsigned long field,
                                            u16 *clean_field)
{
        int offset = evmcs_field_offset(field, clean_field);

        WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
        return offset;
}
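
/*
 * The evmcs_write*() helpers store directly into the in-memory eVMCS and
 * clear the field's bit in hv_clean_fields, telling the underlying hypervisor
 * to reload that group of fields on the next VM-entry.
 */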
static __always_inline void evmcs_write64(unsigned long field, u64 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u64 *)((char *)current_evmcs + offset) = value;

        current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write32(unsigned long field, u32 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u32 *)((char *)current_evmcs + offset) = value;
        current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write16(unsigned long field, u16 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u16 *)((char *)current_evmcs + offset) = value;
        current_evmcs->hv_clean_fields &= ~clean_field;
}
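
/*
 * The evmcs_read*() helpers read straight from the in-memory eVMCS;
 * unsupported fields read back as 0.
 */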
static __always_inline u64 evmcs_read64(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u64 *)((char *)current_evmcs + offset);
}

static __always_inline u32 evmcs_read32(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u32 *)((char *)current_evmcs + offset);
}

static __always_inline u16 evmcs_read16(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u16 *)((char *)current_evmcs + offset);
}
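
/*
 * Make the eVMCS at @phys_addr current: publish it through the VP assist
 * page and set enlighten_vmentry so the next VM-entry consumes it in place
 * of a hardware VMPTRLD.
 */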
static inline void evmcs_load(u64 phys_addr)
{
        struct hv_vp_assist_page *vp_ap =
                hv_get_vp_assist_page(smp_processor_id());

        /*
         * When enabling eVMCS, KVM verifies that every CPU has a valid
         * hv_vp_assist_page() and aborts enabling the feature otherwise. CPU
         * onlining path is also checked in vmx_hardware_enable().
         */
        if (KVM_BUG_ON(!vp_ap, kvm_get_running_vcpu()->kvm))
                return;

        if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
                vp_ap->nested_control.features.directhypercall = 1;
        vp_ap->current_nested_vmcs = phys_addr;
        vp_ap->enlighten_vmentry = 1;
}
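
/*
 * Strips VMCS controls that have no eVMCS counterpart from the computed
 * vmcs_config (implemented out of line).
 */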
void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
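/*
 * !CONFIG_HYPERV stubs: eVMCS can never be in use, so these accessors
 * compile away and callers need no #ifdefs of their own.
 */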
static __always_inline bool kvm_is_using_evmcs(void) { return false; }
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
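
/*
 * Illustrative caller pattern (a sketch, not part of this header): the VMCS
 * accessors in vmx_ops.h route through these helpers when eVMCS is active,
 * roughly:
 *
 *        if (kvm_is_using_evmcs())
 *                evmcs_write64(field, value);
 *        else
 *                __vmcs_writel(field, value);
 */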

#endif /* __ARCH_X86_KVM_VMX_ONHYPERV_H__ */