// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM L1 hypervisor optimizations on Hyper-V.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mshyperv.h>

#include "hyperv.h"
#include "kvm_onhyperv.h"
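
/*
 * A gfn-based range of guest memory to flush.  Passed as the opaque @data
 * pointer to the flush-list callback below; a NULL range means "flush the
 * entire guest physical address space".
 */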
struct kvm_hv_tlb_range {
	u64 start_gfn;
	u64 pages;
};
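
/*
 * Callback used by hyperv_flush_guest_mapping_range() to populate the
 * hypercall's flush list with the gfn range described by @data.
 */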
static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
		void *data)
{
	struct kvm_hv_tlb_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
			range->pages);
}
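
/*
 * Flush a single root on the underlying Hyper-V instance: a ranged flush
 * if a range was supplied, otherwise a flush of everything mapped by
 * @root_tdp.
 */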
static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
					   struct kvm_hv_tlb_range *range)
{
	if (range)
		return hyperv_flush_guest_mapping_range(root_tdp,
				kvm_fill_hv_flush_list_func, (void *)range);
	else
		return hyperv_flush_guest_mapping(root_tdp);
}
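
/*
 * If all vCPUs share a single tracked root (hv_root_tdp), one flush of
 * that root suffices.  Otherwise flush every unique valid per-vCPU root,
 * and start tracking a common root again if the vCPUs turn out to have
 * converged on exactly one.
 */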
static int __hv_flush_remote_tlbs_range(struct kvm *kvm,
					struct kvm_hv_tlb_range *range)
{
	struct kvm_arch *kvm_arch = &kvm->arch;
	struct kvm_vcpu *vcpu;
	int ret = 0, nr_unique_valid_roots;
	hpa_t root;
	unsigned long i;
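
	/*
	 * hv_root_tdp_lock protects kvm_arch->hv_root_tdp and the per-vCPU
	 * hv_root_tdp fields updated in hv_track_root_tdp() below.
	 */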
	spin_lock(&kvm_arch->hv_root_tdp_lock);

	if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
		nr_unique_valid_roots = 0;

		/*
		 * Flush all valid roots, and see if all vCPUs have converged
		 * on a common root, in which case future flushes can skip the
		 * loop and flush the common root.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			root = vcpu->arch.hv_root_tdp;
			if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
				continue;

			/*
			 * Set the tracked root to the first valid root.  Keep
			 * this root for the entirety of the loop even if more
			 * roots are encountered as a low effort optimization
			 * to avoid flushing the same (first) root again.
			 */
			if (++nr_unique_valid_roots == 1)
				kvm_arch->hv_root_tdp = root;

			if (!ret)
				ret = hv_remote_flush_root_tdp(root, range);

			/*
			 * Stop processing roots if a failure occurred and
			 * multiple valid roots have already been detected.
			 */
			if (ret && nr_unique_valid_roots > 1)
				break;
		}

		/*
		 * The optimized flush of a single root can't be used if there
		 * are multiple valid roots (obviously).
		 */
		if (nr_unique_valid_roots > 1)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
	} else {
		ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
	}

	spin_unlock(&kvm_arch->hv_root_tdp_lock);
	return ret;
}
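
/*
 * Ranged remote TLB flush.  Presumably installed as
 * kvm_x86_ops.flush_remote_tlbs_range by the vendor modules, mirroring
 * the flush_remote_tlbs hook checked in hv_track_root_tdp() below.
 */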
int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
{
	struct kvm_hv_tlb_range range = {
		.start_gfn = start_gfn,
		.pages = nr_pages,
	};

	return __hv_flush_remote_tlbs_range(kvm, &range);
}
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);
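
/*
 * Full remote TLB flush, installed as kvm_x86_ops.flush_remote_tlbs (see
 * the check in hv_track_root_tdp() below).  A NULL range flushes the
 * entire guest physical address space for each root.
 */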
int hv_flush_remote_tlbs(struct kvm *kvm)
{
	return __hv_flush_remote_tlbs_range(kvm, NULL);
}
EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);
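
/*
 * Called when a vCPU loads a new TDP root.  Record the root and, if it
 * differs from the tracked common root, invalidate the common root so the
 * next flush falls back to iterating over all vCPUs.
 */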
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
	struct kvm_arch *kvm_arch = &vcpu->kvm->arch;

	if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) {
		spin_lock(&kvm_arch->hv_root_tdp_lock);
		vcpu->arch.hv_root_tdp = root_tdp;
		if (root_tdp != kvm_arch->hv_root_tdp)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
		spin_unlock(&kvm_arch->hv_root_tdp_lock);
	}
}
EXPORT_SYMBOL_GPL(hv_track_root_tdp);
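
/*
 * Example wiring (a sketch, not taken verbatim from the vendor code): when
 * KVM runs as an L1 hypervisor on Hyper-V with the enlightened TLB flush
 * available, the vendor module is expected to install the hooks along
 * these lines:
 *
 *	kvm_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
 *	kvm_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
 */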