// SPDX-License-Identifier: GPL-2.0-only
/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */

#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/types.h>
#include <asm/apic.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>

void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);

/* Storage to save the hypercall page temporarily for hibernation */
static void *hv_hypercall_pg_saved;

u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);

struct hv_vp_assist_page **hv_vp_assist_page;
EXPORT_SYMBOL_GPL(hv_vp_assist_page);

void __percpu **hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);

u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);

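/*
 * Pages shared with the hypervisor must be exactly HV_HYP_PAGE_SIZE (4K).
 * The helpers below assume the guest PAGE_SIZE matches that, which the
 * BUILD_BUG_ON() asserts at compile time.
 */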
void *hv_alloc_hyperv_page(void)
{
	BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);

	return (void *)__get_free_page(GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);

void *hv_alloc_hyperv_zeroed_page(void)
{
	BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);

	return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);

void hv_free_hyperv_page(unsigned long addr)
{
	free_page(addr);
}
EXPORT_SYMBOL_GPL(hv_free_hyperv_page);

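/*
 * hv_cpu_init()/hv_cpu_die() are registered as CPU hotplug callbacks from
 * hyperv_init(). When a CPU comes online, hv_cpu_init() allocates the
 * per-CPU hypercall input page, caches the VP index reported by the
 * hypervisor and, when available, enables the VP assist page.
 */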
static int hv_cpu_init(unsigned int cpu)
{
	u64 msr_vp_index;
	struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
	void **input_arg;
	struct page *pg;

	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	pg = alloc_page(GFP_KERNEL);
	if (unlikely(!pg))
		return -ENOMEM;
	*input_arg = page_address(pg);

	hv_get_vp_index(msr_vp_index);

	hv_vp_index[smp_processor_id()] = msr_vp_index;

	if (msr_vp_index > hv_max_vp_index)
		hv_max_vp_index = msr_vp_index;

	if (!hv_vp_assist_page)
		return 0;

	/*
	 * The VP ASSIST PAGE is an "overlay" page (see Hyper-V TLFS's Section
	 * 5.2.1 "GPA Overlay Pages"). Here it must be zeroed out to make sure
	 * we always write the EOI MSR in hv_apic_eoi_write() *after* the
	 * EOI optimization is disabled in hv_cpu_die(), otherwise a CPU may
	 * not be stopped in the case of CPU offlining and the VM will hang.
	 */
	if (!*hvp)
		*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO,
				 PAGE_KERNEL);

	if (*hvp) {
		u64 val;

		val = vmalloc_to_pfn(*hvp);
		val = (val << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) |
			HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;

		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, val);
	}

	return 0;
}

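/*
 * TSC reenlightenment: when a VM is migrated to a host with a different TSC
 * frequency, Hyper-V temporarily emulates TSC accesses and raises the
 * reenlightenment interrupt so the guest can update its timekeeping. The
 * callback registered via set_hv_tscchange_cb() (used e.g. by KVM when
 * running nested) runs from a delayed work and is expected to eventually
 * call hyperv_stop_tsc_emulation() to turn the (slow) emulation off.
 */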
static void (*hv_reenlightenment_cb)(void);

static void hv_reenlightenment_notify(struct work_struct *dummy)
{
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	/* Don't issue the callback if TSC accesses are not emulated */
	if (hv_reenlightenment_cb && emu_status.inprogress)
		hv_reenlightenment_cb();
}
static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);

void hyperv_stop_tsc_emulation(void)
{
	u64 freq;
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	emu_status.inprogress = 0;
	wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
	tsc_khz = div64_u64(freq, 1000);
}
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);

static inline bool hv_reenlightenment_available(void)
{
	/*
	 * Check for required features and privileges to make TSC frequency
	 * change notifications work.
	 */
	return ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
		ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
		ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT;
}

__visible void __irq_entry hyperv_reenlightenment_intr(struct pt_regs *regs)
{
	entering_ack_irq();

	inc_irq_stat(irq_hv_reenlightenment_count);

	schedule_delayed_work(&hv_reenlightenment_work, HZ/10);

	exiting_irq();
}

void set_hv_tscchange_cb(void (*cb)(void))
{
	struct hv_reenlightenment_control re_ctrl = {
		.vector = HYPERV_REENLIGHTENMENT_VECTOR,
		.enabled = 1,
		.target_vp = hv_vp_index[smp_processor_id()]
	};
	struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};

	if (!hv_reenlightenment_available()) {
		pr_warn("Hyper-V: reenlightenment support is unavailable\n");
		return;
	}

	hv_reenlightenment_cb = cb;

	/* Make sure callback is registered before we write to MSRs */
	wmb();

	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));
}
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);

void clear_hv_tscchange_cb(void)
{
	struct hv_reenlightenment_control re_ctrl;

	if (!hv_reenlightenment_available())
		return;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	re_ctrl.enabled = 0;
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);

	hv_reenlightenment_cb = NULL;
}
EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);

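/*
 * CPU hotplug teardown: free the per-CPU hypercall input page, disable the
 * VP assist page and, if the dying CPU is the one currently receiving
 * reenlightenment notifications, retarget them to another online CPU so
 * notifications are not lost.
 */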
static int hv_cpu_die(unsigned int cpu)
{
	struct hv_reenlightenment_control re_ctrl;
	unsigned int new_cpu;
	unsigned long flags;
	void **input_arg;
	void *input_pg = NULL;

	local_irq_save(flags);
	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	input_pg = *input_arg;
	*input_arg = NULL;
	local_irq_restore(flags);
	free_page((unsigned long)input_pg);

	if (hv_vp_assist_page && hv_vp_assist_page[cpu])
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0);

	if (hv_reenlightenment_cb == NULL)
		return 0;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	if (re_ctrl.target_vp == hv_vp_index[cpu]) {
		/* Reassign to some other online CPU */
		new_cpu = cpumask_any_but(cpu_online_mask, cpu);

		re_ctrl.target_vp = hv_vp_index[new_cpu];
		wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	}

	return 0;
}

static int __init hv_pci_init(void)
{
	int gen2vm = efi_enabled(EFI_BOOT);

	/*
	 * For Generation-2 VM, we exit from pci_arch_init() by returning 0.
	 * The purpose is to suppress the harmless warning:
	 * "PCI: Fatal: No config space access function found"
	 */
	if (gen2vm)
		return 0;

	/* For Generation-1 VM, we'll proceed in pci_arch_init(). */
	return 1;
}

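/*
 * hv_suspend()/hv_resume() run as syscore ops around hibernation: the
 * hypercall page is disabled before the image is written and re-enabled at
 * the same kernel address on resume, since the guest physical mapping set
 * up via HV_X64_MSR_HYPERCALL is not preserved across the hibernate/restore
 * cycle.
 */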
static int hv_suspend(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/*
	 * Reset the hypercall page as it is going to be invalidated
	 * across hibernation. Setting hv_hypercall_pg to NULL ensures
	 * that any subsequent hypercall operation fails safely instead of
	 * crashing due to an access of an invalid page. The hypercall page
	 * pointer is restored on resume.
	 */
	hv_hypercall_pg_saved = hv_hypercall_pg;
	hv_hypercall_pg = NULL;

	/* Disable the hypercall page in the hypervisor */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	return 0;
}

static void hv_resume(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Re-enable the hypercall page */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address =
		vmalloc_to_pfn(hv_hypercall_pg_saved);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	hv_hypercall_pg = hv_hypercall_pg_saved;
	hv_hypercall_pg_saved = NULL;
}

static struct syscore_ops hv_syscore_ops = {
	.suspend	= hv_suspend,
	.resume		= hv_resume,
};

/*
 * This function is to be invoked early in the boot sequence after the
 * hypervisor has been detected.
 *
 * 1. Setup the hypercall page.
 * 2. Register Hyper-V specific clocksource.
 * 3. Setup Hyper-V specific APIC entry points.
 */
void __init hyperv_init(void)
{
	u64 guest_id, required_msrs;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int cpuhp, i;

	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return;

	/* Absolutely required MSRs */
	required_msrs = HV_X64_MSR_HYPERCALL_AVAILABLE |
			HV_X64_MSR_VP_INDEX_AVAILABLE;

	if ((ms_hyperv.features & required_msrs) != required_msrs)
		return;

	/*
	 * Allocate the per-CPU state for the hypercall input arg.
	 * If this allocation fails, we will not be able to setup
	 * (per-CPU) hypercall input page and thus this failure is
	 * fatal on Hyper-V.
	 */
	hyperv_pcpu_input_arg = alloc_percpu(void *);

	BUG_ON(hyperv_pcpu_input_arg == NULL);

	/* Allocate percpu VP index */
	hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
				    GFP_KERNEL);
	if (!hv_vp_index)
		return;

	for (i = 0; i < num_possible_cpus(); i++)
		hv_vp_index[i] = VP_INVAL;

	hv_vp_assist_page = kcalloc(num_possible_cpus(),
				    sizeof(*hv_vp_assist_page), GFP_KERNEL);
	if (!hv_vp_assist_page) {
		ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
		goto free_vp_index;
	}

	cpuhp = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
				  hv_cpu_init, hv_cpu_die);
	if (cpuhp < 0)
		goto free_vp_assist_page;

	/*
	 * Setup the hypercall page and enable hypercalls.
	 * 1. Register the guest ID
	 * 2. Enable the hypercall and register the hypercall page
	 */
	guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

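	/*
	 * The hypercall page is allocated executable (PAGE_KERNEL_RX):
	 * once its GPA is written to HV_X64_MSR_HYPERCALL, the hypervisor
	 * populates it with the hypercall instruction sequence and the
	 * kernel issues hypercalls by calling into this page.
	 */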
	hv_hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
	if (hv_hypercall_pg == NULL) {
		wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
		goto remove_cpuhp_state;
	}

	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/*
	 * Ignore any errors in setting up stimer clockevents
	 * as we can run with the LAPIC timer as a fallback.
	 */
	(void)hv_stimer_alloc();

	hv_apic_init();

	x86_init.pci.arch_init = hv_pci_init;

	register_syscore_ops(&hv_syscore_ops);

	return;

remove_cpuhp_state:
	cpuhp_remove_state(cpuhp);
free_vp_assist_page:
	kfree(hv_vp_assist_page);
	hv_vp_assist_page = NULL;
free_vp_index:
	kfree(hv_vp_index);
	hv_vp_index = NULL;
}

/*
 * This routine is called before kexec/kdump, it does the required cleanup.
 */
void hyperv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	unregister_syscore_ops(&hv_syscore_ops);

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	/*
	 * Clear the hypercall page reference before resetting the page,
	 * so that hypercall operations fail safely rather than panicking
	 * the kernel by using an invalid hypercall page.
	 */
	hv_hypercall_pg = NULL;

	/* Reset the hypercall page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Reset the TSC page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);

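/*
 * Report a guest panic to the hypervisor through the crash MSRs: P0..P4
 * carry the error code, guest ID and the faulting ip/ax/sp, and writing
 * HV_CRASH_CTL_CRASH_NOTIFY to HV_X64_MSR_CRASH_CTL tells Hyper-V that the
 * crash data is valid so it can be logged on the host.
 */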
void hyperv_report_panic(struct pt_regs *regs, long err)
{
	static bool panic_reported;
	u64 guest_id;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

	wrmsrl(HV_X64_MSR_CRASH_P0, err);
	wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);

/**
 * hyperv_report_panic_msg - report panic message to Hyper-V
 * @pa: physical address of the panic page containing the message
 * @size: size of the message in the page
 */
void hyperv_report_panic_msg(phys_addr_t pa, size_t size)
{
	/*
	 * P3 to contain the physical address of the panic page & P4 to
	 * contain the size of the panic data in that page. Rest of the
	 * registers are no-op when the NOTIFY_MSG flag is set.
	 */
	wrmsrl(HV_X64_MSR_CRASH_P0, 0);
	wrmsrl(HV_X64_MSR_CRASH_P1, 0);
	wrmsrl(HV_X64_MSR_CRASH_P2, 0);
	wrmsrl(HV_X64_MSR_CRASH_P3, pa);
	wrmsrl(HV_X64_MSR_CRASH_P4, size);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL,
	       (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
EXPORT_SYMBOL_GPL(hyperv_report_panic_msg);

bool hv_is_hyperv_initialized(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/*
	 * Ensure that we're really on Hyper-V, and not a KVM or Xen
	 * emulation of Hyper-V
	 */
	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return false;

	/*
	 * Verify that earlier initialization succeeded by checking
	 * that the hypercall page is setup
	 */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);

bool hv_is_hibernation_supported(void)
{
	return acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);