#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/memblock.h>

#include <xen/features.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <asm/early_ioremap.h>

#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"

static unsigned long shared_info_pfn;
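
/*
 * Tell Xen to map its shared_info page at the guest PFN reserved by
 * reserve_shared_info() below.
 */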
void xen_hvm_init_shared_info(void)
{
        struct xen_add_to_physmap xatp;

        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.gpfn = shared_info_pfn;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();
}
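
/*
 * Pick a free low-memory page for the shared_info mapping, reserve it in
 * memblock and map it with early_memremap() until the kernel page tables
 * are available.
 */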
static void __init reserve_shared_info(void)
{
        u64 pa;

        /*
         * Search for a free page starting at 4kB physical address.
         * Low memory is preferred to avoid an EPT large page split up
         * by the mapping.
         * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
         * the BIOS used for HVM guests is well behaved and won't
         * clobber memory other than the first 4kB.
         */
        for (pa = PAGE_SIZE;
             !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
             memblock_is_reserved(pa);
             pa += PAGE_SIZE)
                ;

        shared_info_pfn = PHYS_PFN(pa);

        memblock_reserve(pa, PAGE_SIZE);
        HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
}
static void __init xen_hvm_init_mem_mapping(void)
{
        early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
        HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
}
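
/*
 * Read the Xen CPUID leaves: report the hypervisor version, install the
 * hypercall page via the MSR from leaf base + 2 (plain HVM only; PVH does
 * this in xen_prepare_pvh()) and record this CPU's Xen vcpu id.
 */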
static void __init init_hvm_pv_info(void)
{
        int major, minor;
        uint32_t eax, ebx, ecx, edx, base;

        base = xen_cpuid_base();
        eax = cpuid_eax(base + 1);

        major = eax >> 16;
        minor = eax & 0xffff;
        printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

        xen_domain_type = XEN_HVM_DOMAIN;

        /* PVH set up hypercall page in xen_prepare_pvh(). */
        if (xen_pvh_domain())
                pv_info.name = "Xen PVH";
        else {
                u64 pfn;
                uint32_t msr;

                pv_info.name = "Xen HVM";
                msr = cpuid_ebx(base + 2);
                pfn = __pa(hypercall_page);
                wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
        }

        xen_setup_features();

        cpuid(base + 4, &eax, &ebx, &ecx, &edx);
        if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
                this_cpu_write(xen_vcpu_id, ebx);
        else
                this_cpu_write(xen_vcpu_id, smp_processor_id());
}
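
/*
 * kexec support: a SHUTDOWN_soft_reset is issued so that Xen resets the
 * domain's hypervisor-side state before the new kernel starts.
 */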
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
        native_machine_shutdown();
        if (kexec_in_progress)
                xen_reboot(SHUTDOWN_soft_reset);
}

static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
        native_machine_crash_shutdown(regs);
        xen_reboot(SHUTDOWN_soft_reset);
}
#endif
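
/*
 * CPU hotplug "prepare" callback: (re)register the vcpu with Xen and set up
 * its pvclock timer and event channel interrupts.
 */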
static int xen_cpu_up_prepare_hvm(unsigned int cpu)
{
        int rc = 0;

        /*
         * This can happen if CPU was offlined earlier and
         * offlining timed out in common_cpu_die().
         */
        if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
                xen_smp_intr_free(cpu);
                xen_uninit_lock_cpu(cpu);
        }

        if (cpu_acpi_id(cpu) != U32_MAX)
                per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
        else
                per_cpu(xen_vcpu_id, cpu) = cpu;
        rc = xen_vcpu_setup(cpu);
        if (rc)
                return rc;

        if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
                xen_setup_timer(cpu);

        rc = xen_smp_intr_init(cpu);
        if (rc) {
                WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
                     cpu, rc);
        }
        return rc;
}
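
/* CPU hotplug "dead" callback: undo what xen_cpu_up_prepare_hvm() set up. */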
static int xen_cpu_dead_hvm(unsigned int cpu)
{
        xen_smp_intr_free(cpu);

        if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
                xen_teardown_timer(cpu);

        return 0;
}
static void __init xen_hvm_guest_init(void)
{
        if (xen_pv_domain())
                return;

        init_hvm_pv_info();

        reserve_shared_info();
        xen_hvm_init_shared_info();

        /*
         * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
         * page, we use it in the event channel upcall and in some pvclock
         * related functions.
         */
        xen_vcpu_info_reset(0);

        xen_panic_handler_init();

        if (xen_feature(XENFEAT_hvm_callback_vector))
                xen_have_vector_callback = 1;

        xen_hvm_smp_init();
        WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
        xen_unplug_emulated_devices();
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
        xen_hvm_init_mmu_ops();

#ifdef CONFIG_KEXEC_CORE
        machine_ops.shutdown = xen_hvm_shutdown;
        machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
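
/* "xen_nopv" on the command line disables the PV extensions for HVM guests. */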
static bool xen_nopv;
static __init int xen_parse_nopv(char *arg)
{
        xen_nopv = true;
        return 0;
}
early_param("xen_nopv", xen_parse_nopv);
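
/*
 * Report whether the local APIC is still needed: it is not once PV event
 * channel delivery (vector callback plus PV pirqs) is available.
 */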
bool xen_hvm_need_lapic(void)
{
        if (xen_nopv)
                return false;
        if (xen_pv_domain())
                return false;
        if (!xen_hvm_domain())
                return false;
        if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
                return false;
        return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
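
/*
 * Detection hook: returns the Xen CPUID leaf base, or 0 for PV domains or
 * when xen_nopv is set.
 */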
static uint32_t __init xen_platform_hvm(void)
{
        if (xen_pv_domain() || xen_nopv)
                return 0;

        return xen_cpuid_base();
}

static __init void xen_hvm_guest_late_init(void)
{
#ifdef CONFIG_XEN_PVH
        /* Test for PVH domain (PVH boot path taken overrides ACPI flags). */
        if (!xen_pvh &&
            (x86_platform.legacy.rtc || !x86_platform.legacy.no_vga))
                return;

        /* PVH detected. */
        xen_pvh = true;

        /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
        if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC)
                acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;

        machine_ops.emergency_restart = xen_emergency_restart;
        pv_info.name = "Xen PVH";
#endif
}
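
/* Hooks registered with the generic x86 hypervisor detection code. */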
const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = {
        .name                   = "Xen HVM",
        .detect                 = xen_platform_hvm,
        .type                   = X86_HYPER_XEN_HVM,
        .init.init_platform     = xen_hvm_guest_init,
        .init.x2apic_available  = xen_x2apic_para_available,
        .init.init_mem_mapping  = xen_hvm_init_mem_mapping,
        .init.guest_late_init   = xen_hvm_guest_late_init,
        .runtime.pin_vcpu       = xen_pin_vcpu,
};