1 #include <linux/acpi.h>
3 #include <linux/kexec.h>
4 #include <linux/memblock.h>
6 #include <xen/features.h>
7 #include <xen/events.h>
8 #include <xen/interface/memory.h>
12 #include <asm/reboot.h>
13 #include <asm/setup.h>
14 #include <asm/hypervisor.h>
15 #include <asm/e820/api.h>
16 #include <asm/early_ioremap.h>
18 #include <asm/xen/cpuid.h>
19 #include <asm/xen/hypervisor.h>
20 #include <asm/xen/page.h>
/*
 * PFN of the low-memory page picked by reserve_shared_info(); it is the
 * guest frame at which xen_hvm_init_shared_info() asks Xen to place the
 * shared_info page, and which xen_hvm_init_mem_mapping() later maps via
 * the direct map.
 */
static unsigned long shared_info_pfn
;
28 void xen_hvm_init_shared_info(void)
30 struct xen_add_to_physmap xatp
;
32 xatp
.domid
= DOMID_SELF
;
34 xatp
.space
= XENMAPSPACE_shared_info
;
35 xatp
.gpfn
= shared_info_pfn
;
36 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap
, &xatp
))
40 static void __init
reserve_shared_info(void)
45 * Search for a free page starting at 4kB physical address.
46 * Low memory is preferred to avoid an EPT large page split up
48 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
49 * the BIOS used for HVM guests is well behaved and won't
50 * clobber memory other than the first 4kB.
53 !e820__mapped_all(pa
, pa
+ PAGE_SIZE
, E820_TYPE_RAM
) ||
54 memblock_is_reserved(pa
);
58 shared_info_pfn
= PHYS_PFN(pa
);
60 memblock_reserve(pa
, PAGE_SIZE
);
61 HYPERVISOR_shared_info
= early_memremap(pa
, PAGE_SIZE
);
64 static void __init
xen_hvm_init_mem_mapping(void)
66 early_memunmap(HYPERVISOR_shared_info
, PAGE_SIZE
);
67 HYPERVISOR_shared_info
= __va(PFN_PHYS(shared_info_pfn
));
70 * The virtual address of the shared_info page has changed, so
71 * the vcpu_info pointer for VCPU 0 is now stale.
73 * The prepare_boot_cpu callback will re-initialize it via
74 * xen_vcpu_setup, but we can't rely on that to be called for
75 * old Xen versions (xen_have_vector_callback == 0).
77 * It is, in any case, bad to have a stale vcpu_info pointer
80 xen_vcpu_info_reset(0);
83 static void __init
init_hvm_pv_info(void)
86 uint32_t eax
, ebx
, ecx
, edx
, base
;
88 base
= xen_cpuid_base();
89 eax
= cpuid_eax(base
+ 1);
93 printk(KERN_INFO
"Xen version %d.%d.\n", major
, minor
);
95 xen_domain_type
= XEN_HVM_DOMAIN
;
97 /* PVH set up hypercall page in xen_prepare_pvh(). */
99 pv_info
.name
= "Xen PVH";
104 pv_info
.name
= "Xen HVM";
105 msr
= cpuid_ebx(base
+ 2);
106 pfn
= __pa(hypercall_page
);
107 wrmsr_safe(msr
, (u32
)pfn
, (u32
)(pfn
>> 32));
110 xen_setup_features();
112 cpuid(base
+ 4, &eax
, &ebx
, &ecx
, &edx
);
113 if (eax
& XEN_HVM_CPUID_VCPU_ID_PRESENT
)
114 this_cpu_write(xen_vcpu_id
, ebx
);
116 this_cpu_write(xen_vcpu_id
, smp_processor_id());
119 #ifdef CONFIG_KEXEC_CORE
120 static void xen_hvm_shutdown(void)
122 native_machine_shutdown();
123 if (kexec_in_progress
)
124 xen_reboot(SHUTDOWN_soft_reset
);
127 static void xen_hvm_crash_shutdown(struct pt_regs
*regs
)
129 native_machine_crash_shutdown(regs
);
130 xen_reboot(SHUTDOWN_soft_reset
);
134 static int xen_cpu_up_prepare_hvm(unsigned int cpu
)
139 * This can happen if CPU was offlined earlier and
140 * offlining timed out in common_cpu_die().
142 if (cpu_report_state(cpu
) == CPU_DEAD_FROZEN
) {
143 xen_smp_intr_free(cpu
);
144 xen_uninit_lock_cpu(cpu
);
147 if (cpu_acpi_id(cpu
) != U32_MAX
)
148 per_cpu(xen_vcpu_id
, cpu
) = cpu_acpi_id(cpu
);
150 per_cpu(xen_vcpu_id
, cpu
) = cpu
;
151 rc
= xen_vcpu_setup(cpu
);
155 if (xen_have_vector_callback
&& xen_feature(XENFEAT_hvm_safe_pvclock
))
156 xen_setup_timer(cpu
);
158 rc
= xen_smp_intr_init(cpu
);
160 WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
166 static int xen_cpu_dead_hvm(unsigned int cpu
)
168 xen_smp_intr_free(cpu
);
170 if (xen_have_vector_callback
&& xen_feature(XENFEAT_hvm_safe_pvclock
))
171 xen_teardown_timer(cpu
);
176 static void __init
xen_hvm_guest_init(void)
183 reserve_shared_info();
184 xen_hvm_init_shared_info();
187 * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
188 * page, we use it in the event channel upcall and in some pvclock
191 xen_vcpu_info_reset(0);
193 xen_panic_handler_init();
195 if (xen_feature(XENFEAT_hvm_callback_vector
))
196 xen_have_vector_callback
= 1;
199 WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm
, xen_cpu_dead_hvm
));
200 xen_unplug_emulated_devices();
201 x86_init
.irqs
.intr_init
= xen_init_IRQ
;
202 xen_hvm_init_time_ops();
203 xen_hvm_init_mmu_ops();
205 #ifdef CONFIG_KEXEC_CORE
206 machine_ops
.shutdown
= xen_hvm_shutdown
;
207 machine_ops
.crash_shutdown
= xen_hvm_crash_shutdown
;
/*
 * Set via the "xen_nopv" early parameter (see xen_parse_nopv below);
 * when true, xen_platform_hvm() reports no hypervisor and
 * xen_hvm_need_lapic() behavior changes accordingly.
 */
static bool xen_nopv
;
212 static __init
int xen_parse_nopv(char *arg
)
217 early_param("xen_nopv", xen_parse_nopv
);
219 bool xen_hvm_need_lapic(void)
225 if (!xen_hvm_domain())
227 if (xen_feature(XENFEAT_hvm_pirqs
) && xen_have_vector_callback
)
231 EXPORT_SYMBOL_GPL(xen_hvm_need_lapic
);
233 static uint32_t __init
xen_platform_hvm(void)
235 if (xen_pv_domain() || xen_nopv
)
238 return xen_cpuid_base();
241 static __init
void xen_hvm_guest_late_init(void)
243 #ifdef CONFIG_XEN_PVH
244 /* Test for PVH domain (PVH boot path taken overrides ACPI flags). */
246 (x86_platform
.legacy
.rtc
|| !x86_platform
.legacy
.no_vga
))
252 /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
253 if (!nr_ioapics
&& acpi_irq_model
== ACPI_IRQ_MODEL_PIC
)
254 acpi_irq_model
= ACPI_IRQ_MODEL_PLATFORM
;
256 machine_ops
.emergency_restart
= xen_emergency_restart
;
257 pv_info
.name
= "Xen PVH";
261 const __initconst
struct hypervisor_x86 x86_hyper_xen_hvm
= {
263 .detect
= xen_platform_hvm
,
264 .type
= X86_HYPER_XEN_HVM
,
265 .init
.init_platform
= xen_hvm_guest_init
,
266 .init
.x2apic_available
= xen_x2apic_para_available
,
267 .init
.init_mem_mapping
= xen_hvm_init_mem_mapping
,
268 .init
.guest_late_init
= xen_hvm_guest_late_init
,
269 .runtime
.pin_vcpu
= xen_pin_vcpu
,