/*
 * Linux 4.13.16 — arch/x86/xen/smp_hvm.c
 * (source blob fd60abedf65804e0379d751cfb79192d81c6127b)
 */
1 #include <asm/smp.h>
3 #include <xen/events.h>
5 #include "xen-ops.h"
6 #include "smp.h"
9 static void __init xen_hvm_smp_prepare_boot_cpu(void)
11 BUG_ON(smp_processor_id() != 0);
12 native_smp_prepare_boot_cpu();
15 * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info
16 * in xen_cpu_up_prepare_hvm().
18 xen_vcpu_setup(0);
21 * The alternative logic (which patches the unlock/lock) runs before
22 * the smp bootup up code is activated. Hence we need to set this up
23 * the core kernel is being patched. Otherwise we will have only
24 * modules patched but not core code.
26 xen_init_spinlocks();
29 static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
31 int cpu;
33 native_smp_prepare_cpus(max_cpus);
34 WARN_ON(xen_smp_intr_init(0));
36 xen_init_lock_cpu(0);
38 for_each_possible_cpu(cpu) {
39 if (cpu == 0)
40 continue;
42 /* Set default vcpu_id to make sure that we don't use cpu-0's */
43 per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Tear down a dead CPU's Xen resources after the common offline path
 * has confirmed the CPU is gone (common_cpu_die() returning 0).
 */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	if (common_cpu_die(cpu) == 0) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
		xen_teardown_timer(cpu);
	}
}
#else
/* Without CPU hotplug a CPU must never die; reaching here is a bug. */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	BUG();
}
#endif
63 void __init xen_hvm_smp_init(void)
65 if (!xen_have_vector_callback)
66 return;
68 smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
69 smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
70 smp_ops.cpu_die = xen_hvm_cpu_die;
71 smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
72 smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
73 smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
74 smp_ops.smp_cpus_done = xen_smp_cpus_done;