/*
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"
cpumask_var_t xen_cpu_initialized_map;

struct xen_common_irq {
	int irq;
	char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
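/*
 * Each IPI/VIRQ binding is tracked per-CPU so that it can be torn down
 * again on CPU unplug; .irq == -1 marks a slot with no binding, which
 * is why xen_smp_intr_free() only touches entries with .irq >= 0.
 */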
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}
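/*
 * Under Xen, IPIs are not hardware vectors: each "vector" used in this
 * file is backed by a per-CPU event channel, bound to its handler via
 * bind_ipi_to_irqhandler() in xen_smp_intr_init() below.
 */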
static void cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
	if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
		xen_enable_sysenter();
		xen_enable_syscall();
	}
	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	cpu_set_state_online(cpu);	/* Implies full memory barrier. */

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();
}
/*
 * Note: the cpu parameter is only relevant for PVH. The reason for
 * passing it is that we can't call smp_processor_id() until the per-cpu
 * segments are loaded, for which we need the cpu number! So we pass it
 * in rdi as the first parameter.
 */
asmlinkage __visible void cpu_bringup_and_idle(int cpu)
{
#ifdef CONFIG_XEN_PVH
	if (xen_feature(XENFEAT_auto_translated_physmap) &&
	    xen_feature(XENFEAT_supervisor_mode_kernel))
		xen_pvh_secondary_vcpu_init(cpu);
#endif
	cpu_bringup();
	cpu_startup_entry(CPUHP_ONLINE);
}
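/*
 * cpu_startup_entry(CPUHP_ONLINE) enters the generic idle loop and does
 * not return; from here on the new CPU is driven entirely by interrupts
 * and the scheduler.
 */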
static void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
	if (xen_hvm_domain())
		return;

	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
		per_cpu(xen_irq_work, cpu).irq = -1;
		kfree(per_cpu(xen_irq_work, cpu).name);
		per_cpu(xen_irq_work, cpu).name = NULL;
	}
}
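/*
 * The early return above mirrors xen_smp_intr_init() below: on PVHVM
 * the irq_work IPI goes through the native path and is never bound to
 * a Xen event channel, so there is nothing to free beyond the four
 * common IPIs.
 */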
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses
	 * the IPI mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu).irq = rc;
	per_cpu(xen_irq_work, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}
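/*
 * Because every slot starts at .irq = -1 and is reset on free, the fail
 * path above can hand a partially initialized CPU straight to
 * xen_smp_intr_free() without tracking how far setup got.
 */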
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}
static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/* This is akin to using 'nr_cpus' on the Linux command line.
	 * Which is OK as when we use 'dom0_max_vcpus=X' we can only
	 * have up to X, while nr_cpu_ids is greater than X. This
	 * normally is not a problem, except when CPU hotplugging
	 * is involved and then there might be more than X CPUs
	 * in the guest - which will not work as there is no
	 * hypercall to expand the max number of VCPUs an already
	 * running guest has. So cap it up to X. */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}
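/*
 * xen_fill_possible_map() handles domU guests, where every vCPU the
 * hypervisor reports as valid becomes possible; xen_filter_cpu_maps()
 * is the dom0 counterpart, shrinking the maps (and nr_cpu_ids) when
 * dom0_max_vcpus grants fewer vCPUs than nr_cpu_ids allows for.
 */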
static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	if (xen_pv_domain()) {
		if (!xen_feature(XENFEAT_writable_page_tables))
			/* We've switched to the "real" per-cpu gdt, so make
			 * sure the old memory can be recycled. */
			make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
		/*
		 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
		 * expects __USER_DS
		 */
		loadsegment(ds, __USER_DS);
		loadsegment(es, __USER_DS);
#endif

		xen_filter_cpu_maps();
		xen_setup_vcpu_info_placement();
	}

	/*
	 * The alternative logic (which patches the unlock/lock) runs
	 * before the SMP bootup code is activated. Hence we need to set
	 * this up before the core kernel is patched. Otherwise we will
	 * have only modules patched but not core code.
	 */
	xen_init_spinlocks();
}
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}
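/*
 * Note the ordering above: the possible map is trimmed down to max_cpus
 * first, and only then is every remaining possible CPU marked present,
 * so the present map never exceeds max_cpus.
 */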
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	/* used to tell cpu_init() that it can proceed with initialization */
	cpumask_set_cpu(cpu, cpu_callout_mask);
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

#ifdef CONFIG_X86_32
	/* Note: PVH is not yet supported on x86_32. */
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
		ctxt->flags = VGCF_IN_KERNEL;
		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
		ctxt->user_regs.ds = __USER_DS;
		ctxt->user_regs.es = __USER_DS;
		ctxt->user_regs.ss = __KERNEL_DS;

		xen_copy_trap_info(ctxt->trap_ctxt);

		BUG_ON((unsigned long)gdt & ~PAGE_MASK);

		gdt_mfn = arbitrary_virt_to_mfn(gdt);
		make_lowmem_page_readonly(gdt);
		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

		ctxt->gdt_frames[0] = gdt_mfn;
		ctxt->gdt_ents      = GDT_ENTRIES;

		ctxt->kernel_ss = __KERNEL_DS;
		ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
		ctxt->event_callback_cs     = __KERNEL_CS;
		ctxt->failsafe_callback_cs  = __KERNEL_CS;
#else
		ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
		ctxt->event_callback_eip    =
			(unsigned long)xen_hypervisor_callback;
		ctxt->failsafe_callback_eip =
			(unsigned long)xen_failsafe_callback;
		ctxt->user_regs.cs = __KERNEL_CS;
		per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	}
#ifdef CONFIG_XEN_PVH
	else {
		/*
		 * The vcpu comes on kernel page tables which have the NX pte
		 * bit set. This means before DS/SS is touched, NX in
		 * EFER must be set. Hence the following assembly glue code.
		 */
		ctxt->user_regs.eip = (unsigned long)xen_pvh_early_cpu_init;
		ctxt->user_regs.rdi = cpu;
		ctxt->user_regs.rsi = true;  /* entry == true */
	}
#endif
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
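/*
 * VCPUOP_initialise only loads the register state and page-table base
 * into the new vcpu; it does not start it running. The actual kick
 * comes from VCPUOP_up in xen_cpu_up() below.
 */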
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	common_cpu_up(cpu, idle);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	/*
	 * PV VCPUs are always successfully taken down (see 'while' loop
	 * in xen_cpu_die()), so -EBUSY is an error.
	 */
	rc = cpu_check_up_prepare(cpu);
	if (rc)
		return rc;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (cpu_report_state(cpu) != CPU_ONLINE)
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);

	return 0;
}
static void xen_smp_cpus_done(unsigned int max_cpus)
{
}
#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ/10);
	}

	if (common_cpu_die(cpu) == 0) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
		xen_teardown_timer(cpu);
	}
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
	/*
	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
	 * clears certain data that the cpu_idle loop (which called us
	 * and that we return from) expects. The only way to get that
	 * data back is to call:
	 */
	tick_nohz_idle_enter();
}
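/*
 * VCPUOP_down does not return until some other CPU brings this vcpu
 * back with VCPUOP_up, at which point execution resumes right here;
 * cpu_bringup() then re-registers the CPU as if freshly started.
 */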
#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}
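/*
 * There is no NMI-style shutdown here: stop_self() runs as an ordinary
 * cross-CPU function call, and each CPU takes itself down with
 * VCPUOP_down rather than being halted from the outside.
 */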
static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}
void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}
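/*
 * All three handlers defer to the generic SMP code; the Xen layer's
 * only job is delivery, so the accounting (irq_call_count,
 * apic_irq_work_irqs) matches what the native APIC path would report.
 */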
static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};
void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
}
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}
static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc;

	/*
	 * This can happen if CPU was offlined earlier and
	 * offlining timed out in common_cpu_die().
	 */
	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
	}

	/*
	 * xen_smp_intr_init() needs to run before native_cpu_up()
	 * so that IPI vectors are set up on the booting CPU before
	 * it is marked online in native_cpu_up().
	 */
	rc = xen_smp_intr_init(cpu);
	WARN_ON(rc);
	if (rc)
		return rc;

	rc = native_cpu_up(cpu, tidle);

	/*
	 * We must initialize the slowpath CPU kicker _after_ the native
	 * path has executed. If we initialized it before, none of the
	 * unlocker IPI kicks would reach the booting CPU as the booting
	 * CPU had not set itself 'online' in cpu_online_mask. That mask
	 * is checked when IPIs are sent (on HVM at least).
	 */
	xen_init_lock_cpu(cpu);
	return rc;
}
void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}