[linux/fpc-iii.git] arch/x86/xen/smp.c

/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

struct xen_common_irq {
        int irq;
        char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
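
/*
 * Each IPI class is delivered over a per-CPU Xen event channel.  The
 * irq/name pairs above record the bound irq (-1 while unbound) so that
 * xen_smp_intr_free() can unbind them again when a CPU goes down.
 */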

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

static void cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
        if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
                xen_enable_sysenter();
                xen_enable_syscall();
        }
        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        this_cpu_write(cpu_state, CPU_ONLINE);

        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
}
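
/*
 * Entry point for a freshly started secondary VCPU: cpu_initialize_context()
 * points the new VCPU's user_regs.eip here, so it runs cpu_bringup() and
 * then enters the idle loop.
 */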
/* Note: cpu parameter is only relevant for PVH */
static void cpu_bringup_and_idle(int cpu)
{
#ifdef CONFIG_X86_64
        if (xen_feature(XENFEAT_auto_translated_physmap) &&
            xen_feature(XENFEAT_supervisor_mode_kernel))
                xen_pvh_secondary_vcpu_init(cpu);
#endif
        cpu_bringup();
        cpu_startup_entry(CPUHP_ONLINE);
}

static void xen_smp_intr_free(unsigned int cpu)
{
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
                per_cpu(xen_resched_irq, cpu).irq = -1;
                kfree(per_cpu(xen_resched_irq, cpu).name);
                per_cpu(xen_resched_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
                per_cpu(xen_callfunc_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfunc_irq, cpu).name);
                per_cpu(xen_callfunc_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
                per_cpu(xen_debug_irq, cpu).irq = -1;
                kfree(per_cpu(xen_debug_irq, cpu).name);
                per_cpu(xen_debug_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
                                       NULL);
                per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
                per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
        }
        if (xen_hvm_domain())
                return;

        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
                kfree(per_cpu(xen_irq_work, cpu).name);
                per_cpu(xen_irq_work, cpu).name = NULL;
        }
}
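
/*
 * Bind the per-CPU IPI event channels (reschedule, call-function,
 * call-function-single, debug and, for PV guests, irq_work) for this cpu
 * and record the resulting irqs so xen_smp_intr_free() can unbind them.
 */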
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu).irq = rc;
        per_cpu(xen_resched_irq, cpu).name = resched_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu).irq = rc;
        per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu).irq = rc;
        per_cpu(xen_debug_irq, cpu).name = debug_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
        per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

        /*
         * The IRQ worker on PVHVM goes through the native path and uses the
         * IPI mechanism.
         */
        if (xen_hvm_domain())
                return 0;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                                    cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu).irq = rc;
        per_cpu(xen_irq_work, cpu).name = callfunc_name;

        return 0;

 fail:
        xen_smp_intr_free(cpu);
        return rc;
}
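
/*
 * For domU guests, probe which VCPUs the hypervisor provides: a
 * non-negative return from VCPUOP_is_up means the VCPU exists (whether or
 * not it is currently running), so mark it as possible.
 */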
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /* This is akin to using 'nr_cpus' on the Linux command line.
         * Which is OK as when we use 'dom0_max_vcpus=X' we can only
         * have up to X, while nr_cpu_ids is greater than X. This
         * normally is not a problem, except when CPU hotplugging
         * is involved and then there might be more than X CPUs
         * in the guest - which will not work as there is no
         * hypercall to expand the max number of VCPUs an already
         * running guest has. So cap it up to X. */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        if (xen_pv_domain()) {
                if (!xen_feature(XENFEAT_writable_page_tables))
                        /* We've switched to the "real" per-cpu gdt, so make
                         * sure the old memory can be recycled. */
                        make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
                /*
                 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
                 * expects __USER_DS
                 */
                loadsegment(ds, __USER_DS);
                loadsegment(es, __USER_DS);
#endif

                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
        }
        /*
         * The alternative logic (which patches the unlock/lock) runs before
         * the smp bootup code is activated. Hence we need to set this up
         * before the core kernel is patched. Otherwise we will have only
         * modules patched but not core code.
         */
        xen_init_spinlocks();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_boot_cpu_info();
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}
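
/*
 * Build the initial register and descriptor state for a new VCPU and hand
 * it to the hypervisor via VCPUOP_initialise.  For PV guests this includes
 * a read-only GDT frame, trap table and callback entry points; for
 * auto-translated (PVH) guests only the entry point, stack and page-table
 * base are filled in.
 */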
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

#ifdef CONFIG_X86_32
        /* Note: PVH is not yet supported on x86_32. */
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                ctxt->flags = VGCF_IN_KERNEL;
                ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
                ctxt->user_regs.ds = __USER_DS;
                ctxt->user_regs.es = __USER_DS;
                ctxt->user_regs.ss = __KERNEL_DS;

                xen_copy_trap_info(ctxt->trap_ctxt);

                ctxt->ldt_ents = 0;

                BUG_ON((unsigned long)gdt & ~PAGE_MASK);

                gdt_mfn = arbitrary_virt_to_mfn(gdt);
                make_lowmem_page_readonly(gdt);
                make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

                ctxt->gdt_frames[0] = gdt_mfn;
                ctxt->gdt_ents      = GDT_ENTRIES;

                ctxt->kernel_ss = __KERNEL_DS;
                ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
                ctxt->event_callback_cs     = __KERNEL_CS;
                ctxt->failsafe_callback_cs  = __KERNEL_CS;
#else
                ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
                ctxt->event_callback_eip    =
                                        (unsigned long)xen_hypervisor_callback;
                ctxt->failsafe_callback_eip =
                                        (unsigned long)xen_failsafe_callback;
                ctxt->user_regs.cs = __KERNEL_CS;
                per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
#ifdef CONFIG_X86_32
        }
#else
        } else
                /* N.B. The user_regs.eip (cpu_bringup_and_idle) is called with
                 * %rdi holding the cpu number - which means we are passing the
                 * cpu in as the first parameter. Subtle!
                 */
                ctxt->user_regs.rdi = cpu;
#endif
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}
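
/*
 * Bring a secondary CPU online: set up its timer, runstate and spinlock
 * state, initialise the VCPU context once, bind its IPI event channels and
 * then ask the hypervisor to start it with VCPUOP_up, spinning until the
 * new CPU marks itself CPU_ONLINE in cpu_bringup().
 */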
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                /* Just in case we booted with a single CPU. */
                alternatives_enable_smp();

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }

        return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}
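
/*
 * CPU hotplug: a dying VCPU parks itself with VCPUOP_down in xen_play_dead()
 * and is restarted later by VCPUOP_up from xen_cpu_up().  Without
 * CONFIG_HOTPLUG_CPU these paths are stubbed out below.
 */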
#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
        while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
        xen_smp_intr_free(cpu);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
        /*
         * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
         * clears certain data that the cpu_idle loop (which called us
         * and that we return from) expects. The only way to get that
         * data back is to call:
         */
        tick_nohz_idle_enter();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
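
/*
 * Translate a native x86 IPI vector number into the corresponding
 * XEN_*_VECTOR used by the event-channel IPI path; returns -1 for vectors
 * that have no Xen equivalent.
 */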
static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
#ifdef CONFIG_X86_64
        case NMI_VECTOR:
        case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
                xen_vector = XEN_NMI_VECTOR;
                break;
#endif
        default:
                xen_vector = -1;
                printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
                        vector);
        }

        return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();
        int xen_vector = xen_map_vector(vector);

        if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_send_IPI_one(cpu, xen_vector);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
}
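
/*
 * PVHVM guests boot their CPUs through the native path but still deliver
 * IPIs over Xen event channels, so only a subset of smp_ops is overridden
 * below, and only when a vector callback is available (see
 * xen_hvm_smp_init()).
 */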
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);
}

static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc;
        /*
         * xen_smp_intr_init() needs to run before native_cpu_up()
         * so that IPI vectors are set up on the booting CPU before
         * it is marked online in native_cpu_up().
         */
        rc = xen_smp_intr_init(cpu);
        WARN_ON(rc);
        if (!rc)
                rc = native_cpu_up(cpu, tidle);

        /*
         * We must initialize the slowpath CPU kicker _after_ the native
         * path has executed. If we initialized it before, none of the
         * unlocker IPI kicks would reach the booting CPU, as the booting
         * CPU had not set itself 'online' in cpu_online_mask. That mask
         * is checked when IPIs are sent (on HVM at least).
         */
        xen_init_lock_cpu(cpu);
        return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
        xen_cpu_die(cpu);
        native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
        smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}