Linux 4.1.18
arch/powerpc/kernel/smp.c
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif
struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
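
/*
 * Note on the handshake above: a secondary that has never been started sits
 * in a low-level spin loop (on most 64-bit platforms, the early startup code
 * in head_64.S) polling its paca cpu_start field, and falls through into the
 * secondary start-up path once that field becomes non-zero.  The hotplug
 * branch handles a thread that already went through that path and has since
 * been soft-offlined, so it only needs a wake-up IPI.
 */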
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}
static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};
/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
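
/*
 * A platform whose interrupt controller has one hardware IPI per message
 * wires the handlers up by calling the helper above once per message,
 * roughly (sketch only; how the virq for each IPI is looked up is platform
 * specific):
 *
 *	for (msg = 0; msg < 4; msg++)
 *		smp_request_message_ipi(ipi_virq_for(msg), msg);
 *
 * Controllers with a single IPI instead select CONFIG_PPC_SMP_MUXED_IPI and
 * use the message-multiplexing code below.
 */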
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1 << (8 * (A)))
#endif
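
/*
 * The message types are packed one per byte into the 'messages' word:
 * smp_muxed_ipi_message_pass() stores 1 into byte 'msg', and smp_ipi_demux()
 * below clears the whole word atomically with xchg() and then tests each
 * byte.  IPI_MESSAGE(A) is the bit that a 1 in byte A shows up as when the
 * word is read as an int, hence the endian-dependent shift: a message stored
 * in byte 1, for instance, appears as bit 16 on big-endian and bit 8 on
 * little-endian.
 */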
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
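
/*
 * The IPI senders below all funnel through do_message_pass(): platforms that
 * provide smp_ops->message_pass get it called directly, everything else
 * (when CONFIG_PPC_SMP_MUXED_IPI is enabled) falls back to the muxed
 * single-IPI path above.
 */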
void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif
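
/*
 * crash_send_ipi() reuses the debugger-break message: the crash callback
 * pointer is published first and the mb() makes it visible before the IPI
 * goes out, so debug_ipi_action() on the other CPUs sees a non-NULL
 * crash_ipi_function_ptr and diverts into the crash path instead of the
 * debugger.
 */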
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}
void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}
#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif
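
/*
 * secondaries_inhibited() is how HV KVM keeps the extra threads of a core
 * (or subcore) parked so they can be handed to guests: while it returns
 * true, __cpu_up() below refuses to online any CPU that is not the primary
 * thread of its subcore.
 */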
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited.
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure the callin-map entry is 0 (can be a leftover from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu) || !cpu_active(cpu))
		cpu_relax();

	return 0;
}
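
/*
 * Bringup handshake, in short: kick_cpu() releases the secondary, the
 * polling loops above wait for it to mark itself in cpu_callin_map from
 * start_secondary(), give_timebase() here pairs with take_timebase() on the
 * new CPU for platforms that need timebase synchronization, and the final
 * loop waits for the secondary to put itself in the online mask.
 */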
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
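
/*
 * Logical CPU numbers are laid out with all threads of a core contiguous,
 * so the two helpers above reduce to shifts by threads_shift
 * (log2(threads_per_core)).  For example, assuming 8 threads per core
 * (threads_shift == 3): CPU 11 is thread 3 of core 1, and
 * cpu_first_thread_of_core(1) is CPU 8.
 */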
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}
static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}
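
/*
 * cpu_core_mask is therefore built from one of two sources: when the device
 * tree exposes "ibm,chip-id" properties, CPUs sharing a chip id are grouped
 * together; otherwise CPUs that share an L2 cache node are treated as
 * belonging to the same package for scheduling purposes.
 */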
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);

	BUG();
}
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}

	return flags;
}
#endif
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
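
/*
 * Two scheduler topology levels are registered from smp_cpus_done(): an SMT
 * level spanning the threads of a core (with SD_ASYM_PACKING added, via
 * powerpc_smt_flags() above, on CPUs that prefer work packed onto
 * lower-numbered threads) and a DIE level spanning the CPUs of the node.
 */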
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif