/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;
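
/*
 * Install the machine descriptor's SMP operations.  Everything in this
 * file dispatches through this single struct.
 */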
void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}
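
/*
 * Pack the physical address of a page table directory into the
 * word-sized secondary_data slot.  With LPAE the physical address
 * may exceed 32 bits, hence the shift.
 */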
static unsigned long get_arch_pgd(pgd_t *pgd)
{
	phys_addr_t pgdir = virt_to_phys(pgd);

	BUG_ON(pgdir & ARCH_PGD_MASK);

	return pgdir >> ARCH_PGD_SHIFT;
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}
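
/*
 * Boot one secondary core via the platform hook; without one there is
 * nothing we can do, so report -ENOSYS.
 */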
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}
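
/*
 * CPU hotplug is only advertised when the platform knows how to kill
 * an offlined CPU.
 */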
int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	printk("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	printk(KERN_INFO "SMP: Total of %d processors activated.\n",
	       num_online_cpus());

	hyp_mode_check();
}
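
/*
 * Point the boot CPU at its per-CPU storage before any per-CPU
 * variable is accessed.
 */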
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}
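
/*
 * The platform-provided IPI trigger.  It is registered once at boot
 * and every IPI sent from this file funnels through it.
 */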
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!smp_cross_call)
		smp_cross_call = fn;
}
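
/*
 * IPI senders used by the generic cross-call code; the wakeup IPI is
 * used by platforms to rouse cores parked in a low-power state.
 */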
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};
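
/*
 * Per-IPI counters, as shown in /proc/interrupts.
 */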
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}
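
/*
 * Sum of all IPIs handled by one CPU, for the arch interrupt
 * statistics.
 */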
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}
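
/*
 * Called by the clockevents broadcast code to tick CPUs whose local
 * timer is currently stopped.
 */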
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
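
/*
 * Stop all other CPUs: used on the halt/restart and panic paths.
 */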
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;
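
/*
 * udelay() is calibrated through loops_per_jiffy, so it must be
 * rescaled whenever cpufreq changes the core clock.  The *_ref
 * values above record the calibration-time reference point.
 */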
static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif