arch/arm/mach-spear/hotplug.c
/*
 * linux/arch/arm/mach-spear13xx/hotplug.c
 *
 * Copyright (C) 2012 ST Microelectronics Ltd.
 * Deepak Sikri <deepak.sikri@st.com>
 *
 * based upon linux/arch/arm/mach-realview/hotplug.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	asm volatile(
	/* Invalidate the I-cache and wait for completion */
	"	mcr	p15, 0, %1, c7, c5, 0\n"
	"	dsb\n"
	/*
	 * Turn off coherency: clear the SMP bit in the auxiliary control
	 * register, then disable the D-cache by clearing the C bit in the
	 * system control register.
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	: "=&r" (v)
	: "r" (0), "Ir" (CR_C)
	: "cc", "memory");
}
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	/* Reverse of cpu_enter_lowpower(): re-enable the D-cache, then rejoin coherency */
	asm volatile("mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	: "=&r" (v)
	: "Ir" (CR_C)
	: "cc");
}
static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
{
	for (;;) {
		wfi();

		if (pen_release == cpu) {
			/* OK, proper wakeup, we're done */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
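
/*
 * Illustrative sketch (not part of this file): the loop above spins until
 * another CPU publishes this CPU's number in pen_release. The waking side
 * therefore needs a helper along the following lines, typically kept in the
 * platform's platsmp.c, which stores the value and pushes it out of the
 * cache so the powered-down, non-coherent CPU can observe it. The helper
 * name and the use of sync_cache_w() (from <asm/cacheflush.h>) are
 * assumptions for illustration, not a quote of the SPEAr sources.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();			/* order the store before the flush */
	sync_cache_w(&pen_release);	/* clean it to a point the dying CPU can see */
}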
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void __ref spear13xx_cpu_die(unsigned int cpu)
{
	int spurious = 0;

	/* we're ready for shutdown now, so do it */
	cpu_enter_lowpower();
	spear13xx_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
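
/*
 * Illustrative sketch (not part of this file): spear13xx_cpu_die() is not
 * called directly. It is hooked into the machine's struct smp_operations
 * (declared in <asm/smp.h>) so the generic CPU-hotplug core invokes it when
 * a CPU goes offline, and the table is then referenced from the machine
 * descriptor. The layout below shows the typical shape of that hookup; the
 * secondary-boot callback name is an assumption used only for illustration.
 */
const struct smp_operations spear13xx_smp_ops __initconst = {
	.smp_boot_secondary	= spear13xx_boot_secondary,	/* assumed name */
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= spear13xx_cpu_die,
#endif
};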