Linux 4.1.18
kernel/power/suspend.c
/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
#include <linux/compiler.h>
#include <linux/moduleparam.h>

#include "power.h"

const char *pm_labels[] = { "mem", "standby", "freeze", NULL };
const char *pm_states[PM_SUSPEND_MAX];

static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;
static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);

enum freeze_state __read_mostly suspend_freeze_state;
static DEFINE_SPINLOCK(suspend_freeze_lock);

void freeze_set_ops(const struct platform_freeze_ops *ops)
{
        lock_system_sleep();
        freeze_ops = ops;
        unlock_system_sleep();
}

static void freeze_begin(void)
{
        suspend_freeze_state = FREEZE_STATE_NONE;
}

static void freeze_enter(void)
{
        spin_lock_irq(&suspend_freeze_lock);
        if (pm_wakeup_pending())
                goto out;

        suspend_freeze_state = FREEZE_STATE_ENTER;
        spin_unlock_irq(&suspend_freeze_lock);

        get_online_cpus();
        cpuidle_resume();

        /* Push all the CPUs into the idle loop. */
        wake_up_all_idle_cpus();
        pr_debug("PM: suspend-to-idle\n");
        /* Make the current CPU wait so it can enter the idle loop too. */
        wait_event(suspend_freeze_wait_head,
                   suspend_freeze_state == FREEZE_STATE_WAKE);
        pr_debug("PM: resume from suspend-to-idle\n");

        cpuidle_pause();
        put_online_cpus();

        spin_lock_irq(&suspend_freeze_lock);

 out:
        suspend_freeze_state = FREEZE_STATE_NONE;
        spin_unlock_irq(&suspend_freeze_lock);
}

void freeze_wake(void)
{
        unsigned long flags;

        spin_lock_irqsave(&suspend_freeze_lock, flags);
        if (suspend_freeze_state > FREEZE_STATE_NONE) {
                suspend_freeze_state = FREEZE_STATE_WAKE;
                wake_up(&suspend_freeze_wait_head);
        }
        spin_unlock_irqrestore(&suspend_freeze_lock, flags);
}
EXPORT_SYMBOL_GPL(freeze_wake);
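/*
 * Illustrative sketch (not part of the original file): how platform code
 * might hook into the suspend-to-idle ("freeze") path using the interfaces
 * above. All example_* names and the IRQ number are hypothetical; in
 * mainline, freeze_wake() is normally reached via pm_system_wakeup() rather
 * than being called from a driver directly. Compiled out with #if 0.
 */
#if 0
#include <linux/interrupt.h>

#define EXAMPLE_WAKEUP_IRQ      42      /* hypothetical wakeup IRQ number */

static int example_freeze_prepare(void)
{
        /* arm the platform's wakeup source before the CPUs go idle */
        return 0;
}

static void example_freeze_restore(void)
{
        /* undo whatever example_freeze_prepare() set up */
}

static const struct platform_freeze_ops example_freeze_ops = {
        .prepare = example_freeze_prepare,
        .restore = example_freeze_restore,
};

static irqreturn_t example_wakeup_irq(int irq, void *dev_id)
{
        /* end suspend-to-idle: moves suspend_freeze_state to FREEZE_STATE_WAKE */
        freeze_wake();
        return IRQ_HANDLED;
}

static int __init example_freeze_init(void)
{
        freeze_set_ops(&example_freeze_ops);
        return request_irq(EXAMPLE_WAKEUP_IRQ, example_wakeup_irq, 0,
                           "example-wakeup", NULL);
}
#endif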
static bool valid_state(suspend_state_t state)
{
        /*
         * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
         * support and need to be valid to the low level
         * implementation, no valid callback implies that none are valid.
         */
        return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

/*
 * If this is set, the "mem" label always corresponds to the deepest sleep state
 * available, the "standby" label corresponds to the second deepest sleep state
 * available (if any), and the "freeze" label corresponds to the remaining
 * available sleep state (if there is one).
 */
static bool relative_states;

static int __init sleep_states_setup(char *str)
{
        relative_states = !strncmp(str, "1", 1);
        pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 0 : 2];
        return 1;
}

__setup("relative_sleep_states=", sleep_states_setup);
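/*
 * Worked example (added for illustration, assuming a hypothetical platform
 * whose only valid deep state is PM_SUSPEND_MEM): suspend_set_ops() below
 * fills the label table as follows.
 *
 *   default (relative_sleep_states=0):
 *     pm_states[PM_SUSPEND_MEM]     = "mem"
 *     pm_states[PM_SUSPEND_STANDBY] = NULL
 *     pm_states[PM_SUSPEND_FREEZE]  = "freeze"
 *     so /sys/power/state exposes "freeze mem"
 *
 *   with relative_sleep_states=1:
 *     pm_states[PM_SUSPEND_MEM]     = "mem"
 *     pm_states[PM_SUSPEND_STANDBY] = NULL (left untouched)
 *     pm_states[PM_SUSPEND_FREEZE]  = "standby"
 *     so /sys/power/state exposes "standby mem", and writing "standby"
 *     triggers suspend-to-idle rather than failing.
 */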
/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
        suspend_state_t i;
        int j = 0;

        lock_system_sleep();

        suspend_ops = ops;
        for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
                if (valid_state(i)) {
                        pm_states[i] = pm_labels[j++];
                } else if (!relative_states) {
                        pm_states[i] = NULL;
                        j++;
                }

        pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];

        unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);

/**
 * suspend_valid_only_mem - Generic memory-only valid callback.
 *
 * Platform drivers that implement mem suspend only and only need to check for
 * that in their .valid() callback can use this instead of rolling their own
 * .valid() callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
        return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
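/*
 * Illustrative sketch (not part of the original file): how a platform's
 * power-management code might register a mem-only suspend method table,
 * using suspend_valid_only_mem() as its .valid() callback as described
 * above. All example_* names are hypothetical. Compiled out with #if 0.
 */
#if 0
static int example_suspend_enter(suspend_state_t state)
{
        /* program the (hypothetical) power controller and enter S2R here */
        return 0;
}

static const struct platform_suspend_ops example_suspend_ops = {
        .valid = suspend_valid_only_mem,        /* only PM_SUSPEND_MEM is valid */
        .enter = example_suspend_enter,
};

static int __init example_pm_init(void)
{
        /* after this call, "mem" becomes selectable via /sys/power/state */
        suspend_set_ops(&example_suspend_ops);
        return 0;
}
late_initcall(example_pm_init);
#endif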
static bool sleep_state_supported(suspend_state_t state)
{
        return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter);
}

static int platform_suspend_prepare(suspend_state_t state)
{
        return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
                suspend_ops->prepare() : 0;
}

static int platform_suspend_prepare_late(suspend_state_t state)
{
        return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
                freeze_ops->prepare() : 0;
}

static int platform_suspend_prepare_noirq(suspend_state_t state)
{
        return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
                suspend_ops->prepare_late() : 0;
}

static void platform_resume_noirq(suspend_state_t state)
{
        if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
                suspend_ops->wake();
}

static void platform_resume_early(suspend_state_t state)
{
        if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
                freeze_ops->restore();
}

static void platform_resume_finish(suspend_state_t state)
{
        if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
                suspend_ops->finish();
}

static int platform_suspend_begin(suspend_state_t state)
{
        if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin)
                return freeze_ops->begin();
        else if (suspend_ops->begin)
                return suspend_ops->begin(state);
        else
                return 0;
}

static void platform_resume_end(suspend_state_t state)
{
        if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
                freeze_ops->end();
        else if (suspend_ops->end)
                suspend_ops->end();
}

static void platform_recover(suspend_state_t state)
{
        if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
                suspend_ops->recover();
}

static bool platform_suspend_again(suspend_state_t state)
{
        return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ?
                suspend_ops->suspend_again() : false;
}

#ifdef CONFIG_PM_DEBUG
static unsigned int pm_test_delay = 5;
module_param(pm_test_delay, uint, 0644);
MODULE_PARM_DESC(pm_test_delay,
                 "Number of seconds to wait before resuming from suspend test");
#endif

static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
        if (pm_test_level == level) {
                printk(KERN_INFO "suspend debug: Waiting for %d second(s).\n",
                                pm_test_delay);
                mdelay(pm_test_delay * 1000);
                return 1;
        }
#endif /* !CONFIG_PM_DEBUG */
        return 0;
}

/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation). Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
        int error;

        if (!sleep_state_supported(state))
                return -EPERM;

        pm_prepare_console();

        error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
        if (error)
                goto Finish;

        trace_suspend_resume(TPS("freeze_processes"), 0, true);
        error = suspend_freeze_processes();
        trace_suspend_resume(TPS("freeze_processes"), 0, false);
        if (!error)
                return 0;

        suspend_stats.failed_freeze++;
        dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
        return error;
}

/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
        local_irq_disable();
}

/* default implementation */
void __weak arch_suspend_enable_irqs(void)
{
        local_irq_enable();
}
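/*
 * The two __weak defaults above can be overridden by architecture code.
 * A minimal hypothetical override (the arch and file name are made up)
 * might look like the sketch below. Compiled out with #if 0.
 */
#if 0
/* e.g. in a hypothetical arch/foo/kernel/pm.c */
void arch_suspend_disable_irqs(void)
{
        /* do any arch-specific quiescing, then mask interrupts */
        local_irq_disable();
}

void arch_suspend_enable_irqs(void)
{
        local_irq_enable();
}
#endif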
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
        int error;

        error = platform_suspend_prepare(state);
        if (error)
                goto Platform_finish;

        error = dpm_suspend_late(PMSG_SUSPEND);
        if (error) {
                printk(KERN_ERR "PM: late suspend of devices failed\n");
                goto Platform_finish;
        }
        error = platform_suspend_prepare_late(state);
        if (error)
                goto Devices_early_resume;

        error = dpm_suspend_noirq(PMSG_SUSPEND);
        if (error) {
                printk(KERN_ERR "PM: noirq suspend of devices failed\n");
                goto Platform_early_resume;
        }
        error = platform_suspend_prepare_noirq(state);
        if (error)
                goto Platform_wake;

        if (suspend_test(TEST_PLATFORM))
                goto Platform_wake;

        /*
         * PM_SUSPEND_FREEZE equals
         * frozen processes + suspended devices + idle processors.
         * Thus we should invoke freeze_enter() soon after
         * all the devices are suspended.
         */
        if (state == PM_SUSPEND_FREEZE) {
                trace_suspend_resume(TPS("machine_suspend"), state, true);
                freeze_enter();
                trace_suspend_resume(TPS("machine_suspend"), state, false);
                goto Platform_wake;
        }

        error = disable_nonboot_cpus();
        if (error || suspend_test(TEST_CPUS))
                goto Enable_cpus;

        arch_suspend_disable_irqs();
        BUG_ON(!irqs_disabled());

        error = syscore_suspend();
        if (!error) {
                *wakeup = pm_wakeup_pending();
                if (!(suspend_test(TEST_CORE) || *wakeup)) {
                        trace_suspend_resume(TPS("machine_suspend"),
                                state, true);
                        error = suspend_ops->enter(state);
                        trace_suspend_resume(TPS("machine_suspend"),
                                state, false);
                        events_check_enabled = false;
                }
                syscore_resume();
        }

        arch_suspend_enable_irqs();
        BUG_ON(irqs_disabled());

 Enable_cpus:
        enable_nonboot_cpus();

 Platform_wake:
        platform_resume_noirq(state);
        dpm_resume_noirq(PMSG_RESUME);

 Platform_early_resume:
        platform_resume_early(state);

 Devices_early_resume:
        dpm_resume_early(PMSG_RESUME);

 Platform_finish:
        platform_resume_finish(state);
        return error;
}

/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
        int error;
        bool wakeup = false;

        if (!sleep_state_supported(state))
                return -ENOSYS;

        error = platform_suspend_begin(state);
        if (error)
                goto Close;

        suspend_console();
        suspend_test_start();
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
                pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
                goto Recover_platform;
        }
        suspend_test_finish("suspend devices");
        if (suspend_test(TEST_DEVICES))
                goto Recover_platform;

        do {
                error = suspend_enter(state, &wakeup);
        } while (!error && !wakeup && platform_suspend_again(state));

 Resume_devices:
        suspend_test_start();
        dpm_resume_end(PMSG_RESUME);
        suspend_test_finish("resume devices");
        trace_suspend_resume(TPS("resume_console"), state, true);
        resume_console();
        trace_suspend_resume(TPS("resume_console"), state, false);

 Close:
        platform_resume_end(state);
        return error;

 Recover_platform:
        platform_recover(state);
        goto Resume_devices;
}
/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
        suspend_thaw_processes();
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
}

/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case. Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
        int error;

        trace_suspend_resume(TPS("suspend_enter"), state, true);
        if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
                if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
                        pr_warning("PM: Unsupported test mode for freeze state, "
                                   "please choose none/freezer/devices/platform.\n");
                        return -EAGAIN;
                }
#endif
        } else if (!valid_state(state)) {
                return -EINVAL;
        }
        if (!mutex_trylock(&pm_mutex))
                return -EBUSY;

        if (state == PM_SUSPEND_FREEZE)
                freeze_begin();

        trace_suspend_resume(TPS("sync_filesystems"), 0, true);
        printk(KERN_INFO "PM: Syncing filesystems ... ");
        sys_sync();
        printk("done.\n");
        trace_suspend_resume(TPS("sync_filesystems"), 0, false);

        pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
        error = suspend_prepare(state);
        if (error)
                goto Unlock;

        if (suspend_test(TEST_FREEZER))
                goto Finish;

        trace_suspend_resume(TPS("suspend_enter"), state, false);
        pr_debug("PM: Entering %s sleep\n", pm_states[state]);
        pm_restrict_gfp_mask();
        error = suspend_devices_and_enter(state);
        pm_restore_gfp_mask();

 Finish:
        pr_debug("PM: Finishing wakeup.\n");
        suspend_finish();
 Unlock:
        mutex_unlock(&pm_mutex);
        return error;
}
/**
 * pm_suspend - Externally visible function for suspending the system.
 * @state: System sleep state to enter.
 *
 * Check if the value of @state represents one of the supported states,
 * execute enter_state() and update system suspend statistics.
 */
int pm_suspend(suspend_state_t state)
{
        int error;

        if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
                return -EINVAL;

        error = enter_state(state);
        if (error) {
                suspend_stats.fail++;
                dpm_save_failed_errno(error);
        } else {
                suspend_stats.success++;
        }
        return error;
}
EXPORT_SYMBOL(pm_suspend);
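/*
 * Illustrative sketch (not part of the original file): a kernel-side caller
 * of pm_suspend(). In mainline, the usual path is a write to
 * /sys/power/state, parsed in kernel/power/main.c, which ends up calling
 * pm_suspend(). The helper below is only a hypothetical example of the
 * error handling a direct caller would need. Compiled out with #if 0.
 */
#if 0
static int example_go_to_sleep(void)
{
        int error;

        /* try full suspend-to-RAM first, fall back to suspend-to-idle */
        error = pm_suspend(PM_SUSPEND_MEM);
        if (error == -EINVAL || error == -ENOSYS)
                error = pm_suspend(PM_SUSPEND_FREEZE);

        return error;   /* 0 after a successful suspend/resume cycle */
}
#endif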