/*
 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
#include <linux/compiler.h>

#include "power.h"

const char *pm_labels[] = { "mem", "standby", "freeze", NULL };
const char *pm_states[PM_SUSPEND_MAX];

static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;
static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);

enum freeze_state __read_mostly suspend_freeze_state;
static DEFINE_SPINLOCK(suspend_freeze_lock);

void freeze_set_ops(const struct platform_freeze_ops *ops)
{
        lock_system_sleep();
        freeze_ops = ops;
        unlock_system_sleep();
}

static void freeze_begin(void)
{
        suspend_freeze_state = FREEZE_STATE_NONE;
}

static void freeze_enter(void)
{
        spin_lock_irq(&suspend_freeze_lock);
        if (pm_wakeup_pending())
                goto out;

        suspend_freeze_state = FREEZE_STATE_ENTER;
        spin_unlock_irq(&suspend_freeze_lock);

        get_online_cpus();
        cpuidle_resume();

        /* Push all the CPUs into the idle loop. */
        wake_up_all_idle_cpus();
        pr_debug("PM: suspend-to-idle\n");
        /* Make the current CPU wait so it can enter the idle loop too. */
        wait_event(suspend_freeze_wait_head,
                   suspend_freeze_state == FREEZE_STATE_WAKE);
        pr_debug("PM: resume from suspend-to-idle\n");

        cpuidle_pause();
        put_online_cpus();

        spin_lock_irq(&suspend_freeze_lock);

 out:
        suspend_freeze_state = FREEZE_STATE_NONE;
        spin_unlock_irq(&suspend_freeze_lock);
}

void freeze_wake(void)
{
        unsigned long flags;

        spin_lock_irqsave(&suspend_freeze_lock, flags);
        if (suspend_freeze_state > FREEZE_STATE_NONE) {
                suspend_freeze_state = FREEZE_STATE_WAKE;
                wake_up(&suspend_freeze_wait_head);
        }
        spin_unlock_irqrestore(&suspend_freeze_lock, flags);
}
EXPORT_SYMBOL_GPL(freeze_wake);

static bool valid_state(suspend_state_t state)
{
        /*
         * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
         * support and need to be valid to the low level
         * implementation, no valid callback implies that none are valid.
         */
        return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}

/*
 * If this is set, the "mem" label always corresponds to the deepest sleep state
 * available, the "standby" label corresponds to the second deepest sleep state
 * available (if any), and the "freeze" label corresponds to the remaining
 * available sleep state (if there is one).
 */
static bool relative_states;

static int __init sleep_states_setup(char *str)
{
        relative_states = !strncmp(str, "1", 1);
        pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 0 : 2];
        return 1;
}

__setup("relative_sleep_states=", sleep_states_setup);

/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
        suspend_state_t i;
        int j = 0;

        lock_system_sleep();

        suspend_ops = ops;
        for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
                if (valid_state(i)) {
                        pm_states[i] = pm_labels[j++];
                } else if (!relative_states) {
                        pm_states[i] = NULL;
                        j++;
                }

        pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];

        unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);

/**
 * suspend_valid_only_mem - Generic memory-only valid callback.
 *
 * Platform drivers that implement mem suspend only and only need to check for
 * that in their .valid() callback can use this instead of rolling their own
 * .valid() callback.
 */
int suspend_valid_only_mem(suspend_state_t state)
{
        return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);

static bool sleep_state_supported(suspend_state_t state)
{
        return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter);
}

static int platform_suspend_prepare(suspend_state_t state)
{
        return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
                suspend_ops->prepare() : 0;
}

static int platform_suspend_prepare_late(suspend_state_t state)
{
        return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
                freeze_ops->prepare() : 0;
}

static int platform_suspend_prepare_noirq(suspend_state_t state)
{
        return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
                suspend_ops->prepare_late() : 0;
}

static void platform_resume_noirq(suspend_state_t state)
{
        if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
                suspend_ops->wake();
}

static void platform_resume_early(suspend_state_t state)
{
        if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
                freeze_ops->restore();
}

static void platform_resume_finish(suspend_state_t state)
{
        if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
                suspend_ops->finish();
}

static int platform_suspend_begin(suspend_state_t state)
{
        if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin)
                return freeze_ops->begin();
        else if (suspend_ops->begin)
                return suspend_ops->begin(state);
        else
                return 0;
}

static void platform_resume_end(suspend_state_t state)
{
        if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
                freeze_ops->end();
        else if (suspend_ops->end)
                suspend_ops->end();
}

static void platform_recover(suspend_state_t state)
{
        if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
                suspend_ops->recover();
}

static bool platform_suspend_again(suspend_state_t state)
{
        return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ?
                suspend_ops->suspend_again() : false;
}

static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
        if (pm_test_level == level) {
                printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
                mdelay(5000);
                return 1;
        }
#endif /* !CONFIG_PM_DEBUG */
        return 0;
}

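/*
 * Worked example: with CONFIG_PM_DEBUG, pm_test_level is selected through
 * /sys/power/pm_test, e.g. "echo devices > /sys/power/pm_test" followed by
 * "echo mem > /sys/power/state".  The run then stops at
 * suspend_test(TEST_DEVICES), waits the 5 seconds above and resumes without
 * ever entering the sleep state.
 */
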
/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation).  Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
        int error;

        if (!sleep_state_supported(state))
                return -EPERM;

        pm_prepare_console();

        error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
        if (error)
                goto Finish;

        trace_suspend_resume(TPS("freeze_processes"), 0, true);
        error = suspend_freeze_processes();
        trace_suspend_resume(TPS("freeze_processes"), 0, false);
        if (!error)
                return 0;

        suspend_stats.failed_freeze++;
        dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
        return error;
}

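/*
 * Example (illustrative, not part of this file): the pm_notifier_call_chain()
 * calls above are what deliver PM_SUSPEND_PREPARE and PM_POST_SUSPEND to
 * callbacks registered with register_pm_notifier().  A hypothetical driver
 * hook (the "foo_" names are assumptions):
 */
#if 0
static int foo_pm_notify(struct notifier_block *nb, unsigned long action,
                         void *data)
{
        switch (action) {
        case PM_SUSPEND_PREPARE:
                /* Quiesce the driver before processes are frozen. */
                break;
        case PM_POST_SUSPEND:
                /* Undo the above after resume (or after a failed suspend). */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block foo_pm_nb = {
        .notifier_call = foo_pm_notify,
};

/* Registered in the driver's init path: register_pm_notifier(&foo_pm_nb); */
#endif
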
/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
        local_irq_disable();
}

/* default implementation */
void __weak arch_suspend_enable_irqs(void)
{
        local_irq_enable();
}

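/*
 * Example (illustrative): an architecture can override the __weak defaults
 * above simply by providing its own non-weak definitions, for instance to add
 * bookkeeping around the IRQ transition.  Hypothetical sketch:
 */
#if 0
void arch_suspend_disable_irqs(void)
{
        /* Arch-specific accounting could go here. */
        local_irq_disable();
}
#endif
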
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
        int error;

        error = platform_suspend_prepare(state);
        if (error)
                goto Platform_finish;

        error = dpm_suspend_late(PMSG_SUSPEND);
        if (error) {
                printk(KERN_ERR "PM: late suspend of devices failed\n");
                goto Platform_finish;
        }
        error = platform_suspend_prepare_late(state);
        if (error)
                goto Devices_early_resume;

        error = dpm_suspend_noirq(PMSG_SUSPEND);
        if (error) {
                printk(KERN_ERR "PM: noirq suspend of devices failed\n");
                goto Platform_early_resume;
        }
        error = platform_suspend_prepare_noirq(state);
        if (error)
                goto Platform_wake;

        if (suspend_test(TEST_PLATFORM))
                goto Platform_wake;

        /*
         * PM_SUSPEND_FREEZE equals
         * frozen processes + suspended devices + idle processors.
         * Thus we should invoke freeze_enter() soon after
         * all the devices are suspended.
         */
        if (state == PM_SUSPEND_FREEZE) {
                trace_suspend_resume(TPS("machine_suspend"), state, true);
                freeze_enter();
                trace_suspend_resume(TPS("machine_suspend"), state, false);
                goto Platform_wake;
        }

        error = disable_nonboot_cpus();
        if (error || suspend_test(TEST_CPUS))
                goto Enable_cpus;

        arch_suspend_disable_irqs();
        BUG_ON(!irqs_disabled());

        error = syscore_suspend();
        if (!error) {
                *wakeup = pm_wakeup_pending();
                if (!(suspend_test(TEST_CORE) || *wakeup)) {
                        trace_suspend_resume(TPS("machine_suspend"),
                                state, true);
                        error = suspend_ops->enter(state);
                        trace_suspend_resume(TPS("machine_suspend"),
                                state, false);
                        events_check_enabled = false;
                }
                syscore_resume();
        }

        arch_suspend_enable_irqs();
        BUG_ON(irqs_disabled());

 Enable_cpus:
        enable_nonboot_cpus();

 Platform_wake:
        platform_resume_noirq(state);
        dpm_resume_noirq(PMSG_RESUME);

 Platform_early_resume:
        platform_resume_early(state);

 Devices_early_resume:
        dpm_resume_early(PMSG_RESUME);

 Platform_finish:
        platform_resume_finish(state);
        return error;
}

/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
        int error;
        bool wakeup = false;

        if (!sleep_state_supported(state))
                return -ENOSYS;

        error = platform_suspend_begin(state);
        if (error)
                goto Close;

        suspend_console();
        suspend_test_start();
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
                pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
                goto Recover_platform;
        }
        suspend_test_finish("suspend devices");
        if (suspend_test(TEST_DEVICES))
                goto Recover_platform;

        do {
                error = suspend_enter(state, &wakeup);
        } while (!error && !wakeup && platform_suspend_again(state));

 Resume_devices:
        suspend_test_start();
        dpm_resume_end(PMSG_RESUME);
        suspend_test_finish("resume devices");
        trace_suspend_resume(TPS("resume_console"), state, true);
        resume_console();
        trace_suspend_resume(TPS("resume_console"), state, false);

 Close:
        platform_resume_end(state);
        return error;

 Recover_platform:
        platform_recover(state);
        goto Resume_devices;
}

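/*
 * Example (illustrative sketch): the platform_suspend_again() loop above lets
 * a platform transparently re-enter suspend after a spurious wakeup.  A
 * hypothetical .suspend_again callback (foo_ec_event_pending() is an assumed
 * helper, not a real API):
 */
#if 0
static bool foo_suspend_again(void)
{
        /* Re-enter suspend unless the embedded controller reported a real event. */
        return !foo_ec_event_pending();
}
#endif
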
/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
        suspend_thaw_processes();
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
}

/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case.  Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
        int error;

        trace_suspend_resume(TPS("suspend_enter"), state, true);
        if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
                if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
                        pr_warning("PM: Unsupported test mode for freeze state,"
                                   "please choose none/freezer/devices/platform.\n");
                        return -EAGAIN;
                }
#endif
        } else if (!valid_state(state)) {
                return -EINVAL;
        }
        if (!mutex_trylock(&pm_mutex))
                return -EBUSY;

        if (state == PM_SUSPEND_FREEZE)
                freeze_begin();

        trace_suspend_resume(TPS("sync_filesystems"), 0, true);
        printk(KERN_INFO "PM: Syncing filesystems ... ");
        sys_sync();
        printk("done.\n");
        trace_suspend_resume(TPS("sync_filesystems"), 0, false);

        pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
        error = suspend_prepare(state);
        if (error)
                goto Unlock;

        if (suspend_test(TEST_FREEZER))
                goto Finish;

        trace_suspend_resume(TPS("suspend_enter"), state, false);
        pr_debug("PM: Entering %s sleep\n", pm_states[state]);
        pm_restrict_gfp_mask();
        error = suspend_devices_and_enter(state);
        pm_restore_gfp_mask();

 Finish:
        pr_debug("PM: Finishing wakeup.\n");
        suspend_finish();
 Unlock:
        mutex_unlock(&pm_mutex);
        return error;
}

/**
 * pm_suspend - Externally visible function for suspending the system.
 * @state: System sleep state to enter.
 *
 * Check if the value of @state represents one of the supported states,
 * execute enter_state() and update system suspend statistics.
 */
int pm_suspend(suspend_state_t state)
{
        int error;

        if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
                return -EINVAL;

        error = enter_state(state);
        if (error) {
                suspend_stats.fail++;
                dpm_save_failed_errno(error);
        } else {
                suspend_stats.success++;
        }
        return error;
}
EXPORT_SYMBOL(pm_suspend);
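
/*
 * Example (illustrative, not part of this file): user space normally reaches
 * pm_suspend() by writing one of the pm_states[] labels to /sys/power/state,
 * but kernel code may also call the export directly.  Hypothetical sketch:
 */
#if 0
static int foo_force_mem_sleep(void)
{
        /* PM_SUSPEND_ON and PM_SUSPEND_MAX are rejected with -EINVAL above. */
        return pm_suspend(PM_SUSPEND_MEM);
}
#endif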