#ifndef _LINUX_SUSPEND_H
#define _LINUX_SUSPEND_H

#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <asm/errno.h>
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
extern void pm_set_vt_switch(int);
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
#else
static inline void pm_set_vt_switch(int do_switch)
{
}
static inline int pm_prepare_console(void)
{
	return 0;
}
static inline void pm_restore_console(void)
{
}
#endif
typedef int __bitwise suspend_state_t;

#define PM_SUSPEND_ON		((__force suspend_state_t) 0)
#define PM_SUSPEND_STANDBY	((__force suspend_state_t) 1)
#define PM_SUSPEND_MEM		((__force suspend_state_t) 3)
#define PM_SUSPEND_MAX		((__force suspend_state_t) 4)
enum suspend_stat_step {
	SUSPEND_FREEZE = 1,
	SUSPEND_PREPARE,
	SUSPEND_SUSPEND,
	SUSPEND_SUSPEND_NOIRQ,
	SUSPEND_RESUME_NOIRQ,
	SUSPEND_RESUME
};
struct suspend_stats {
	int	failed_suspend_noirq;
	int	failed_resume_noirq;
#define	REC_FAILED_NUM	2
	int	last_failed_dev;
	char	failed_devs[REC_FAILED_NUM][40];
	int	last_failed_errno;
	int	errno[REC_FAILED_NUM];
	int	last_failed_step;
	enum suspend_stat_step	failed_steps[REC_FAILED_NUM];
};

extern struct suspend_stats suspend_stats;
static inline void dpm_save_failed_dev(const char *name)
{
	strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
		name,
		sizeof(suspend_stats.failed_devs[0]));
	suspend_stats.last_failed_dev++;
	suspend_stats.last_failed_dev %= REC_FAILED_NUM;
}
static inline void dpm_save_failed_errno(int err)
{
	suspend_stats.errno[suspend_stats.last_failed_errno] = err;
	suspend_stats.last_failed_errno++;
	suspend_stats.last_failed_errno %= REC_FAILED_NUM;
}
static inline void dpm_save_failed_step(enum suspend_stat_step step)
{
	suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
	suspend_stats.last_failed_step++;
	suspend_stats.last_failed_step %= REC_FAILED_NUM;
}
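/*
 * Usage sketch (hypothetical PM core call site, not part of this header's
 * API): record which device failed in the noirq suspend phase, the error it
 * returned, and the step at which it happened.  hyp_suspend_device_noirq()
 * is a made-up name; dev_name() is from <linux/device.h>.
 *
 *	error = hyp_suspend_device_noirq(dev);
 *	if (error) {
 *		dpm_save_failed_dev(dev_name(dev));
 *		dpm_save_failed_errno(error);
 *		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
 *	}
 */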
/**
 * struct platform_suspend_ops - Callbacks for managing platform dependent
 *	system sleep states.
 *
 * @valid: Callback to determine if given system sleep state is supported by
 *	the platform.
 *	Valid (i.e. supported) states are advertised in /sys/power/state. Note
 *	that it still may be impossible to enter given system sleep state if the
 *	conditions aren't right.
 *	There is the %suspend_valid_only_mem function available that can be
 *	assigned to this if the platform only supports mem sleep.
 *
 * @begin: Initialise a transition to given system sleep state.
 *	@begin() is executed right prior to suspending devices. The information
 *	conveyed to the platform code by @begin() should be disregarded by it as
 *	soon as @end() is executed. If @begin() fails (i.e. returns nonzero),
 *	@prepare(), @enter() and @finish() will not be called by the PM core.
 *	This callback is optional. However, if it is implemented, the argument
 *	passed to @enter() is redundant and should be ignored.
 *
 * @prepare: Prepare the platform for entering the system sleep state indicated
 *	by @begin().
 *	@prepare() is called right after devices have been suspended (i.e. the
 *	appropriate .suspend() method has been executed for each device) and
 *	before device drivers' late suspend callbacks are executed. It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@prepare_late(), @enter(),
 *	and @wake() will not be called in that case).
 *
 * @prepare_late: Finish preparing the platform for entering the system sleep
 *	state indicated by @begin().
 *	@prepare_late() is called before disabling nonboot CPUs and after
 *	device drivers' late suspend callbacks have been executed. It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@enter() will not be
 *	executed).
 *
 * @enter: Enter the system sleep state indicated by @begin() or represented by
 *	the argument if @begin() is not implemented.
 *	This callback is mandatory. It returns 0 on success or a negative
 *	error code otherwise, in which case the system cannot enter the desired
 *	sleep state.
 *
 * @wake: Called when the system has just left a sleep state, right after
 *	the nonboot CPUs have been enabled and before device drivers' early
 *	resume callbacks are executed.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare_late(). If implemented, it is always called
 *	after @prepare_late() and @enter(), even if one of them fails.
 *
 * @finish: Finish wake-up of the platform.
 *	@finish() is called right prior to calling device drivers' regular
 *	resume callbacks.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare(). If implemented, it is always called after
 *	@enter() and @wake(), even if any of them fails. It is also executed
 *	after a failing @prepare().
 *
 * @suspend_again: Returns whether the system should suspend again (true) or
 *	not (false). If the platform wants to poll sensors or execute some
 *	code while suspended, without waking up userspace and most devices,
 *	the suspend_again callback is the place to do it, assuming that a
 *	periodic wakeup or alarm wakeup has already been set up. This allows
 *	code to run while the system stays suspended from the point of view
 *	of userland and devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state or
 *	the transition to the sleep state has been aborted.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @begin(). Accordingly, platforms implementing @begin()
 *	should also provide an @end() which cleans up transitions aborted before
 *	@enter().
 *
 * @recover: Recover the platform from a suspend failure.
 *	Called by the PM core if the suspending of devices fails.
 *	This callback is optional and should only be implemented by platforms
 *	which require special recovery actions in that situation.
 */
struct platform_suspend_ops {
	int (*valid)(suspend_state_t state);
	int (*begin)(suspend_state_t state);
	int (*prepare)(void);
	int (*prepare_late)(void);
	int (*enter)(suspend_state_t state);
	void (*wake)(void);
	void (*finish)(void);
	bool (*suspend_again)(void);
	void (*end)(void);
	void (*recover)(void);
};
#ifdef CONFIG_SUSPEND
/**
 * suspend_set_ops - set platform dependent suspend operations
 * @ops: The new suspend operations to set.
 */
extern void suspend_set_ops(const struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);
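/*
 * Registration sketch (hypothetical platform code): a platform that only
 * supports mem sleep can point @valid at suspend_valid_only_mem and must
 * provide at least the mandatory @enter callback.  The hyp_* names below are
 * made up for illustration.
 *
 *	static int hyp_suspend_enter(suspend_state_t state)
 *	{
 *		return hyp_enter_lowpower_mode();
 *	}
 *
 *	static const struct platform_suspend_ops hyp_suspend_ops = {
 *		.valid	= suspend_valid_only_mem,
 *		.enter	= hyp_suspend_enter,
 *	};
 *
 *	suspend_set_ops(&hyp_suspend_ops);
 */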
/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);
/**
 * arch_suspend_enable_irqs - enable IRQs after suspend
 *
 * Enables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_enable_irqs(void);
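/*
 * Override sketch (hypothetical architecture code): because the defaults are
 * weak symbols, an architecture can replace them, for example to quiesce an
 * interrupt controller around the plain IRQ disable.  hyp_irqchip_quiesce()
 * is made up for illustration.
 *
 *	void arch_suspend_disable_irqs(void)
 *	{
 *		hyp_irqchip_quiesce();
 *		local_irq_disable();
 *	}
 */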
extern int pm_suspend(suspend_state_t state);
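/*
 * Usage sketch: kernel code can request a transition to the "mem" sleep state
 * directly (the usual trigger is userspace writing "mem" to /sys/power/state,
 * which reaches the same call):
 *
 *	int error = pm_suspend(PM_SUSPEND_MEM);
 *	if (error)
 *		pr_err("suspend to RAM failed: %d\n", error);
 */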
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem	NULL

static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
#endif /* !CONFIG_SUSPEND */
/* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they have
 * occupied before the suspend are in use.
 */
struct pbe {
	void *address;		/* address of the copy */
	void *orig_address;	/* original address of a page */
	struct pbe *next;
};

/* mm/page_alloc.c */
extern void mark_free_pages(struct zone *zone);
/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below, except for @recover(), must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *	Called right after shrinking memory and before freezing devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *	image has been created *or* put the platform into the normal operation
 *	mode after the hibernation (the same method is executed in both cases).
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *	Called right after the hibernation image has been saved and before
 *	devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *	has been saved to disk.
 *	Called after the nonboot CPUs have been disabled and all of the low
 *	level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the system sleep state
 *	indicated by @set_target() has been left.
 *	Called right after the control has been passed from the boot kernel to
 *	the image kernel, before the nonboot CPUs are enabled and before devices
 *	are resumed. Executed with interrupts disabled.
 *
 * @pre_restore: Prepare system for the restoration from a hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failing image restoration.
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @recover: Recover the platform from a failure to suspend devices.
 *	Called by the PM core if the suspending of devices during hibernation
 *	fails. This callback is optional and should only be implemented by
 *	platforms which require special recovery actions in that situation.
 */
struct platform_hibernation_ops {
	int (*begin)(void);
	void (*end)(void);
	int (*pre_snapshot)(void);
	void (*finish)(void);
	int (*prepare)(void);
	int (*enter)(void);
	void (*leave)(void);
	int (*pre_restore)(void);
	void (*restore_cleanup)(void);
	void (*recover)(void);
};
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
static inline void __init register_nosave_region(unsigned long b, unsigned long e)
{
	__register_nosave_region(b, e, 0);
}
static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
{
	__register_nosave_region(b, e, 1);
}
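/*
 * Usage sketch (hypothetical architecture setup code): exclude a range of
 * page frames, e.g. a firmware-owned region, from the hibernation image.
 * fw_region_start/fw_region_end are made-up names; PFN_DOWN()/PFN_UP() are
 * from <linux/pfn.h>.
 *
 *	register_nosave_region(PFN_DOWN(fw_region_start),
 *			       PFN_UP(fw_region_end));
 */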
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);

extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
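/*
 * Registration sketch (hypothetical platform code): every callback except
 * @recover() must be provided, as noted in the structure description above.
 * The hyp_hib_* names are made up for illustration.
 *
 *	static const struct platform_hibernation_ops hyp_hib_ops = {
 *		.begin		= hyp_hib_begin,
 *		.end		= hyp_hib_end,
 *		.pre_snapshot	= hyp_hib_pre_snapshot,
 *		.finish		= hyp_hib_finish,
 *		.prepare	= hyp_hib_prepare,
 *		.enter		= hyp_hib_enter,
 *		.leave		= hyp_hib_leave,
 *		.pre_restore	= hyp_hib_pre_restore,
 *		.restore_cleanup = hyp_hib_restore_cleanup,
 *	};
 *
 *	hibernation_set_ops(&hyp_hib_ops);
 */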
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
#endif /* CONFIG_HIBERNATION */
/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);

/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);

#define pm_notifier(fn, pri) {					\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_pm_notifier(&fn##_nb);				\
}
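/*
 * Usage sketch (hypothetical subsystem code): get notified around suspend and
 * hibernation transitions using the PM_* events defined above.  The hyp_*
 * names are made up; NOTIFY_OK comes from <linux/notifier.h>.
 *
 *	static int hyp_pm_event(struct notifier_block *nb,
 *				unsigned long event, void *unused)
 *	{
 *		switch (event) {
 *		case PM_SUSPEND_PREPARE:
 *		case PM_HIBERNATION_PREPARE:
 *			hyp_quiesce();
 *			break;
 *		case PM_POST_SUSPEND:
 *		case PM_POST_HIBERNATION:
 *			hyp_resume_work();
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	pm_notifier(hyp_pm_event, 0);
 */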
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;

extern bool pm_wakeup_pending(void);
extern bool pm_get_wakeup_count(unsigned int *count);
extern bool pm_save_wakeup_count(unsigned int count);
#else /* !CONFIG_PM_SLEEP */

static inline int register_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

#define pm_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline bool pm_wakeup_pending(void) { return false; }
#endif /* !CONFIG_PM_SLEEP */
extern struct mutex pm_mutex;

#ifndef CONFIG_HIBERNATE_CALLBACKS
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}

#else

/* Let some subsystems like memory hotadd exclude hibernation */

static inline void lock_system_sleep(void)
{
	mutex_lock(&pm_mutex);
}

static inline void unlock_system_sleep(void)
{
	mutex_unlock(&pm_mutex);
}
#endif
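/*
 * Usage sketch (hypothetical memory hot-add path), per the comment above:
 * hold pm_mutex across the operation so a hibernation transition cannot run
 * concurrently.  hyp_add_memory_block() is made up for illustration.
 *
 *	lock_system_sleep();
 *	hyp_add_memory_block();
 *	unlock_system_sleep();
 */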
#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
/*
 * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
 * to save/restore additional information to/from the array of page
 * frame numbers in the hibernation image. For s390 this is used to
 * save and restore the storage key for each page that is included
 * in the hibernation image.
 */
unsigned long page_key_additional_pages(unsigned long pages);
int page_key_alloc(unsigned long pages);
void page_key_free(void);
void page_key_read(unsigned long *pfn);
void page_key_memorize(unsigned long *pfn);
void page_key_write(void *address);
#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */

static inline unsigned long page_key_additional_pages(unsigned long pages)
{
	return 0;
}
static inline int page_key_alloc(unsigned long pages)
{
	return 0;
}
static inline void page_key_free(void) {}
static inline void page_key_read(unsigned long *pfn) {}
static inline void page_key_memorize(unsigned long *pfn) {}
static inline void page_key_write(void *address) {}

#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */

#endif /* _LINUX_SUSPEND_H */