include/linux/freezer.h
/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}
/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
static inline bool try_to_freeze(void)
{
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}
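/*
 * Illustrative sketch of how try_to_freeze() is typically used by a
 * freezable kernel thread: call it once per loop iteration so the thread
 * parks in the refrigerator while tasks are being frozen.  The thread
 * function and the do_pending_work() helper below are hypothetical names,
 * not part of this header.
 *
 *	static int my_worker_thread(void *unused)
 *	{
 *		set_freezable();			// kthreads are not freezable by default
 *		while (!kthread_should_stop()) {
 *			try_to_freeze();		// park here if freezing(current)
 *			do_pending_work();		// hypothetical work item
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */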
extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);

#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed.  Fortunately, in the ____call_usermodehelper() case the
 * parent won't really block freeze_processes(), since ____call_usermodehelper()
 * (the child) does a little before exec/exit and it can't be frozen before
 * waking up the parent.
 */
/* Tell the freezer not to count the current task as freezable. */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}

/*
 * Tell the freezer to count the current task as freezable again and try to
 * freeze it.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	try_to_freeze();
}

/*
 * Check if the task should be counted as freezable by the freezer
 */
static inline int freezer_should_skip(struct task_struct *p)
{
	return !!(p->flags & PF_FREEZER_SKIP);
}
/*
 * These macros are intended to be used whenever you want to allow a task
 * that's sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen.
 * Note that neither of them returns any clear indication of whether a freeze
 * event happened while in this function.
 */
/* Like schedule(), but should not block the freezer. */
#define freezable_schedule()						\
({									\
	freezer_do_not_count();						\
	schedule();							\
	freezer_count();						\
})
/* Like schedule_timeout_killable(), but should not block the freezer. */
#define freezable_schedule_timeout_killable(timeout)			\
({									\
	long __retval;							\
	freezer_do_not_count();						\
	__retval = schedule_timeout_killable(timeout);			\
	freezer_count();						\
	__retval;							\
})
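/*
 * Illustrative sketch: a killable sleep that stays freezer-friendly can be
 * obtained by substituting this macro for a plain schedule_timeout_killable()
 * call; the return value is the same (remaining jiffies, 0 on timeout), but
 * PF_FREEZER_SKIP is held across the sleep so the freezer does not wait for
 * this task.
 *
 *	long remaining = freezable_schedule_timeout_killable(msecs_to_jiffies(100));
 */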
/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */
#define wait_event_freezekillable(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count();						\
	__retval;							\
})
#define wait_event_freezable(wq, condition)				\
({									\
	int __retval;							\
	for (;;) {							\
		__retval = wait_event_interruptible(wq,			\
				(condition) || freezing(current));	\
		if (__retval || (condition))				\
			break;						\
		try_to_freeze();					\
	}								\
	__retval;							\
})
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __retval = timeout;					\
	for (;;) {							\
		__retval = wait_event_interruptible_timeout(wq,		\
				(condition) || freezing(current),	\
				__retval);				\
		if (__retval <= 0 || (condition))			\
			break;						\
		try_to_freeze();					\
	}								\
	__retval;							\
})
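/*
 * Illustrative sketch of a freezer-friendly timed wait in a polling kernel
 * thread; "my_wq" and "data_ready" are hypothetical names.  The return value
 * follows wait_event_interruptible_timeout().
 *
 *	long ret = wait_event_freezable_timeout(my_wq, data_ready,
 *						msecs_to_jiffies(500));
 *	if (ret > 0)
 *		;	// condition became true, "ret" jiffies were left
 *	else if (ret == 0)
 *		;	// timed out with the condition still false
 *	else
 *		;	// interrupted by a signal (-ERESTARTSYS)
 */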
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
#define freezable_schedule()  schedule()

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define wait_event_freezable(wq, condition)				\
		wait_event_interruptible(wq, condition)

#define wait_event_freezable_timeout(wq, condition, timeout)		\
		wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezekillable(wq, condition)			\
		wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif	/* FREEZER_H_INCLUDED */