/*
 * cpuidle.h - a generic framework for CPU idle power management
 *
 * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Shaohua Li <shaohua.li@intel.com>
 *          Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */
11 #ifndef _LINUX_CPUIDLE_H
12 #define _LINUX_CPUIDLE_H
14 #include <linux/percpu.h>
15 #include <linux/list.h>
16 #include <linux/hrtimer.h>
17 #include <linux/context_tracking.h>
19 #define CPUIDLE_STATE_MAX 10
20 #define CPUIDLE_NAME_LEN 16
21 #define CPUIDLE_DESC_LEN 32
/* Forward declarations; full definitions appear later in this header. */
struct cpuidle_device;
struct cpuidle_driver;
/****************************
 * CPUIDLE DEVICE INTERFACE *
 ****************************/

/* Bits of cpuidle_state_usage.disable recording who disabled the state. */
#define CPUIDLE_STATE_DISABLED_BY_USER		BIT(0)
#define CPUIDLE_STATE_DISABLED_BY_DRIVER	BIT(1)
36 struct cpuidle_state_usage
{
37 unsigned long long disable
;
38 unsigned long long usage
;
40 unsigned long long above
; /* Number of times it's been too deep */
41 unsigned long long below
; /* Number of times it's been too shallow */
42 unsigned long long rejected
; /* Number of times idle entry was rejected */
44 unsigned long long s2idle_usage
;
45 unsigned long long s2idle_time
; /* in US */
49 struct cpuidle_state
{
50 char name
[CPUIDLE_NAME_LEN
];
51 char desc
[CPUIDLE_DESC_LEN
];
54 s64 target_residency_ns
;
56 unsigned int exit_latency
; /* in US */
57 int power_usage
; /* in mW */
58 unsigned int target_residency
; /* in US */
60 int (*enter
) (struct cpuidle_device
*dev
,
61 struct cpuidle_driver
*drv
,
64 void (*enter_dead
) (struct cpuidle_device
*dev
, int index
);
67 * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
68 * suspended, so it must not re-enable interrupts at any point (even
69 * temporarily) or attempt to change states of clock event devices.
71 * This callback may point to the same function as ->enter if all of
72 * the above requirements are met by it.
74 int (*enter_s2idle
)(struct cpuidle_device
*dev
,
75 struct cpuidle_driver
*drv
,
/* Idle State Flags */
#define CPUIDLE_FLAG_NONE	(0x00)
#define CPUIDLE_FLAG_POLLING	BIT(0) /* polling state */
#define CPUIDLE_FLAG_COUPLED	BIT(1) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
#define CPUIDLE_FLAG_UNUSABLE	BIT(3) /* avoid using this state */
#define CPUIDLE_FLAG_OFF	BIT(4) /* disable this state by default */
#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */
#define CPUIDLE_FLAG_RCU_IDLE	BIT(6) /* idle-state takes care of RCU */
/* Opaque sysfs kobject wrapper types; defined elsewhere in the cpuidle code. */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
struct cpuidle_driver_kobj;
93 struct cpuidle_device
{
94 unsigned int registered
:1;
95 unsigned int enabled
:1;
96 unsigned int poll_time_limit
:1;
101 u64 last_residency_ns
;
103 u64 forced_idle_latency_limit_ns
;
104 struct cpuidle_state_usage states_usage
[CPUIDLE_STATE_MAX
];
105 struct cpuidle_state_kobj
*kobjs
[CPUIDLE_STATE_MAX
];
106 struct cpuidle_driver_kobj
*kobj_driver
;
107 struct cpuidle_device_kobj
*kobj_dev
;
108 struct list_head device_list
;
110 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
111 cpumask_t coupled_cpus
;
112 struct cpuidle_coupled
*coupled
;
116 DECLARE_PER_CPU(struct cpuidle_device
*, cpuidle_devices
);
117 DECLARE_PER_CPU(struct cpuidle_device
, cpuidle_dev
);
119 static __always_inline
void ct_cpuidle_enter(void)
121 lockdep_assert_irqs_disabled();
123 * Idle is allowed to (temporary) enable IRQs. It
124 * will return with IRQs disabled.
126 * Trace IRQs enable here, then switch off RCU, and have
127 * arch_cpu_idle() use raw_local_irq_enable(). Note that
128 * ct_idle_enter() relies on lockdep IRQ state, so switch that
129 * last -- this is very similar to the entry code.
131 trace_hardirqs_on_prepare();
132 lockdep_hardirqs_on_prepare();
133 instrumentation_end();
135 lockdep_hardirqs_on(_RET_IP_
);
138 static __always_inline
void ct_cpuidle_exit(void)
141 * Carefully undo the above.
143 lockdep_hardirqs_off(_RET_IP_
);
145 instrumentation_begin();
148 /****************************
149 * CPUIDLE DRIVER INTERFACE *
150 ****************************/
152 struct cpuidle_driver
{
154 struct module
*owner
;
156 /* used by the cpuidle framework to setup the broadcast timer */
157 unsigned int bctimer
:1;
158 /* states array must be ordered in decreasing power consumption */
159 struct cpuidle_state states
[CPUIDLE_STATE_MAX
];
161 int safe_state_index
;
163 /* the driver handles the cpus in cpumask */
164 struct cpumask
*cpumask
;
166 /* preferred governor to switch at register time */
167 const char *governor
;
170 #ifdef CONFIG_CPU_IDLE
171 extern void disable_cpuidle(void);
172 extern bool cpuidle_not_available(struct cpuidle_driver
*drv
,
173 struct cpuidle_device
*dev
);
175 extern int cpuidle_select(struct cpuidle_driver
*drv
,
176 struct cpuidle_device
*dev
,
178 extern int cpuidle_enter(struct cpuidle_driver
*drv
,
179 struct cpuidle_device
*dev
, int index
);
180 extern void cpuidle_reflect(struct cpuidle_device
*dev
, int index
);
181 extern u64
cpuidle_poll_time(struct cpuidle_driver
*drv
,
182 struct cpuidle_device
*dev
);
184 extern int cpuidle_register_driver(struct cpuidle_driver
*drv
);
185 extern struct cpuidle_driver
*cpuidle_get_driver(void);
186 extern void cpuidle_driver_state_disabled(struct cpuidle_driver
*drv
, int idx
,
188 extern void cpuidle_unregister_driver(struct cpuidle_driver
*drv
);
189 extern int cpuidle_register_device(struct cpuidle_device
*dev
);
190 extern void cpuidle_unregister_device(struct cpuidle_device
*dev
);
191 extern int cpuidle_register(struct cpuidle_driver
*drv
,
192 const struct cpumask
*const coupled_cpus
);
193 extern void cpuidle_unregister(struct cpuidle_driver
*drv
);
194 extern void cpuidle_pause_and_lock(void);
195 extern void cpuidle_resume_and_unlock(void);
196 extern void cpuidle_pause(void);
197 extern void cpuidle_resume(void);
198 extern int cpuidle_enable_device(struct cpuidle_device
*dev
);
199 extern void cpuidle_disable_device(struct cpuidle_device
*dev
);
200 extern int cpuidle_play_dead(void);
202 extern struct cpuidle_driver
*cpuidle_get_cpu_driver(struct cpuidle_device
*dev
);
203 static inline struct cpuidle_device
*cpuidle_get_device(void)
204 {return __this_cpu_read(cpuidle_devices
); }
206 static inline void disable_cpuidle(void) { }
207 static inline bool cpuidle_not_available(struct cpuidle_driver
*drv
,
208 struct cpuidle_device
*dev
)
210 static inline int cpuidle_select(struct cpuidle_driver
*drv
,
211 struct cpuidle_device
*dev
, bool *stop_tick
)
213 static inline int cpuidle_enter(struct cpuidle_driver
*drv
,
214 struct cpuidle_device
*dev
, int index
)
216 static inline void cpuidle_reflect(struct cpuidle_device
*dev
, int index
) { }
217 static inline u64
cpuidle_poll_time(struct cpuidle_driver
*drv
,
218 struct cpuidle_device
*dev
)
220 static inline int cpuidle_register_driver(struct cpuidle_driver
*drv
)
222 static inline struct cpuidle_driver
*cpuidle_get_driver(void) {return NULL
; }
223 static inline void cpuidle_driver_state_disabled(struct cpuidle_driver
*drv
,
224 int idx
, bool disable
) { }
225 static inline void cpuidle_unregister_driver(struct cpuidle_driver
*drv
) { }
226 static inline int cpuidle_register_device(struct cpuidle_device
*dev
)
228 static inline void cpuidle_unregister_device(struct cpuidle_device
*dev
) { }
229 static inline int cpuidle_register(struct cpuidle_driver
*drv
,
230 const struct cpumask
*const coupled_cpus
)
232 static inline void cpuidle_unregister(struct cpuidle_driver
*drv
) { }
233 static inline void cpuidle_pause_and_lock(void) { }
234 static inline void cpuidle_resume_and_unlock(void) { }
235 static inline void cpuidle_pause(void) { }
236 static inline void cpuidle_resume(void) { }
237 static inline int cpuidle_enable_device(struct cpuidle_device
*dev
)
239 static inline void cpuidle_disable_device(struct cpuidle_device
*dev
) { }
240 static inline int cpuidle_play_dead(void) {return -ENODEV
; }
241 static inline struct cpuidle_driver
*cpuidle_get_cpu_driver(
242 struct cpuidle_device
*dev
) {return NULL
; }
243 static inline struct cpuidle_device
*cpuidle_get_device(void) {return NULL
; }
246 #ifdef CONFIG_CPU_IDLE
247 extern int cpuidle_find_deepest_state(struct cpuidle_driver
*drv
,
248 struct cpuidle_device
*dev
,
249 u64 latency_limit_ns
);
250 extern int cpuidle_enter_s2idle(struct cpuidle_driver
*drv
,
251 struct cpuidle_device
*dev
);
252 extern void cpuidle_use_deepest_state(u64 latency_limit_ns
);
254 static inline int cpuidle_find_deepest_state(struct cpuidle_driver
*drv
,
255 struct cpuidle_device
*dev
,
256 u64 latency_limit_ns
)
258 static inline int cpuidle_enter_s2idle(struct cpuidle_driver
*drv
,
259 struct cpuidle_device
*dev
)
261 static inline void cpuidle_use_deepest_state(u64 latency_limit_ns
)
/* kernel/sched/idle.c */
extern void sched_idle_set_state(struct cpuidle_state *idle_state);
extern void default_idle_call(void);
270 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
271 void cpuidle_coupled_parallel_barrier(struct cpuidle_device
*dev
, atomic_t
*a
);
273 static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device
*dev
, atomic_t
*a
)
#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
#endif
284 /******************************
285 * CPUIDLE GOVERNOR INTERFACE *
286 ******************************/
288 struct cpuidle_governor
{
289 char name
[CPUIDLE_NAME_LEN
];
290 struct list_head governor_list
;
293 int (*enable
) (struct cpuidle_driver
*drv
,
294 struct cpuidle_device
*dev
);
295 void (*disable
) (struct cpuidle_driver
*drv
,
296 struct cpuidle_device
*dev
);
298 int (*select
) (struct cpuidle_driver
*drv
,
299 struct cpuidle_device
*dev
,
301 void (*reflect
) (struct cpuidle_device
*dev
, int index
);
304 extern int cpuidle_register_governor(struct cpuidle_governor
*gov
);
305 extern s64
cpuidle_governor_latency_req(unsigned int cpu
);
/*
 * Arch helper: enter an idle state through @low_level_idle_enter while
 * issuing CPU PM notifications (skipped when @is_retention) and switching
 * context tracking off around the low-level call (skipped when @is_rcu,
 * i.e. when the state itself takes care of RCU).
 *
 * NOTE(review): body largely reconstructed from the upstream header --
 * confirm against include/linux/cpuidle.h before relying on exact semantics.
 */
#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter,			\
				idx,					\
				state,					\
				is_retention, is_rcu)			\
({									\
	int __ret = 0;							\
									\
	if (!idx) {							\
		cpu_do_idle();						\
		return idx;						\
	}								\
									\
	if (!is_retention)						\
		__ret = cpu_pm_enter();					\
	if (!__ret) {							\
		if (!is_rcu)						\
			ct_cpuidle_enter();				\
		__ret = low_level_idle_enter(state);			\
		if (!is_rcu)						\
			ct_cpuidle_exit();				\
		if (!is_retention)					\
			cpu_pm_exit();					\
	}								\
									\
	__ret ? -1 : idx;						\
})
334 #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
335 __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0, 0)
337 #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
338 __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1, 0)
340 #define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \
341 __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 0)
343 #define CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(low_level_idle_enter, idx, state) \
344 __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 1)
346 #define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \
347 __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 0)
349 #define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(low_level_idle_enter, idx, state) \
350 __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 1)
352 #endif /* _LINUX_CPUIDLE_H */