/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"
static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
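/*
 * Note that cpu_idle_force_poll is a counter rather than a flag, so
 * nested cpu_idle_poll_ctrl(true)/cpu_idle_poll_ctrl(false) pairs from
 * independent callers compose correctly. An illustrative (hypothetical)
 * caller that must keep CPUs out of deep idle around a latency-critical
 * window would do:
 *
 *	cpu_idle_poll_ctrl(true);
 *	critical_window();	// hypothetical; CPUs busy-poll meanwhile
 *	cpu_idle_poll_ctrl(false);
 */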
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
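/*
 * The two __setup() handlers above map directly to boot parameters:
 * booting with "nohlt" on the kernel command line forces the polling
 * idle loop, while "hlt" restores the default behaviour of entering
 * low-power idle states. For example:
 *
 *	linux ... nohlt
 */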
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched() &&
		(cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}
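/*
 * The trace_cpu_idle_rcuidle() calls above bracket the polling period
 * for the power:cpu_idle tracepoint: state 0 marks entry into the poll
 * loop and PWR_EVENT_EXIT marks the return to the running state, so
 * tracepoint consumers (e.g. perf) can account for time spent polling.
 */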
/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}
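/*
 * Note on the weak default above: an architecture that provides no
 * arch_cpu_idle() of its own falls back to forcing the polling loop
 * (with interrupts enabled), so the CPU still idles correctly, just
 * without entering any low-power state.
 */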
/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;
	bool reflect;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * During the idle period, stop measuring the disabled irqs
	 * critical sections latencies
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period
	 */
	rcu_idle_enter();

	if (cpuidle_not_available(drv, dev))
		goto use_default;

	/*
	 * Suspend-to-idle ("freeze") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */
	if (idle_should_freeze()) {
		entered_state = cpuidle_enter_freeze(drv, dev);
		if (entered_state >= 0) {
			local_irq_enable();
			goto exit_idle;
		}

		reflect = false;
		next_state = cpuidle_find_deepest_state(drv, dev);
	} else {
		reflect = true;
		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
	}
	/* Fall back to the default arch idle method on errors. */
	if (next_state < 0)
		goto use_default;

	/*
	 * The idle task must be scheduled, it is pointless to
	 * go to idle, just update no idle residency and get
	 * out of this function
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		entered_state = next_state;
		local_irq_enable();
		goto exit_idle;
	}

	/* Take note of the planned idle state. */
	idle_set_state(this_rq(), &drv->states[next_state]);

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts
	 */
	entered_state = cpuidle_enter(drv, dev, next_state);

	/* The cpu is no longer idle or about to enter idle. */
	idle_set_state(this_rq(), NULL);

	if (entered_state == -EBUSY)
		goto use_default;

	/*
	 * Give the governor an opportunity to reflect on the outcome
	 */
	if (reflect)
		cpuidle_reflect(dev, entered_state);

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to reenable local interrupts
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();
	return;

use_default:
	/*
	 * We can't use the cpuidle framework, let's use the default
	 * idle routine.
	 */
	if (current_clr_polling_and_test())
		local_irq_enable();
	else
		arch_cpu_idle();

	goto exit_idle;
}
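/*
 * In summary, cpuidle_idle_call() takes one of four paths:
 *
 *   1. need_resched() is set: re-enable interrupts and return.
 *   2. cpuidle is unavailable or errored: use_default, i.e. the
 *      arch_cpu_idle() fallback.
 *   3. Suspend-to-idle is in progress: bypass the governor and enter
 *      the deepest available state.
 *   4. Normal case: the governor selects a state, cpuidle_enter()
 *      blocks in it until an interrupt arrives, then the governor
 *      reflects on the residency that was actually achieved.
 */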
DEFINE_PER_CPU(bool, cpu_dead_idle);
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id())) {
				rcu_cpu_notify(NULL, CPU_DYING_IDLE,
					       (void *)(long)smp_processor_id());
				smp_mb(); /* all activity before dead. */
				this_cpu_write(cpu_dead_idle, true);
				arch_cpu_idle_dead();
			}

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending and reschedule
		 * if need_resched is set while polling is set. That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}
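/*
 * cpu_idle_loop() never returns: each pass blocks in an idle state
 * until a wakeup, and once need_resched() is observed it drops into
 * schedule_preempt_disabled(), coming back here when the CPU next
 * goes idle.
 */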
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}
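/*
 * cpu_startup_entry() is the common tail of CPU bringup: the boot CPU
 * reaches it from generic init code and secondary CPUs from their
 * arch-specific start routines, and neither ever returns from the
 * idle loop it enters.
 */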