/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>
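
/*
 * wake_up_if_idle(): kick @cpu out of the idle loop, but only if it is
 * currently running its idle task.  On !SMP this is a no-op stub, since
 * the sole CPU cannot observe itself idle from the outside.
 */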
#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
#else
static inline void wake_up_if_idle(int cpu) { }
#endif
/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
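
/*
 * When the instrumented bitops header is in effect, set_bit() and
 * clear_bit() are wrappers that call into KASAN/KCSAN instrumentation.
 * The raw arch_*() variants below keep the idle path free of such
 * instrumentation.
 */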
static __always_inline void __current_set_polling(void)
{
        arch_set_bit(TIF_POLLING_NRFLAG,
                     (unsigned long *)(&current_thread_info()->flags));
}
static __always_inline void __current_clr_polling(void)
{
        arch_clear_bit(TIF_POLLING_NRFLAG,
                       (unsigned long *)(&current_thread_info()->flags));
}
#else

static __always_inline void __current_set_polling(void)
{
        set_bit(TIF_POLLING_NRFLAG,
                (unsigned long *)(&current_thread_info()->flags));
}
static __always_inline void __current_clr_polling(void)
{
        clear_bit(TIF_POLLING_NRFLAG,
                  (unsigned long *)(&current_thread_info()->flags));
}
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
static __always_inline bool __must_check current_set_polling_and_test(void)
{
        __current_set_polling();

        /*
         * The polling state must be visible before we test NEED_RESCHED;
         * paired with the barrier in resched_curr().
         */
        smp_mb__after_atomic();

        return unlikely(tif_need_resched());
}
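
/*
 * A sketch of the remote pairing (resched_curr() in kernel/sched/core.c):
 * the writer sets TIF_NEED_RESCHED and then tests TIF_POLLING_NRFLAG,
 * sending the resched IPI only when polling is clear.  With the full
 * barrier above, at least one side is guaranteed to observe the other,
 * so a wakeup cannot be lost between the flag test and idle entry.
 */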
static __always_inline bool __must_check current_clr_polling_and_test(void)
{
        __current_clr_polling();

        /*
         * The polling state must be visible before we test NEED_RESCHED;
         * paired with the barrier in resched_curr().
         */
        smp_mb__after_atomic();

        return unlikely(tif_need_resched());
}
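
/*
 * Illustrative sketch only (not part of this header): a polling idle
 * loop is expected to bracket its spin with these helpers, e.g.
 *
 *      if (!current_set_polling_and_test()) {
 *              while (!tif_need_resched())
 *                      cpu_relax();
 *      }
 *      current_clr_polling();
 *
 * so that a remote resched_curr() can skip the IPI entirely: it sees
 * TIF_POLLING_NRFLAG and merely sets TIF_NEED_RESCHED.
 */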
#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
        return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
        return unlikely(tif_need_resched());
}
#endif
static __always_inline void current_clr_polling(void)
{
        __current_clr_polling();

        /*
         * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
         * Once the bit is cleared, we'll get IPIs with every new
         * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
         * fold it.
         */
        smp_mb(); /* paired with resched_curr() */

        preempt_fold_need_resched();
}
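
/*
 * Note on the final fold: preempt_fold_need_resched() transfers a
 * pending TIF_NEED_RESCHED into the preempt count, so a later
 * preempt_enable() can reschedule even though the IPI window above
 * has already closed.
 */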
#endif /* _LINUX_SCHED_IDLE_H */