/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};
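
/*
 * Kick @cpu out of its idle state by forcing a reschedule, but only if
 * it is currently running the idle task. On !SMP builds this is a no-op,
 * since there is no remote CPU to kick.
 */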
#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
#else
static inline void wake_up_if_idle(int cpu) { }
#endif

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}
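
/*
 * A sketch of the pairing (the remote side lives in resched_curr(),
 * kernel/sched/core.c):
 *
 *	idle CPU				remote CPU
 *	--------				----------
 *	set TIF_POLLING_NRFLAG			set TIF_NEED_RESCHED
 *	smp_mb()				smp_mb()
 *	read TIF_NEED_RESCHED			read TIF_POLLING_NRFLAG
 *
 * This is the classic store-buffering pattern: with a full barrier on
 * both sides, at least one CPU must observe the other's store. So either
 * this function returns true, or resched_curr() sees the polling bit and
 * may safely skip the reschedule IPI, relying on the polling idle loop
 * to notice TIF_NEED_RESCHED on its own.
 */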

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif
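
/*
 * A minimal usage sketch, loosely modeled on the polling idle loop in
 * kernel/sched/idle.c (the loop body here is illustrative, not a
 * verbatim copy):
 *
 *	if (!current_set_polling_and_test()) {
 *		while (!need_resched())
 *			cpu_relax();
 *	}
 *	__current_clr_polling();
 *
 * While TIF_POLLING_NRFLAG is set, resched_curr() on another CPU only
 * sets TIF_NEED_RESCHED and skips the reschedule IPI, trusting a loop
 * like the one above to notice the flag. The *_and_test() variants
 * close the window between flipping the polling bit and a concurrent
 * TIF_NEED_RESCHED being set.
 */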

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold it.
	 */
	smp_mb(); /* paired with resched_curr() */
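
	/*
	 * Fold an already-pending TIF_NEED_RESCHED into the preempt count
	 * (PREEMPT_NEED_RESCHED) so the next preemption point honours it.
	 */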
	preempt_fold_need_resched();
}

#endif /* _LINUX_SCHED_IDLE_H */