/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
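
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * __schedstat_*() variants skip the static-branch test and assume the
 * caller has already checked schedstat_enabled(), which lets a hot path
 * amortize one check across several updates:
 *
 *	if (schedstat_enabled()) {
 *		__schedstat_inc(stats->wait_count);
 *		__schedstat_add(stats->wait_sum, delta);
 *	}
 *
 * The unprefixed variants embed the check themselves:
 *
 *	schedstat_inc(rq->yld_count);
 */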

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats);

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats);

void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats);

static inline void
check_schedstat_required(void)
{
	if (schedstat_enabled())
		return;

	/* Warn once if a dependent tracepoint is active while schedstats is off */
	if (trace_sched_stat_wait_enabled()    ||
	    trace_sched_stat_sleep_enabled()   ||
	    trace_sched_stat_iowait_enabled()  ||
	    trace_sched_stat_blocked_enabled() ||
	    trace_sched_stat_runtime_enabled())
		printk_deferred_once("Scheduler tracepoints stat_wait, stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}
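
/*
 * Typical caller (sketch for orientation, not part of this header; in
 * current kernels this runs from the fair-class enqueue path, e.g.
 * enqueue_entity() in fair.c):
 *
 *	check_schedstat_required();
 *	update_stats_enqueue_fair(cfs_rq, se, flags);
 */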

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
# define schedstat_val(var)		0
# define schedstat_val_or_zero(var)	0

# define __update_stats_wait_start(rq, p, stats)	do { } while (0)
# define __update_stats_wait_end(rq, p, stats)		do { } while (0)
# define __update_stats_enqueue_sleeper(rq, p, stats)	do { } while (0)
# define check_schedstat_required()			do { } while (0)

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats {
	struct sched_entity	se;
	struct sched_statistics	stats;
} __no_randomize_layout;
#endif

static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (!entity_is_task(se))
		return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
	return &task_of(se)->stats;
}
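
/*
 * Usage sketch (illustrative only): a group scheduling entity is not
 * embedded in a task_struct, so callers reach the statistics through
 * this helper instead of task_of(), e.g.:
 *
 *	struct sched_statistics *stats = __schedstats_from_se(se);
 *
 *	if (schedstat_enabled())
 *		__schedstat_set(stats->wait_start, rq_clock(rq));
 */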

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= TSK_MEMSTALL;

	psi_task_change(p, clear, 0);
}
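
/*
 * Caller sketch (for orientation; matches the enqueue_task()/dequeue_task()
 * call sites in core.c at the time of writing, not part of this header):
 *
 *	enqueue_task():	psi_enqueue(p, flags & ENQUEUE_WAKEUP);
 *	dequeue_task():	psi_dequeue(p, flags & DEQUEUE_SLEEP);
 */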

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}
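
/*
 * Call-site sketch (illustrative; try_to_wake_up() in core.c invokes this
 * just before migrating a waking task to a different CPU):
 *
 *	if (task_cpu(p) != cpu) {
 *		wake_flags |= WF_MIGRATED;
 *		psi_ttwu_dequeue(p);
 *		set_task_cpu(p, cpu);
 *	}
 */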

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

#else /* !CONFIG_PSI: */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() so that every delta is measured against
 * the local rq->clock: taking the delta on each CPU separately cancels
 * out any rq->clock skew between CPUs.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = 0;

	if (!t->sched_info.last_queued)
		return;

	delta = rq_clock(rq) - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeue(rq, delta);
}
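
/*
 * Worked example (illustrative): a task stamped last_queued = 100 on
 * CPU0 and dequeued when CPU0's rq_clock reads 180 accrues 80 of
 * run_delay here; a subsequent enqueue re-stamps last_queued from the
 * destination CPU's rq_clock, so each delta stays within one clock
 * domain and cross-CPU skew never enters the sum.
 */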

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now, delta = 0;

	if (!t->sched_info.last_queued)
		return;

	now = rq_clock(rq);
	delta = now - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * Called from enqueue_task(), and from sched_info_depart() when a task
 * that is still runnable is switched out. It only updates the timestamp
 * if that is not already set; sched_info_dequeue() is expected to clear
 * the stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process
 * involuntarily due, typically, to expiring its time slice (this may
 * also be called when switching to the idle task). Now we can calculate
 * how long we ran. Also, if the process is still in the TASK_RUNNING
 * state, call sched_info_enqueue() to mark that it has now again started
 * waiting on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (task_is_running(t))
		sched_info_enqueue(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
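
/*
 * Caller sketch (illustrative; prepare_task_switch() in core.c is the
 * expected caller and guarantees prev != next):
 *
 *	prepare_task_switch(rq, prev, next):
 *		...
 *		sched_info_switch(rq, prev, next);
 *		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
 */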

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t)	do { } while (0)
# define sched_info_dequeue(rq, t)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */