// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);	\
	else					\
		pr_cont(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

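/*
 * SPLIT_NS() expands to two printf arguments: the value in whole
 * milliseconds and the remaining nanoseconds, matching the
 * "%Ld.%06ld" formats used throughout this file.
 */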
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

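/*
 * Show the scheduler features, prefixing each currently disabled one
 * with "NO_".
 */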
static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* CONFIG_JUMP_LABEL */

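/*
 * Toggle a single feature from its name: a leading "NO_" clears the
 * feature bit (and its static key), otherwise the feature is enabled.
 */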
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

static cpumask_var_t		sd_sysctl_cpus;
static struct dentry		*sd_dentry;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

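/*
 * Expose the load-balancing tunables of one sched_domain as a set of
 * debugfs files under the given parent directory.
 */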
static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64, 0644, max_newidle_lb_cost);
	SDM(u32, 0644, busy_factor);
	SDM(u32, 0644, imbalance_pct);
	SDM(u32, 0644, cache_nice_tries);
	SDM(str, 0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_debug_init() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_remove(debugfs_lookup(buf, sd_dentry));
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

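/*
 * Resolve a task group to a printable name: the autogroup name if the
 * group is an autogroup, otherwise its cgroup path.
 */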
static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

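/*
 * Print a single row of the "runnable tasks" table: run state, comm,
 * PID, vruntime, context-switch count, priority and schedstat times.
 */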
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

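/*
 * Dump the state of one cfs_rq: vruntime spread, task counts, load and
 * PELT averages, and (with CFS bandwidth) throttling state.
 */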
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_rq_lock_irqsave(rq, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
			cfs_rq->idle_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

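/*
 * Dump one CPU's runqueue: clock values, task counts and schedstats,
 * then the CFS, RT and deadline details and the runnable-task table.
 */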
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_idle_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

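/*
 * The iterator below hands us 1 for the header and n + 2 for CPU n,
 * so subtracting 2 yields -1 for the header position.
 */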
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

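/*
 * Append the NUMA-balancing details of a task: scan sequence, fault
 * statistics and the preferred/current node.
 */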
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

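/*
 * Implements /proc/<pid>/sched: print a task's scheduling state, its
 * schedstats if enabled, PELT averages and per-policy parameters.
 */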
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
			  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
		   get_nr_threads(p));
	SEQ_printf(m,
		   "---------------------------------------------------------"
		   "----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}

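/*
 * Warn (rate limited to once an hour) when need_resched has been set
 * for longer than the latency_warn_ms threshold without a reschedule.
 */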
void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}