// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "runqslower.h"

#define TASK_RUNNING 0
#define BPF_F_CURRENT_CPU 0xffffffffULL

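/* Filter parameters; expected to be filled in by the userspace loader before
 * the program is loaded (they live in read-only data, hence const volatile).
 */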
const volatile __u64 min_us = 0;
const volatile pid_t targ_pid = 0;

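/* Per-task enqueue timestamp, kept in BPF task-local storage. */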
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, u64);
} start SEC(".maps");

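/* Perf event array used to stream latency events to userspace. */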
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");

/* record enqueue timestamp */
__always_inline
static int trace_enqueue(struct task_struct *t)
{
	u32 pid = t->pid;
	u64 *ptr;

	if (!pid || (targ_pid && targ_pid != pid))
		return 0;

	ptr = bpf_task_storage_get(&start, t, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!ptr)
		return 0;

	*ptr = bpf_ktime_get_ns();
	return 0;
}

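/* A task was woken up (or woken for the first time) and placed on the run
 * queue: record its enqueue timestamp.
 */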
SEC("tp_btf/sched_wakeup")
int handle__sched_wakeup(u64 *ctx)
{
	/* TP_PROTO(struct task_struct *p) */
	struct task_struct *p = (void *)ctx[0];

	return trace_enqueue(p);
}

SEC("tp_btf/sched_wakeup_new")
int handle__sched_wakeup_new(u64 *ctx)
{
	/* TP_PROTO(struct task_struct *p) */
	struct task_struct *p = (void *)ctx[0];

	return trace_enqueue(p);
}

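/* On context switch: compute how long @next waited on the run queue and
 * emit an event if the delay exceeds min_us.
 */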
SEC("tp_btf/sched_switch")
int handle__sched_switch(u64 *ctx)
{
	/* TP_PROTO(bool preempt, struct task_struct *prev,
	 *	    struct task_struct *next)
	 */
	struct task_struct *prev = (struct task_struct *)ctx[1];
	struct task_struct *next = (struct task_struct *)ctx[2];
	struct event event = {};
	u64 *tsp, delta_us;
	long state;
	u32 pid;

	/* ivcsw: treat like an enqueue event and store timestamp */
	if (prev->__state == TASK_RUNNING)
		trace_enqueue(prev);

	pid = next->pid;

	/* For pid mismatch, save a bpf_task_storage_get */
	if (!pid || (targ_pid && targ_pid != pid))
		return 0;

	/* fetch timestamp and calculate delta */
	tsp = bpf_task_storage_get(&start, next, 0, 0);
	if (!tsp)
		return 0;   /* missed enqueue */

	delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
	if (min_us && delta_us <= min_us)
		return 0;

	event.pid = pid;
	event.delta_us = delta_us;
	bpf_get_current_comm(&event.task, sizeof(event.task));

	/* output */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));

	bpf_task_storage_delete(&start, next);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";