/* SPDX-License-Identifier: GPL-2.0 */

#include "perf-sys.h"
#include "util/cloexec.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/parse-events.h"
#include "util/perf_api_probe.h"
#include <perf/cpumap.h>
#include <errno.h>

typedef void (*setup_probe_fn_t)(struct evsel *evsel);
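
/*
 * Probe helper: open the event described by @str on @cpu, first with an
 * unmodified attribute to make sure the event itself works (falling back from
 * monitoring all tasks to just the current process if that fails with
 * EACCES), then again after @fn has set the attribute bit being probed.
 * Returns 0 if the kernel accepts the modified attribute, -EINVAL if it
 * explicitly rejects it, and another negative error (e.g. -EAGAIN) if the
 * event could not be opened at all, so the caller may retry with a different
 * event string.
 */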
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct evlist *evlist;
	struct evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	static pid_t pid = -1;

	evlist = evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	fn(evsel);

	fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	evlist__delete(evlist);
	return err;
}
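
/*
 * Run @fn against a simple test event and see whether the kernel accepts the
 * resulting attribute.  The probe uses the first CPU of the system cpu map
 * and falls back through a short list of event strings until one of them can
 * be opened.
 */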
static bool perf_probe_api(setup_probe_fn_t fn)
{
	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
	struct perf_cpu_map *cpus;
	int cpu, ret, i = 0;

	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	perf_cpu_map__put(cpus);

	do {
		ret = perf_do_probe_api(fn, cpu, try[i++]);
		if (!ret)
			return true;
	} while (ret == -EAGAIN && try[i]);

	return false;
}
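
/*
 * setup_probe_fn_t callbacks: each one sets the single perf_event_attr field
 * whose kernel support is being probed.
 */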
static void perf_probe_sample_identifier(struct evsel *evsel)
{
	evsel->core.attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

static void perf_probe_comm_exec(struct evsel *evsel)
{
	evsel->core.attr.comm_exec = 1;
}

static void perf_probe_context_switch(struct evsel *evsel)
{
	evsel->core.attr.context_switch = 1;
}

static void perf_probe_text_poke(struct evsel *evsel)
{
	evsel->core.attr.text_poke = 1;
}

static void perf_probe_build_id(struct evsel *evsel)
{
	evsel->core.attr.build_id = 1;
}

static void perf_probe_cgroup(struct evsel *evsel)
{
	evsel->core.attr.cgroup = 1;
}
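
/*
 * Exported feature tests: each reports whether the running kernel supports
 * the corresponding perf_event_attr field.
 */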
bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}

bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}

bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}

bool perf_can_record_text_poke_events(void)
{
	return perf_probe_api(perf_probe_text_poke);
}
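
/*
 * Check whether a CPU-wide event (pid == -1) can be opened, by trying a
 * software CPU clock event on the first CPU of the system cpu map.  This
 * typically depends on the caller's privileges.
 */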
bool perf_can_record_cpu_wide(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.exclude_kernel = 1,
	};
	struct perf_cpu_map *cpus;
	int cpu, fd;

	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	perf_cpu_map__put(cpus);

	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
	if (fd < 0)
		return false;
	close(fd);

	return true;
}

/*
 * Architectures are expected to know if AUX area sampling is supported by the
 * hardware. Here we check for kernel support.
 */
bool perf_can_aux_sample(void)
{
	struct perf_event_attr attr = {
		.size = sizeof(struct perf_event_attr),
		.exclude_kernel = 1,
		/*
		 * Non-zero value causes the kernel to calculate the effective
		 * attribute size up to that byte.
		 */
		.aux_sample_size = 1,
	};
	int fd;

	fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
	/*
	 * If the kernel attribute is big enough to contain aux_sample_size
	 * then we assume that it is supported. We are relying on the kernel to
	 * validate the attribute size before anything else that could be wrong.
	 */
	if (fd < 0 && errno == E2BIG)
		return false;
	if (fd >= 0)
		close(fd);

	return true;
}
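
/*
 * As above, probe kernel support for the perf_event_attr build_id and cgroup
 * bits.
 */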
bool perf_can_record_build_id(void)
{
	return perf_probe_api(perf_probe_build_id);
}

bool perf_can_record_cgroup(void)
{
	return perf_probe_api(perf_probe_cgroup);
}