1 // SPDX-License-Identifier: GPL-2.0
2 #include <Python.h>
3 #include <structmember.h>
4 #include <inttypes.h>
5 #include <poll.h>
6 #include <linux/err.h>
7 #include <perf/cpumap.h>
8 #include <traceevent/event-parse.h>
9 #include <perf/mmap.h>
10 #include "evlist.h"
11 #include "callchain.h"
12 #include "evsel.h"
13 #include "event.h"
14 #include "print_binary.h"
15 #include "thread_map.h"
16 #include "trace-event.h"
17 #include "mmap.h"
18 #include "stat.h"
19 #include "metricgroup.h"
20 #include "util/env.h"
21 #include <internal/lib.h>
22 #include "util.h"
23
24 #if PY_MAJOR_VERSION < 3
25 #define _PyUnicode_FromString(arg) \
26 PyString_FromString(arg)
27 #define _PyUnicode_AsString(arg) \
28 PyString_AsString(arg)
29 #define _PyUnicode_FromFormat(...) \
30 PyString_FromFormat(__VA_ARGS__)
31 #define _PyLong_FromLong(arg) \
32 PyInt_FromLong(arg)
33
34 #else
35
36 #define _PyUnicode_FromString(arg) \
37 PyUnicode_FromString(arg)
38 #define _PyUnicode_FromFormat(...) \
39 PyUnicode_FromFormat(__VA_ARGS__)
40 #define _PyLong_FromLong(arg) \
41 PyLong_FromLong(arg)
42 #endif
43
44 #ifndef Py_TYPE
45 #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
46 #endif
47
48 /*
49 * Provide these two so that we don't have to link against callchain.c and
50 * start dragging hist.c, etc.
51 */
52 struct callchain_param callchain_param;
53
int parse_callchain_record(const char *arg __maybe_unused,
			   struct callchain_param *param __maybe_unused)
56 {
57 return 0;
58 }
59
/*
 * Add this one here so as not to drag in util/env.c
 */
63 struct perf_env perf_env;
64
/*
 * Add this one here so as not to drag in util/stat-shadow.c
 */
void perf_stat__collect_metric_expr(struct evlist *evsel_list)
69 {
70 }
71
/*
 * This one is needed so as not to drag in the PMU bandwagon, the jevents
 * generated pmu_sys_event_tables, etc. So far evsel__find_pmu() is used only
 * for per-PMU perf_event_attr.exclude_guest handling, which is not really
 * needed for the known perf python binding use cases; revisit if this becomes
 * necessary.
 */
struct perf_pmu *evsel__find_pmu(struct evsel *evsel __maybe_unused)
80 {
81 return NULL;
82 }
83
/*
 * Add this one here so as not to drag in util/metricgroup.c
 */
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
90 {
91 return 0;
92 }
93
94 /*
95 * XXX: All these evsel destructors need some better mechanism, like a linked
96 * list of destructors registered when the relevant code indeed is used instead
97 * of having more and more calls in perf_evsel__delete(). -- acme
98 *
99 * For now, add some more:
100 *
 * So as not to drag in the BPF bandwagon...
102 */
103 void bpf_counter__destroy(struct evsel *evsel);
104 int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
105 int bpf_counter__disable(struct evsel *evsel);
106
void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
108 {
109 }
110
int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
112 {
113 return 0;
114 }
115
int bpf_counter__disable(struct evsel *evsel __maybe_unused)
117 {
118 return 0;
119 }
120
121 /*
122 * Support debug printing even though util/debug.c is not linked. That means
123 * implementing 'verbose' and 'eprintf'.
124 */
125 int verbose;
126 int debug_peo_args;
127
128 int eprintf(int level, int var, const char *fmt, ...);
129
int eprintf(int level, int var, const char *fmt, ...)
131 {
132 va_list args;
133 int ret = 0;
134
135 if (var >= level) {
136 va_start(args, fmt);
137 ret = vfprintf(stderr, fmt, args);
138 va_end(args);
139 }
140
141 return ret;
142 }
143
144 /* Define PyVarObject_HEAD_INIT for python 2.5 */
145 #ifndef PyVarObject_HEAD_INIT
146 # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
147 #endif
148
149 #if PY_MAJOR_VERSION < 3
150 PyMODINIT_FUNC initperf(void);
151 #else
152 PyMODINIT_FUNC PyInit_perf(void);
153 #endif
154
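/*
 * Helpers to declare PyMemberDef entries that point either into the copied
 * raw event payload (member_def) or into the parsed perf_sample
 * (sample_member_def) of a struct pyrf_event.
 */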
155 #define member_def(type, member, ptype, help) \
156 { #member, ptype, \
157 offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
158 0, help }
159
160 #define sample_member_def(name, member, ptype, help) \
161 { #name, ptype, \
162 offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
163 0, help }
164
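/*
 * Python object wrapping one perf event record: the evsel it came from, the
 * parsed perf_sample and a copy of the raw event payload.
 */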
165 struct pyrf_event {
166 PyObject_HEAD
167 struct evsel *evsel;
168 struct perf_sample sample;
169 union perf_event event;
170 };
171
172 #define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),			 \
174 sample_member_def(sample_pid, pid, T_INT, "event pid"), \
175 sample_member_def(sample_tid, tid, T_INT, "event tid"), \
176 sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
177 sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
178 sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
179 sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
180 sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
181 sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
182
183 static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
184
185 static PyMemberDef pyrf_mmap_event__members[] = {
186 sample_members
187 member_def(perf_event_header, type, T_UINT, "event type"),
188 member_def(perf_event_header, misc, T_UINT, "event misc"),
189 member_def(perf_record_mmap, pid, T_UINT, "event pid"),
190 member_def(perf_record_mmap, tid, T_UINT, "event tid"),
191 member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
192 member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
193 member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
194 member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
195 { .name = NULL, },
196 };
197
static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
199 {
200 PyObject *ret;
201 char *s;
202
203 if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
204 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
205 "filename: %s }",
206 pevent->event.mmap.pid, pevent->event.mmap.tid,
207 pevent->event.mmap.start, pevent->event.mmap.len,
208 pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
209 ret = PyErr_NoMemory();
210 } else {
211 ret = _PyUnicode_FromString(s);
212 free(s);
213 }
214 return ret;
215 }
216
217 static PyTypeObject pyrf_mmap_event__type = {
218 PyVarObject_HEAD_INIT(NULL, 0)
219 .tp_name = "perf.mmap_event",
220 .tp_basicsize = sizeof(struct pyrf_event),
221 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
222 .tp_doc = pyrf_mmap_event__doc,
223 .tp_members = pyrf_mmap_event__members,
224 .tp_repr = (reprfunc)pyrf_mmap_event__repr,
225 };
226
227 static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
228
229 static PyMemberDef pyrf_task_event__members[] = {
230 sample_members
231 member_def(perf_event_header, type, T_UINT, "event type"),
232 member_def(perf_record_fork, pid, T_UINT, "event pid"),
233 member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
234 member_def(perf_record_fork, tid, T_UINT, "event tid"),
235 member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
236 member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
237 { .name = NULL, },
238 };
239
static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
241 {
242 return _PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
243 "ptid: %u, time: %" PRI_lu64 "}",
244 pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
245 pevent->event.fork.pid,
246 pevent->event.fork.ppid,
247 pevent->event.fork.tid,
248 pevent->event.fork.ptid,
249 pevent->event.fork.time);
250 }
251
252 static PyTypeObject pyrf_task_event__type = {
253 PyVarObject_HEAD_INIT(NULL, 0)
254 .tp_name = "perf.task_event",
255 .tp_basicsize = sizeof(struct pyrf_event),
256 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
257 .tp_doc = pyrf_task_event__doc,
258 .tp_members = pyrf_task_event__members,
259 .tp_repr = (reprfunc)pyrf_task_event__repr,
260 };
261
262 static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
263
264 static PyMemberDef pyrf_comm_event__members[] = {
265 sample_members
266 member_def(perf_event_header, type, T_UINT, "event type"),
267 member_def(perf_record_comm, pid, T_UINT, "event pid"),
268 member_def(perf_record_comm, tid, T_UINT, "event tid"),
269 member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
270 { .name = NULL, },
271 };
272
static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
274 {
275 return _PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
276 pevent->event.comm.pid,
277 pevent->event.comm.tid,
278 pevent->event.comm.comm);
279 }
280
281 static PyTypeObject pyrf_comm_event__type = {
282 PyVarObject_HEAD_INIT(NULL, 0)
283 .tp_name = "perf.comm_event",
284 .tp_basicsize = sizeof(struct pyrf_event),
285 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
286 .tp_doc = pyrf_comm_event__doc,
287 .tp_members = pyrf_comm_event__members,
288 .tp_repr = (reprfunc)pyrf_comm_event__repr,
289 };
290
291 static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
292
293 static PyMemberDef pyrf_throttle_event__members[] = {
294 sample_members
295 member_def(perf_event_header, type, T_UINT, "event type"),
296 member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
297 member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
298 member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
299 { .name = NULL, },
300 };
301
static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
303 {
304 struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);
305
306 return _PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
307 ", stream_id: %" PRI_lu64 " }",
308 pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
309 te->time, te->id, te->stream_id);
310 }
311
312 static PyTypeObject pyrf_throttle_event__type = {
313 PyVarObject_HEAD_INIT(NULL, 0)
314 .tp_name = "perf.throttle_event",
315 .tp_basicsize = sizeof(struct pyrf_event),
316 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
317 .tp_doc = pyrf_throttle_event__doc,
318 .tp_members = pyrf_throttle_event__members,
319 .tp_repr = (reprfunc)pyrf_throttle_event__repr,
320 };
321
322 static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
323
324 static PyMemberDef pyrf_lost_event__members[] = {
325 sample_members
326 member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
327 member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
328 { .name = NULL, },
329 };
330
static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
332 {
333 PyObject *ret;
334 char *s;
335
336 if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
337 "lost: %#" PRI_lx64 " }",
338 pevent->event.lost.id, pevent->event.lost.lost) < 0) {
339 ret = PyErr_NoMemory();
340 } else {
341 ret = _PyUnicode_FromString(s);
342 free(s);
343 }
344 return ret;
345 }
346
347 static PyTypeObject pyrf_lost_event__type = {
348 PyVarObject_HEAD_INIT(NULL, 0)
349 .tp_name = "perf.lost_event",
350 .tp_basicsize = sizeof(struct pyrf_event),
351 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
352 .tp_doc = pyrf_lost_event__doc,
353 .tp_members = pyrf_lost_event__members,
354 .tp_repr = (reprfunc)pyrf_lost_event__repr,
355 };
356
357 static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
358
359 static PyMemberDef pyrf_read_event__members[] = {
360 sample_members
361 member_def(perf_record_read, pid, T_UINT, "event pid"),
362 member_def(perf_record_read, tid, T_UINT, "event tid"),
363 { .name = NULL, },
364 };
365
static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
367 {
368 return _PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
369 pevent->event.read.pid,
370 pevent->event.read.tid);
371 /*
372 * FIXME: return the array of read values,
373 * making this method useful ;-)
374 */
375 }
376
377 static PyTypeObject pyrf_read_event__type = {
378 PyVarObject_HEAD_INIT(NULL, 0)
379 .tp_name = "perf.read_event",
380 .tp_basicsize = sizeof(struct pyrf_event),
381 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
382 .tp_doc = pyrf_read_event__doc,
383 .tp_members = pyrf_read_event__members,
384 .tp_repr = (reprfunc)pyrf_read_event__repr,
385 };
386
387 static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
388
389 static PyMemberDef pyrf_sample_event__members[] = {
390 sample_members
391 member_def(perf_event_header, type, T_UINT, "event type"),
392 { .name = NULL, },
393 };
394
static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
396 {
397 PyObject *ret;
398 char *s;
399
400 if (asprintf(&s, "{ type: sample }") < 0) {
401 ret = PyErr_NoMemory();
402 } else {
403 ret = _PyUnicode_FromString(s);
404 free(s);
405 }
406 return ret;
407 }
408
static bool is_tracepoint(struct pyrf_event *pevent)
410 {
411 return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
412 }
413
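/*
 * Convert a single tracepoint field from the raw sample data into a Python
 * object: a string or bytearray for array fields, a long for numeric fields.
 */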
414 static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
416 {
417 struct tep_handle *pevent = field->event->tep;
418 void *data = pe->sample.raw_data;
419 PyObject *ret = NULL;
420 unsigned long long val;
421 unsigned int offset, len;
422
423 if (field->flags & TEP_FIELD_IS_ARRAY) {
424 offset = field->offset;
425 len = field->size;
426 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
427 val = tep_read_number(pevent, data + offset, len);
428 offset = val;
429 len = offset >> 16;
430 offset &= 0xffff;
431 }
432 if (field->flags & TEP_FIELD_IS_STRING &&
433 is_printable_array(data + offset, len)) {
434 ret = _PyUnicode_FromString((char *)data + offset);
435 } else {
436 ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
437 field->flags &= ~TEP_FIELD_IS_STRING;
438 }
439 } else {
440 val = tep_read_number(pevent, data + field->offset,
441 field->size);
442 if (field->flags & TEP_FIELD_IS_POINTER)
443 ret = PyLong_FromUnsignedLong((unsigned long) val);
444 else if (field->flags & TEP_FIELD_IS_SIGNED)
445 ret = PyLong_FromLong((long) val);
446 else
447 ret = PyLong_FromUnsignedLong((unsigned long) val);
448 }
449
450 return ret;
451 }
452
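/*
 * Look up a tracepoint field by name, lazily resolving the evsel's tp_format
 * from its config (the tracepoint id) on first use.
 */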
453 static PyObject*
get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
455 {
456 const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
457 struct evsel *evsel = pevent->evsel;
458 struct tep_format_field *field;
459
460 if (!evsel->tp_format) {
461 struct tep_event *tp_format;
462
463 tp_format = trace_event__tp_format_id(evsel->core.attr.config);
464 if (IS_ERR_OR_NULL(tp_format))
465 return NULL;
466
467 evsel->tp_format = tp_format;
468 }
469
470 field = tep_find_any_field(evsel->tp_format, str);
471 if (!field)
472 return NULL;
473
474 return tracepoint_field(pevent, field);
475 }
476
477 static PyObject*
pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
479 {
480 PyObject *obj = NULL;
481
482 if (is_tracepoint(pevent))
483 obj = get_tracepoint_field(pevent, attr_name);
484
485 return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
486 }
487
488 static PyTypeObject pyrf_sample_event__type = {
489 PyVarObject_HEAD_INIT(NULL, 0)
490 .tp_name = "perf.sample_event",
491 .tp_basicsize = sizeof(struct pyrf_event),
492 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
493 .tp_doc = pyrf_sample_event__doc,
494 .tp_members = pyrf_sample_event__members,
495 .tp_repr = (reprfunc)pyrf_sample_event__repr,
496 .tp_getattro = (getattrofunc) pyrf_sample_event__getattro,
497 };
498
499 static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
500
501 static PyMemberDef pyrf_context_switch_event__members[] = {
502 sample_members
503 member_def(perf_event_header, type, T_UINT, "event type"),
504 member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
505 member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
506 { .name = NULL, },
507 };
508
static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
510 {
511 PyObject *ret;
512 char *s;
513
514 if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
515 pevent->event.context_switch.next_prev_pid,
516 pevent->event.context_switch.next_prev_tid,
517 !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
518 ret = PyErr_NoMemory();
519 } else {
520 ret = _PyUnicode_FromString(s);
521 free(s);
522 }
523 return ret;
524 }
525
526 static PyTypeObject pyrf_context_switch_event__type = {
527 PyVarObject_HEAD_INIT(NULL, 0)
528 .tp_name = "perf.context_switch_event",
529 .tp_basicsize = sizeof(struct pyrf_event),
530 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
531 .tp_doc = pyrf_context_switch_event__doc,
532 .tp_members = pyrf_context_switch_event__members,
533 .tp_repr = (reprfunc)pyrf_context_switch_event__repr,
534 };
535
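/* Make all the pyrf event types instantiable and usable from Python. */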
static int pyrf_event__setup_types(void)
537 {
538 int err;
539 pyrf_mmap_event__type.tp_new =
540 pyrf_task_event__type.tp_new =
541 pyrf_comm_event__type.tp_new =
542 pyrf_lost_event__type.tp_new =
543 pyrf_read_event__type.tp_new =
544 pyrf_sample_event__type.tp_new =
545 pyrf_context_switch_event__type.tp_new =
546 pyrf_throttle_event__type.tp_new = PyType_GenericNew;
547 err = PyType_Ready(&pyrf_mmap_event__type);
548 if (err < 0)
549 goto out;
550 err = PyType_Ready(&pyrf_lost_event__type);
551 if (err < 0)
552 goto out;
553 err = PyType_Ready(&pyrf_task_event__type);
554 if (err < 0)
555 goto out;
556 err = PyType_Ready(&pyrf_comm_event__type);
557 if (err < 0)
558 goto out;
559 err = PyType_Ready(&pyrf_throttle_event__type);
560 if (err < 0)
561 goto out;
562 err = PyType_Ready(&pyrf_read_event__type);
563 if (err < 0)
564 goto out;
565 err = PyType_Ready(&pyrf_sample_event__type);
566 if (err < 0)
567 goto out;
568 err = PyType_Ready(&pyrf_context_switch_event__type);
569 if (err < 0)
570 goto out;
571 out:
572 return err;
573 }
574
575 static PyTypeObject *pyrf_event__type[] = {
576 [PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
577 [PERF_RECORD_LOST] = &pyrf_lost_event__type,
578 [PERF_RECORD_COMM] = &pyrf_comm_event__type,
579 [PERF_RECORD_EXIT] = &pyrf_task_event__type,
580 [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
581 [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
582 [PERF_RECORD_FORK] = &pyrf_task_event__type,
583 [PERF_RECORD_READ] = &pyrf_read_event__type,
584 [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
585 [PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type,
586 [PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type,
587 };
588
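/*
 * Wrap a raw perf event record in the Python object matching its type,
 * copying the record payload. Returns NULL for unsupported record types.
 */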
static PyObject *pyrf_event__new(union perf_event *event)
590 {
591 struct pyrf_event *pevent;
592 PyTypeObject *ptype;
593
594 if ((event->header.type < PERF_RECORD_MMAP ||
595 event->header.type > PERF_RECORD_SAMPLE) &&
596 !(event->header.type == PERF_RECORD_SWITCH ||
597 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
598 return NULL;
599
600 ptype = pyrf_event__type[event->header.type];
601 pevent = PyObject_New(struct pyrf_event, ptype);
602 if (pevent != NULL)
603 memcpy(&pevent->event, event, event->header.size);
604 return (PyObject *)pevent;
605 }
606
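/* Python object wrapping a perf_cpu_map, exposed as a read-only sequence. */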
607 struct pyrf_cpu_map {
608 PyObject_HEAD
609
610 struct perf_cpu_map *cpus;
611 };
612
static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
			      PyObject *args, PyObject *kwargs)
615 {
616 static char *kwlist[] = { "cpustr", NULL };
617 char *cpustr = NULL;
618
619 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
620 kwlist, &cpustr))
621 return -1;
622
623 pcpus->cpus = perf_cpu_map__new(cpustr);
624 if (pcpus->cpus == NULL)
625 return -1;
626 return 0;
627 }
628
static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
630 {
631 perf_cpu_map__put(pcpus->cpus);
632 Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
633 }
634
static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
636 {
637 struct pyrf_cpu_map *pcpus = (void *)obj;
638
639 return pcpus->cpus->nr;
640 }
641
static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
643 {
644 struct pyrf_cpu_map *pcpus = (void *)obj;
645
646 if (i >= pcpus->cpus->nr)
647 return NULL;
648
649 return Py_BuildValue("i", pcpus->cpus->map[i]);
650 }
651
652 static PySequenceMethods pyrf_cpu_map__sequence_methods = {
653 .sq_length = pyrf_cpu_map__length,
654 .sq_item = pyrf_cpu_map__item,
655 };
656
657 static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
658
659 static PyTypeObject pyrf_cpu_map__type = {
660 PyVarObject_HEAD_INIT(NULL, 0)
661 .tp_name = "perf.cpu_map",
662 .tp_basicsize = sizeof(struct pyrf_cpu_map),
663 .tp_dealloc = (destructor)pyrf_cpu_map__delete,
664 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
665 .tp_doc = pyrf_cpu_map__doc,
666 .tp_as_sequence = &pyrf_cpu_map__sequence_methods,
667 .tp_init = (initproc)pyrf_cpu_map__init,
668 };
669
static int pyrf_cpu_map__setup_types(void)
671 {
672 pyrf_cpu_map__type.tp_new = PyType_GenericNew;
673 return PyType_Ready(&pyrf_cpu_map__type);
674 }
675
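/* Python object wrapping a perf_thread_map, exposed as a read-only sequence. */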
676 struct pyrf_thread_map {
677 PyObject_HEAD
678
679 struct perf_thread_map *threads;
680 };
681
static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
				 PyObject *args, PyObject *kwargs)
684 {
685 static char *kwlist[] = { "pid", "tid", "uid", NULL };
686 int pid = -1, tid = -1, uid = UINT_MAX;
687
688 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
689 kwlist, &pid, &tid, &uid))
690 return -1;
691
692 pthreads->threads = thread_map__new(pid, tid, uid);
693 if (pthreads->threads == NULL)
694 return -1;
695 return 0;
696 }
697
static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
699 {
700 perf_thread_map__put(pthreads->threads);
701 Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
702 }
703
static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
705 {
706 struct pyrf_thread_map *pthreads = (void *)obj;
707
708 return pthreads->threads->nr;
709 }
710
static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
712 {
713 struct pyrf_thread_map *pthreads = (void *)obj;
714
715 if (i >= pthreads->threads->nr)
716 return NULL;
717
718 return Py_BuildValue("i", pthreads->threads->map[i]);
719 }
720
721 static PySequenceMethods pyrf_thread_map__sequence_methods = {
722 .sq_length = pyrf_thread_map__length,
723 .sq_item = pyrf_thread_map__item,
724 };
725
726 static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
727
728 static PyTypeObject pyrf_thread_map__type = {
729 PyVarObject_HEAD_INIT(NULL, 0)
730 .tp_name = "perf.thread_map",
731 .tp_basicsize = sizeof(struct pyrf_thread_map),
732 .tp_dealloc = (destructor)pyrf_thread_map__delete,
733 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
734 .tp_doc = pyrf_thread_map__doc,
735 .tp_as_sequence = &pyrf_thread_map__sequence_methods,
736 .tp_init = (initproc)pyrf_thread_map__init,
737 };
738
static int pyrf_thread_map__setup_types(void)
740 {
741 pyrf_thread_map__type.tp_new = PyType_GenericNew;
742 return PyType_Ready(&pyrf_thread_map__type);
743 }
744
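/* Python object embedding an evsel (a single event selector). */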
745 struct pyrf_evsel {
746 PyObject_HEAD
747
748 struct evsel evsel;
749 };
750
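/*
 * perf.evsel() constructor: build a perf_event_attr from the keyword
 * arguments, defaulting to a hardware cpu-cycles event, then init the evsel.
 */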
static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
753 {
754 struct perf_event_attr attr = {
755 .type = PERF_TYPE_HARDWARE,
756 .config = PERF_COUNT_HW_CPU_CYCLES,
757 .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
758 };
759 static char *kwlist[] = {
760 "type",
761 "config",
762 "sample_freq",
763 "sample_period",
764 "sample_type",
765 "read_format",
766 "disabled",
767 "inherit",
768 "pinned",
769 "exclusive",
770 "exclude_user",
771 "exclude_kernel",
772 "exclude_hv",
773 "exclude_idle",
774 "mmap",
775 "context_switch",
776 "comm",
777 "freq",
778 "inherit_stat",
779 "enable_on_exec",
780 "task",
781 "watermark",
782 "precise_ip",
783 "mmap_data",
784 "sample_id_all",
785 "wakeup_events",
786 "bp_type",
787 "bp_addr",
788 "bp_len",
789 NULL
790 };
791 u64 sample_period = 0;
792 u32 disabled = 0,
793 inherit = 0,
794 pinned = 0,
795 exclusive = 0,
796 exclude_user = 0,
797 exclude_kernel = 0,
798 exclude_hv = 0,
799 exclude_idle = 0,
800 mmap = 0,
801 context_switch = 0,
802 comm = 0,
803 freq = 1,
804 inherit_stat = 0,
805 enable_on_exec = 0,
806 task = 0,
807 watermark = 0,
808 precise_ip = 0,
809 mmap_data = 0,
810 sample_id_all = 1;
811 int idx = 0;
812
813 if (!PyArg_ParseTupleAndKeywords(args, kwargs,
814 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
815 &attr.type, &attr.config, &attr.sample_freq,
816 &sample_period, &attr.sample_type,
817 &attr.read_format, &disabled, &inherit,
818 &pinned, &exclusive, &exclude_user,
819 &exclude_kernel, &exclude_hv, &exclude_idle,
820 &mmap, &context_switch, &comm, &freq, &inherit_stat,
821 &enable_on_exec, &task, &watermark,
822 &precise_ip, &mmap_data, &sample_id_all,
823 &attr.wakeup_events, &attr.bp_type,
824 &attr.bp_addr, &attr.bp_len, &idx))
825 return -1;
826
827 /* union... */
828 if (sample_period != 0) {
829 if (attr.sample_freq != 0)
830 return -1; /* FIXME: throw right exception */
831 attr.sample_period = sample_period;
832 }
833
834 /* Bitfields */
835 attr.disabled = disabled;
836 attr.inherit = inherit;
837 attr.pinned = pinned;
838 attr.exclusive = exclusive;
839 attr.exclude_user = exclude_user;
840 attr.exclude_kernel = exclude_kernel;
841 attr.exclude_hv = exclude_hv;
842 attr.exclude_idle = exclude_idle;
843 attr.mmap = mmap;
844 attr.context_switch = context_switch;
845 attr.comm = comm;
846 attr.freq = freq;
847 attr.inherit_stat = inherit_stat;
848 attr.enable_on_exec = enable_on_exec;
849 attr.task = task;
850 attr.watermark = watermark;
851 attr.precise_ip = precise_ip;
852 attr.mmap_data = mmap_data;
853 attr.sample_id_all = sample_id_all;
854 attr.size = sizeof(attr);
855
856 evsel__init(&pevsel->evsel, &attr, idx);
857 return 0;
858 }
859
static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
861 {
862 evsel__exit(&pevsel->evsel);
863 Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
864 }
865
static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
868 {
869 struct evsel *evsel = &pevsel->evsel;
870 struct perf_cpu_map *cpus = NULL;
871 struct perf_thread_map *threads = NULL;
872 PyObject *pcpus = NULL, *pthreads = NULL;
873 int group = 0, inherit = 0;
874 static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
875
876 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
877 &pcpus, &pthreads, &group, &inherit))
878 return NULL;
879
880 if (pthreads != NULL)
881 threads = ((struct pyrf_thread_map *)pthreads)->threads;
882
883 if (pcpus != NULL)
884 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
885
886 evsel->core.attr.inherit = inherit;
887 /*
	 * This will group just the fds for this single evsel; to group
	 * multiple events, use evlist.open().
890 */
891 if (evsel__open(evsel, cpus, threads) < 0) {
892 PyErr_SetFromErrno(PyExc_OSError);
893 return NULL;
894 }
895
896 Py_INCREF(Py_None);
897 return Py_None;
898 }
899
900 static PyMethodDef pyrf_evsel__methods[] = {
901 {
902 .ml_name = "open",
903 .ml_meth = (PyCFunction)pyrf_evsel__open,
904 .ml_flags = METH_VARARGS | METH_KEYWORDS,
905 .ml_doc = PyDoc_STR("open the event selector file descriptor table.")
906 },
907 { .ml_name = NULL, }
908 };
909
static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector object.");
911
912 static PyTypeObject pyrf_evsel__type = {
913 PyVarObject_HEAD_INIT(NULL, 0)
914 .tp_name = "perf.evsel",
915 .tp_basicsize = sizeof(struct pyrf_evsel),
916 .tp_dealloc = (destructor)pyrf_evsel__delete,
917 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
918 .tp_doc = pyrf_evsel__doc,
919 .tp_methods = pyrf_evsel__methods,
920 .tp_init = (initproc)pyrf_evsel__init,
921 };
922
static int pyrf_evsel__setup_types(void)
924 {
925 pyrf_evsel__type.tp_new = PyType_GenericNew;
926 return PyType_Ready(&pyrf_evsel__type);
927 }
928
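/* Python object embedding an evlist (a list of event selectors). */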
929 struct pyrf_evlist {
930 PyObject_HEAD
931
932 struct evlist evlist;
933 };
934
static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
937 {
938 PyObject *pcpus = NULL, *pthreads = NULL;
939 struct perf_cpu_map *cpus;
940 struct perf_thread_map *threads;
941
942 if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
943 return -1;
944
945 threads = ((struct pyrf_thread_map *)pthreads)->threads;
946 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
947 evlist__init(&pevlist->evlist, cpus, threads);
948 return 0;
949 }
950
static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
952 {
953 evlist__exit(&pevlist->evlist);
954 Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
955 }
956
static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
959 {
960 struct evlist *evlist = &pevlist->evlist;
961 static char *kwlist[] = { "pages", "overwrite", NULL };
962 int pages = 128, overwrite = false;
963
964 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
965 &pages, &overwrite))
966 return NULL;
967
968 if (evlist__mmap(evlist, pages) < 0) {
969 PyErr_SetFromErrno(PyExc_OSError);
970 return NULL;
971 }
972
973 Py_INCREF(Py_None);
974 return Py_None;
975 }
976
static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
979 {
980 struct evlist *evlist = &pevlist->evlist;
981 static char *kwlist[] = { "timeout", NULL };
982 int timeout = -1, n;
983
984 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
985 return NULL;
986
987 n = evlist__poll(evlist, timeout);
988 if (n < 0) {
989 PyErr_SetFromErrno(PyExc_OSError);
990 return NULL;
991 }
992
993 return Py_BuildValue("i", n);
994 }
995
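/* Return the evlist poll fd table as a list of Python file objects. */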
static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
					 PyObject *args __maybe_unused,
					 PyObject *kwargs __maybe_unused)
999 {
1000 struct evlist *evlist = &pevlist->evlist;
1001 PyObject *list = PyList_New(0);
1002 int i;
1003
1004 for (i = 0; i < evlist->core.pollfd.nr; ++i) {
1005 PyObject *file;
1006 #if PY_MAJOR_VERSION < 3
1007 FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
1008
1009 if (fp == NULL)
1010 goto free_list;
1011
1012 file = PyFile_FromFile(fp, "perf", "r", NULL);
1013 #else
1014 file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
1015 NULL, NULL, NULL, 0);
1016 #endif
1017 if (file == NULL)
1018 goto free_list;
1019
1020 if (PyList_Append(list, file) != 0) {
1021 Py_DECREF(file);
1022 goto free_list;
1023 }
1024
1025 Py_DECREF(file);
1026 }
1027
1028 return list;
free_list:
	Py_XDECREF(list);
	return PyErr_NoMemory();
1031 }
1032
1033
static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
				  PyObject *args,
				  PyObject *kwargs __maybe_unused)
1037 {
1038 struct evlist *evlist = &pevlist->evlist;
1039 PyObject *pevsel;
1040 struct evsel *evsel;
1041
1042 if (!PyArg_ParseTuple(args, "O", &pevsel))
1043 return NULL;
1044
1045 Py_INCREF(pevsel);
1046 evsel = &((struct pyrf_evsel *)pevsel)->evsel;
1047 evsel->core.idx = evlist->core.nr_entries;
1048 evlist__add(evlist, evsel);
1049
1050 return Py_BuildValue("i", evlist->core.nr_entries);
1051 }
1052
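/* Find the mmap'ed ring buffer servicing the given cpu. */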
static struct mmap *get_md(struct evlist *evlist, int cpu)
1054 {
1055 int i;
1056
1057 for (i = 0; i < evlist->core.nr_mmaps; i++) {
1058 struct mmap *md = &evlist->mmap[i];
1059
1060 if (md->core.cpu == cpu)
1061 return md;
1062 }
1063
1064 return NULL;
1065 }
1066
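/*
 * Read one event from the ring buffer of the given cpu, wrap it in the
 * matching Python event object and parse its sample before consuming it.
 */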
static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
					  PyObject *args, PyObject *kwargs)
1069 {
1070 struct evlist *evlist = &pevlist->evlist;
1071 union perf_event *event;
1072 int sample_id_all = 1, cpu;
1073 static char *kwlist[] = { "cpu", "sample_id_all", NULL };
1074 struct mmap *md;
1075 int err;
1076
1077 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
1078 &cpu, &sample_id_all))
1079 return NULL;
1080
1081 md = get_md(evlist, cpu);
1082 if (!md)
1083 return NULL;
1084
1085 if (perf_mmap__read_init(&md->core) < 0)
1086 goto end;
1087
1088 event = perf_mmap__read_event(&md->core);
1089 if (event != NULL) {
1090 PyObject *pyevent = pyrf_event__new(event);
1091 struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
1092 struct evsel *evsel;
1093
1094 if (pyevent == NULL)
1095 return PyErr_NoMemory();
1096
1097 evsel = evlist__event2evsel(evlist, event);
1098 if (!evsel) {
1099 Py_INCREF(Py_None);
1100 return Py_None;
1101 }
1102
1103 pevent->evsel = evsel;
1104
1105 err = evsel__parse_sample(evsel, event, &pevent->sample);
1106
		/* Consume the event only after we have parsed it out. */
1108 perf_mmap__consume(&md->core);
1109
1110 if (err)
1111 return PyErr_Format(PyExc_OSError,
1112 "perf: can't parse sample, err=%d", err);
1113 return pyevent;
1114 }
1115 end:
1116 Py_INCREF(Py_None);
1117 return Py_None;
1118 }
1119
static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
1122 {
1123 struct evlist *evlist = &pevlist->evlist;
1124 int group = 0;
1125 static char *kwlist[] = { "group", NULL };
1126
	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &group))
1128 return NULL;
1129
1130 if (group)
1131 evlist__set_leader(evlist);
1132
1133 if (evlist__open(evlist) < 0) {
1134 PyErr_SetFromErrno(PyExc_OSError);
1135 return NULL;
1136 }
1137
1138 Py_INCREF(Py_None);
1139 return Py_None;
1140 }
1141
1142 static PyMethodDef pyrf_evlist__methods[] = {
1143 {
1144 .ml_name = "mmap",
1145 .ml_meth = (PyCFunction)pyrf_evlist__mmap,
1146 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1147 .ml_doc = PyDoc_STR("mmap the file descriptor table.")
1148 },
1149 {
1150 .ml_name = "open",
1151 .ml_meth = (PyCFunction)pyrf_evlist__open,
1152 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1153 .ml_doc = PyDoc_STR("open the file descriptors.")
1154 },
1155 {
1156 .ml_name = "poll",
1157 .ml_meth = (PyCFunction)pyrf_evlist__poll,
1158 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1159 .ml_doc = PyDoc_STR("poll the file descriptor table.")
1160 },
1161 {
1162 .ml_name = "get_pollfd",
1163 .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
1164 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1165 .ml_doc = PyDoc_STR("get the poll file descriptor table.")
1166 },
1167 {
1168 .ml_name = "add",
1169 .ml_meth = (PyCFunction)pyrf_evlist__add,
1170 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1171 .ml_doc = PyDoc_STR("adds an event selector to the list.")
1172 },
1173 {
1174 .ml_name = "read_on_cpu",
1175 .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
1176 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1177 .ml_doc = PyDoc_STR("reads an event.")
1178 },
1179 { .ml_name = NULL, }
1180 };
1181
static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1183 {
1184 struct pyrf_evlist *pevlist = (void *)obj;
1185
1186 return pevlist->evlist.core.nr_entries;
1187 }
1188
static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1190 {
1191 struct pyrf_evlist *pevlist = (void *)obj;
1192 struct evsel *pos;
1193
1194 if (i >= pevlist->evlist.core.nr_entries)
1195 return NULL;
1196
1197 evlist__for_each_entry(&pevlist->evlist, pos) {
1198 if (i-- == 0)
1199 break;
1200 }
1201
1202 return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1203 }
1204
1205 static PySequenceMethods pyrf_evlist__sequence_methods = {
1206 .sq_length = pyrf_evlist__length,
1207 .sq_item = pyrf_evlist__item,
1208 };
1209
1210 static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1211
1212 static PyTypeObject pyrf_evlist__type = {
1213 PyVarObject_HEAD_INIT(NULL, 0)
1214 .tp_name = "perf.evlist",
1215 .tp_basicsize = sizeof(struct pyrf_evlist),
1216 .tp_dealloc = (destructor)pyrf_evlist__delete,
1217 .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
1218 .tp_as_sequence = &pyrf_evlist__sequence_methods,
1219 .tp_doc = pyrf_evlist__doc,
1220 .tp_methods = pyrf_evlist__methods,
1221 .tp_init = (initproc)pyrf_evlist__init,
1222 };
1223
static int pyrf_evlist__setup_types(void)
1225 {
1226 pyrf_evlist__type.tp_new = PyType_GenericNew;
1227 return PyType_Ready(&pyrf_evlist__type);
1228 }
1229
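/*
 * PERF_* constants re-exported as module attributes by the init function
 * below, e.g. perf.TYPE_HARDWARE, perf.COUNT_HW_CPU_CYCLES, perf.SAMPLE_TID.
 */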
1230 #define PERF_CONST(name) { #name, PERF_##name }
1231
1232 static struct {
1233 const char *name;
1234 int value;
1235 } perf__constants[] = {
1236 PERF_CONST(TYPE_HARDWARE),
1237 PERF_CONST(TYPE_SOFTWARE),
1238 PERF_CONST(TYPE_TRACEPOINT),
1239 PERF_CONST(TYPE_HW_CACHE),
1240 PERF_CONST(TYPE_RAW),
1241 PERF_CONST(TYPE_BREAKPOINT),
1242
1243 PERF_CONST(COUNT_HW_CPU_CYCLES),
1244 PERF_CONST(COUNT_HW_INSTRUCTIONS),
1245 PERF_CONST(COUNT_HW_CACHE_REFERENCES),
1246 PERF_CONST(COUNT_HW_CACHE_MISSES),
1247 PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
1248 PERF_CONST(COUNT_HW_BRANCH_MISSES),
1249 PERF_CONST(COUNT_HW_BUS_CYCLES),
1250 PERF_CONST(COUNT_HW_CACHE_L1D),
1251 PERF_CONST(COUNT_HW_CACHE_L1I),
1252 PERF_CONST(COUNT_HW_CACHE_LL),
1253 PERF_CONST(COUNT_HW_CACHE_DTLB),
1254 PERF_CONST(COUNT_HW_CACHE_ITLB),
1255 PERF_CONST(COUNT_HW_CACHE_BPU),
1256 PERF_CONST(COUNT_HW_CACHE_OP_READ),
1257 PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
1258 PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
1259 PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
1260 PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
1261
1262 PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
1263 PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
1264
1265 PERF_CONST(COUNT_SW_CPU_CLOCK),
1266 PERF_CONST(COUNT_SW_TASK_CLOCK),
1267 PERF_CONST(COUNT_SW_PAGE_FAULTS),
1268 PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
1269 PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
1270 PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
1271 PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
1272 PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
1273 PERF_CONST(COUNT_SW_EMULATION_FAULTS),
1274 PERF_CONST(COUNT_SW_DUMMY),
1275
1276 PERF_CONST(SAMPLE_IP),
1277 PERF_CONST(SAMPLE_TID),
1278 PERF_CONST(SAMPLE_TIME),
1279 PERF_CONST(SAMPLE_ADDR),
1280 PERF_CONST(SAMPLE_READ),
1281 PERF_CONST(SAMPLE_CALLCHAIN),
1282 PERF_CONST(SAMPLE_ID),
1283 PERF_CONST(SAMPLE_CPU),
1284 PERF_CONST(SAMPLE_PERIOD),
1285 PERF_CONST(SAMPLE_STREAM_ID),
1286 PERF_CONST(SAMPLE_RAW),
1287
1288 PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
1289 PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
1290 PERF_CONST(FORMAT_ID),
1291 PERF_CONST(FORMAT_GROUP),
1292
1293 PERF_CONST(RECORD_MMAP),
1294 PERF_CONST(RECORD_LOST),
1295 PERF_CONST(RECORD_COMM),
1296 PERF_CONST(RECORD_EXIT),
1297 PERF_CONST(RECORD_THROTTLE),
1298 PERF_CONST(RECORD_UNTHROTTLE),
1299 PERF_CONST(RECORD_FORK),
1300 PERF_CONST(RECORD_READ),
1301 PERF_CONST(RECORD_SAMPLE),
1302 PERF_CONST(RECORD_MMAP2),
1303 PERF_CONST(RECORD_AUX),
1304 PERF_CONST(RECORD_ITRACE_START),
1305 PERF_CONST(RECORD_LOST_SAMPLES),
1306 PERF_CONST(RECORD_SWITCH),
1307 PERF_CONST(RECORD_SWITCH_CPU_WIDE),
1308
1309 PERF_CONST(RECORD_MISC_SWITCH_OUT),
1310 { .name = NULL, },
1311 };
1312
static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
1315 {
1316 struct tep_event *tp_format;
1317 static char *kwlist[] = { "sys", "name", NULL };
1318 char *sys = NULL;
1319 char *name = NULL;
1320
1321 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1322 &sys, &name))
1323 return NULL;
1324
1325 tp_format = trace_event__tp_format(sys, name);
1326 if (IS_ERR(tp_format))
1327 return _PyLong_FromLong(-1);
1328
1329 return _PyLong_FromLong(tp_format->id);
1330 }
1331
1332 static PyMethodDef perf__methods[] = {
1333 {
1334 .ml_name = "tracepoint",
1335 .ml_meth = (PyCFunction) pyrf__tracepoint,
1336 .ml_flags = METH_VARARGS | METH_KEYWORDS,
1337 .ml_doc = PyDoc_STR("Get tracepoint config.")
1338 },
1339 { .ml_name = NULL, }
1340 };
1341
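/*
 * Module initialization. A minimal usage sketch from Python, modelled on the
 * tools/perf/python/twatch.py example and assuming the built perf.so is on
 * the Python path:
 *
 *   import perf
 *   cpus, threads = perf.cpu_map(), perf.thread_map()
 *   evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
 *                      config = perf.COUNT_SW_DUMMY,
 *                      task = 1, comm = 1, freq = 0, wakeup_events = 1,
 *                      watermark = 1, sample_id_all = 1,
 *                      sample_type = perf.SAMPLE_CPU | perf.SAMPLE_TID)
 *   evsel.open(cpus = cpus, threads = threads)
 *   evlist = perf.evlist(cpus, threads)
 *   evlist.add(evsel)
 *   evlist.mmap()
 *   while True:
 *       evlist.poll(timeout = -1)
 *       for cpu in cpus:
 *           event = evlist.read_on_cpu(cpu)
 *           if event:
 *               print(event)
 */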
1342 #if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initperf(void)
1344 #else
1345 PyMODINIT_FUNC PyInit_perf(void)
1346 #endif
1347 {
1348 PyObject *obj;
1349 int i;
1350 PyObject *dict;
1351 #if PY_MAJOR_VERSION < 3
1352 PyObject *module = Py_InitModule("perf", perf__methods);
1353 #else
1354 static struct PyModuleDef moduledef = {
1355 PyModuleDef_HEAD_INIT,
1356 "perf", /* m_name */
1357 "", /* m_doc */
1358 -1, /* m_size */
1359 perf__methods, /* m_methods */
1360 NULL, /* m_reload */
1361 NULL, /* m_traverse */
1362 NULL, /* m_clear */
1363 NULL, /* m_free */
1364 };
1365 PyObject *module = PyModule_Create(&moduledef);
1366 #endif
1367
1368 if (module == NULL ||
1369 pyrf_event__setup_types() < 0 ||
1370 pyrf_evlist__setup_types() < 0 ||
1371 pyrf_evsel__setup_types() < 0 ||
1372 pyrf_thread_map__setup_types() < 0 ||
1373 pyrf_cpu_map__setup_types() < 0)
1374 #if PY_MAJOR_VERSION < 3
1375 return;
1376 #else
1377 return module;
1378 #endif
1379
	/* The page_size variable is provided by the util object file. */
1381 page_size = sysconf(_SC_PAGE_SIZE);
1382
1383 Py_INCREF(&pyrf_evlist__type);
1384 PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1385
1386 Py_INCREF(&pyrf_evsel__type);
1387 PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1388
1389 Py_INCREF(&pyrf_mmap_event__type);
1390 PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1391
1392 Py_INCREF(&pyrf_lost_event__type);
1393 PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1394
1395 Py_INCREF(&pyrf_comm_event__type);
1396 PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1397
1398 Py_INCREF(&pyrf_task_event__type);
1399 PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1400
1401 Py_INCREF(&pyrf_throttle_event__type);
1402 PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1403
1407 Py_INCREF(&pyrf_read_event__type);
1408 PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1409
1410 Py_INCREF(&pyrf_sample_event__type);
1411 PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1412
1413 Py_INCREF(&pyrf_context_switch_event__type);
1414 PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1415
1416 Py_INCREF(&pyrf_thread_map__type);
1417 PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1418
1419 Py_INCREF(&pyrf_cpu_map__type);
1420 PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1421
1422 dict = PyModule_GetDict(module);
1423 if (dict == NULL)
1424 goto error;
1425
1426 for (i = 0; perf__constants[i].name != NULL; i++) {
1427 obj = _PyLong_FromLong(perf__constants[i].value);
1428 if (obj == NULL)
1429 goto error;
1430 PyDict_SetItemString(dict, perf__constants[i].name, obj);
1431 Py_DECREF(obj);
1432 }
1433
1434 error:
1435 if (PyErr_Occurred())
1436 PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1437 #if PY_MAJOR_VERSION >= 3
1438 return module;
1439 #endif
1440 }
1441
1442 /*
 * Dummy, to avoid dragging all the test_attr infrastructure into the python
 * binding.
1445 */
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
		     int fd, int group_fd, unsigned long flags)
1448 {
1449 }
1450