1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Kprobes-based tracing events
4 *
5 * Created by Masami Hiramatsu <mhiramat@redhat.com>
6 *
7 */
8 #define pr_fmt(fmt) "trace_kprobe: " fmt
9
10 #include <linux/security.h>
11 #include <linux/module.h>
12 #include <linux/uaccess.h>
13 #include <linux/rculist.h>
14 #include <linux/error-injection.h>
15
16 #include <asm/setup.h> /* for COMMAND_LINE_SIZE */
17
18 #include "trace_dynevent.h"
19 #include "trace_kprobe_selftest.h"
20 #include "trace_probe.h"
21 #include "trace_probe_tmpl.h"
22
23 #define KPROBE_EVENT_SYSTEM "kprobes"
24 #define KRETPROBE_MAXACTIVE_MAX 4096
25
26 /* Kprobe early definition from command line */
27 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
28
29 static int __init set_kprobe_boot_events(char *str)
30 {
31 strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
32 disable_tracing_selftest("running kprobe events");
33
34 return 0;
35 }
36 __setup("kprobe_event=", set_kprobe_boot_events);
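
/*
 * For illustration (the probe shown is just an example, following the
 * documented "kprobe_event=" boot parameter format): definitions use ','
 * instead of spaces and ';' between events, e.g.
 *
 *	kprobe_event=p,vfs_read,$arg1,$arg2;r,vfs_read,$retval
 *
 * setup_boot_kprobe_events() below converts this back into the normal
 * kprobe_events syntax before registering the probes.
 */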
37
38 static int trace_kprobe_create(const char *raw_command);
39 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
40 static int trace_kprobe_release(struct dyn_event *ev);
41 static bool trace_kprobe_is_busy(struct dyn_event *ev);
42 static bool trace_kprobe_match(const char *system, const char *event,
43 int argc, const char **argv, struct dyn_event *ev);
44
45 static struct dyn_event_operations trace_kprobe_ops = {
46 .create = trace_kprobe_create,
47 .show = trace_kprobe_show,
48 .is_busy = trace_kprobe_is_busy,
49 .free = trace_kprobe_release,
50 .match = trace_kprobe_match,
51 };
52
53 /*
54 * Kprobe event core functions
55 */
56 struct trace_kprobe {
57 struct dyn_event devent;
58 struct kretprobe rp; /* Use rp.kp for kprobe use */
59 unsigned long __percpu *nhit;
60 const char *symbol; /* symbol name */
61 struct trace_probe tp;
62 };
63
64 static bool is_trace_kprobe(struct dyn_event *ev)
65 {
66 return ev->ops == &trace_kprobe_ops;
67 }
68
69 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
70 {
71 return container_of(ev, struct trace_kprobe, devent);
72 }
73
74 /**
75 * for_each_trace_kprobe - iterate over the trace_kprobe list
76 * @pos: the struct trace_kprobe * for each entry
77 * @dpos: the struct dyn_event * to use as a loop cursor
78 */
79 #define for_each_trace_kprobe(pos, dpos) \
80 for_each_dyn_event(dpos) \
81 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
82
83 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
84 {
85 return tk->rp.handler != NULL;
86 }
87
88 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
89 {
90 return tk->symbol ? tk->symbol : "unknown";
91 }
92
93 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
94 {
95 return tk->rp.kp.offset;
96 }
97
98 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
99 {
100 return kprobe_gone(&tk->rp.kp);
101 }
102
103 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
104 struct module *mod)
105 {
106 int len = strlen(module_name(mod));
107 const char *name = trace_kprobe_symbol(tk);
108
109 return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
110 }
111
112 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
113 {
114 char *p;
115 bool ret;
116
117 if (!tk->symbol)
118 return false;
119 p = strchr(tk->symbol, ':');
120 if (!p)
121 return true;
122 *p = '\0';
123 rcu_read_lock_sched();
124 ret = !!find_module(tk->symbol);
125 rcu_read_unlock_sched();
126 *p = ':';
127
128 return ret;
129 }
130
131 static bool trace_kprobe_is_busy(struct dyn_event *ev)
132 {
133 struct trace_kprobe *tk = to_trace_kprobe(ev);
134
135 return trace_probe_is_enabled(&tk->tp);
136 }
137
138 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
139 int argc, const char **argv)
140 {
141 char buf[MAX_ARGSTR_LEN + 1];
142
143 if (!argc)
144 return true;
145
146 if (!tk->symbol)
147 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
148 else if (tk->rp.kp.offset)
149 snprintf(buf, sizeof(buf), "%s+%u",
150 trace_kprobe_symbol(tk), tk->rp.kp.offset);
151 else
152 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
153 if (strcmp(buf, argv[0]))
154 return false;
155 argc--; argv++;
156
157 return trace_probe_match_command_args(&tk->tp, argc, argv);
158 }
159
160 static bool trace_kprobe_match(const char *system, const char *event,
161 int argc, const char **argv, struct dyn_event *ev)
162 {
163 struct trace_kprobe *tk = to_trace_kprobe(ev);
164
165 return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
166 (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
167 trace_kprobe_match_command_head(tk, argc, argv);
168 }
169
170 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
171 {
172 unsigned long nhit = 0;
173 int cpu;
174
175 for_each_possible_cpu(cpu)
176 nhit += *per_cpu_ptr(tk->nhit, cpu);
177
178 return nhit;
179 }
180
181 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
182 {
183 return !(list_empty(&tk->rp.kp.list) &&
184 hlist_unhashed(&tk->rp.kp.hlist));
185 }
186
187 /* Return 0 if it fails to find the symbol address */
188 static nokprobe_inline
189 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
190 {
191 unsigned long addr;
192
193 if (tk->symbol) {
194 addr = (unsigned long)
195 kallsyms_lookup_name(trace_kprobe_symbol(tk));
196 if (addr)
197 addr += tk->rp.kp.offset;
198 } else {
199 addr = (unsigned long)tk->rp.kp.addr;
200 }
201 return addr;
202 }
203
204 static nokprobe_inline struct trace_kprobe *
205 trace_kprobe_primary_from_call(struct trace_event_call *call)
206 {
207 struct trace_probe *tp;
208
209 tp = trace_probe_primary_from_call(call);
210 if (WARN_ON_ONCE(!tp))
211 return NULL;
212
213 return container_of(tp, struct trace_kprobe, tp);
214 }
215
216 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
217 {
218 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
219
220 return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
221 tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
222 tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
223 }
224
225 bool trace_kprobe_error_injectable(struct trace_event_call *call)
226 {
227 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
228
229 return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
230 false;
231 }
232
233 static int register_kprobe_event(struct trace_kprobe *tk);
234 static int unregister_kprobe_event(struct trace_kprobe *tk);
235
236 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
237 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
238 struct pt_regs *regs);
239
240 static void free_trace_kprobe(struct trace_kprobe *tk)
241 {
242 if (tk) {
243 trace_probe_cleanup(&tk->tp);
244 kfree(tk->symbol);
245 free_percpu(tk->nhit);
246 kfree(tk);
247 }
248 }
249
250 /*
251 * Allocate new trace_probe and initialize it (including kprobes).
252 */
253 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
254 const char *event,
255 void *addr,
256 const char *symbol,
257 unsigned long offs,
258 int maxactive,
259 int nargs, bool is_return)
260 {
261 struct trace_kprobe *tk;
262 int ret = -ENOMEM;
263
264 tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
265 if (!tk)
266 return ERR_PTR(ret);
267
268 tk->nhit = alloc_percpu(unsigned long);
269 if (!tk->nhit)
270 goto error;
271
272 if (symbol) {
273 tk->symbol = kstrdup(symbol, GFP_KERNEL);
274 if (!tk->symbol)
275 goto error;
276 tk->rp.kp.symbol_name = tk->symbol;
277 tk->rp.kp.offset = offs;
278 } else
279 tk->rp.kp.addr = addr;
280
281 if (is_return)
282 tk->rp.handler = kretprobe_dispatcher;
283 else
284 tk->rp.kp.pre_handler = kprobe_dispatcher;
285
286 tk->rp.maxactive = maxactive;
287 INIT_HLIST_NODE(&tk->rp.kp.hlist);
288 INIT_LIST_HEAD(&tk->rp.kp.list);
289
290 ret = trace_probe_init(&tk->tp, event, group, false);
291 if (ret < 0)
292 goto error;
293
294 dyn_event_init(&tk->devent, &trace_kprobe_ops);
295 return tk;
296 error:
297 free_trace_kprobe(tk);
298 return ERR_PTR(ret);
299 }
300
301 static struct trace_kprobe *find_trace_kprobe(const char *event,
302 const char *group)
303 {
304 struct dyn_event *pos;
305 struct trace_kprobe *tk;
306
307 for_each_trace_kprobe(tk, pos)
308 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
309 strcmp(trace_probe_group_name(&tk->tp), group) == 0)
310 return tk;
311 return NULL;
312 }
313
314 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
315 {
316 int ret = 0;
317
318 if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
319 if (trace_kprobe_is_return(tk))
320 ret = enable_kretprobe(&tk->rp);
321 else
322 ret = enable_kprobe(&tk->rp.kp);
323 }
324
325 return ret;
326 }
327
328 static void __disable_trace_kprobe(struct trace_probe *tp)
329 {
330 struct trace_probe *pos;
331 struct trace_kprobe *tk;
332
333 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
334 tk = container_of(pos, struct trace_kprobe, tp);
335 if (!trace_kprobe_is_registered(tk))
336 continue;
337 if (trace_kprobe_is_return(tk))
338 disable_kretprobe(&tk->rp);
339 else
340 disable_kprobe(&tk->rp.kp);
341 }
342 }
343
344 /*
345 * Enable trace_probe.
346 * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
347 */
348 static int enable_trace_kprobe(struct trace_event_call *call,
349 struct trace_event_file *file)
350 {
351 struct trace_probe *pos, *tp;
352 struct trace_kprobe *tk;
353 bool enabled;
354 int ret = 0;
355
356 tp = trace_probe_primary_from_call(call);
357 if (WARN_ON_ONCE(!tp))
358 return -ENODEV;
359 enabled = trace_probe_is_enabled(tp);
360
361 /* This also changes "enabled" state */
362 if (file) {
363 ret = trace_probe_add_file(tp, file);
364 if (ret)
365 return ret;
366 } else
367 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
368
369 if (enabled)
370 return 0;
371
372 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
373 tk = container_of(pos, struct trace_kprobe, tp);
374 if (trace_kprobe_has_gone(tk))
375 continue;
376 ret = __enable_trace_kprobe(tk);
377 if (ret)
378 break;
379 enabled = true;
380 }
381
382 if (ret) {
383 /* Failed to enable one of them. Roll back all */
384 if (enabled)
385 __disable_trace_kprobe(tp);
386 if (file)
387 trace_probe_remove_file(tp, file);
388 else
389 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
390 }
391
392 return ret;
393 }
394
395 /*
396 * Disable trace_probe.
397 * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
398 */
399 static int disable_trace_kprobe(struct trace_event_call *call,
400 struct trace_event_file *file)
401 {
402 struct trace_probe *tp;
403
404 tp = trace_probe_primary_from_call(call);
405 if (WARN_ON_ONCE(!tp))
406 return -ENODEV;
407
408 if (file) {
409 if (!trace_probe_get_file_link(tp, file))
410 return -ENOENT;
411 if (!trace_probe_has_single_file(tp))
412 goto out;
413 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
414 } else
415 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
416
417 if (!trace_probe_is_enabled(tp))
418 __disable_trace_kprobe(tp);
419
420 out:
421 if (file)
422 /*
423 * Synchronization is done in the function below. For a perf event,
424 * file == NULL and perf_trace_event_unreg() calls
425 * tracepoint_synchronize_unregister() to synchronize the event,
426 * so we don't need to care about it here.
427 */
428 trace_probe_remove_file(tp, file);
429
430 return 0;
431 }
432
433 #if defined(CONFIG_DYNAMIC_FTRACE) && \
434 !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
435 static bool __within_notrace_func(unsigned long addr)
436 {
437 unsigned long offset, size;
438
439 if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
440 return false;
441
442 /* Get the entry address of the target function */
443 addr -= offset;
444
445 /*
446 * Since ftrace_location_range() does inclusive range check, we need
447 * to subtract 1 byte from the end address.
448 */
449 return !ftrace_location_range(addr, addr + size - 1);
450 }
451
452 static bool within_notrace_func(struct trace_kprobe *tk)
453 {
454 unsigned long addr = trace_kprobe_address(tk);
455 char symname[KSYM_NAME_LEN], *p;
456
457 if (!__within_notrace_func(addr))
458 return false;
459
460 /* Check if the address is on a suffixed-symbol */
461 if (!lookup_symbol_name(addr, symname)) {
462 p = strchr(symname, '.');
463 if (!p)
464 return true;
465 *p = '\0';
466 addr = (unsigned long)kprobe_lookup_name(symname, 0);
467 if (addr)
468 return __within_notrace_func(addr);
469 }
470
471 return true;
472 }
473 #else
474 #define within_notrace_func(tk) (false)
475 #endif
476
477 /* Internal register function - just handle k*probes and flags */
478 static int __register_trace_kprobe(struct trace_kprobe *tk)
479 {
480 int i, ret;
481
482 ret = security_locked_down(LOCKDOWN_KPROBES);
483 if (ret)
484 return ret;
485
486 if (trace_kprobe_is_registered(tk))
487 return -EINVAL;
488
489 if (within_notrace_func(tk)) {
490 pr_warn("Could not probe notrace function %s\n",
491 trace_kprobe_symbol(tk));
492 return -EINVAL;
493 }
494
495 for (i = 0; i < tk->tp.nr_args; i++) {
496 ret = traceprobe_update_arg(&tk->tp.args[i]);
497 if (ret)
498 return ret;
499 }
500
501 /* Set/clear disabled flag according to tp->flag */
502 if (trace_probe_is_enabled(&tk->tp))
503 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
504 else
505 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
506
507 if (trace_kprobe_is_return(tk))
508 ret = register_kretprobe(&tk->rp);
509 else
510 ret = register_kprobe(&tk->rp.kp);
511
512 return ret;
513 }
514
515 /* Internal unregister function - just handle k*probes and flags */
516 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
517 {
518 if (trace_kprobe_is_registered(tk)) {
519 if (trace_kprobe_is_return(tk))
520 unregister_kretprobe(&tk->rp);
521 else
522 unregister_kprobe(&tk->rp.kp);
523 /* Cleanup kprobe for reuse and mark it unregistered */
524 INIT_HLIST_NODE(&tk->rp.kp.hlist);
525 INIT_LIST_HEAD(&tk->rp.kp.list);
526 if (tk->rp.kp.symbol_name)
527 tk->rp.kp.addr = NULL;
528 }
529 }
530
531 /* Unregister a trace_probe and probe_event */
532 static int unregister_trace_kprobe(struct trace_kprobe *tk)
533 {
534 /* If other probes are on the event, just unregister kprobe */
535 if (trace_probe_has_sibling(&tk->tp))
536 goto unreg;
537
538 /* Enabled event can not be unregistered */
539 if (trace_probe_is_enabled(&tk->tp))
540 return -EBUSY;
541
542 /* If there's a reference to the dynamic event */
543 if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
544 return -EBUSY;
545
546 /* Will fail if probe is being used by ftrace or perf */
547 if (unregister_kprobe_event(tk))
548 return -EBUSY;
549
550 unreg:
551 __unregister_trace_kprobe(tk);
552 dyn_event_remove(&tk->devent);
553 trace_probe_unlink(&tk->tp);
554
555 return 0;
556 }
557
558 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
559 struct trace_kprobe *comp)
560 {
561 struct trace_probe_event *tpe = orig->tp.event;
562 struct trace_probe *pos;
563 int i;
564
565 list_for_each_entry(pos, &tpe->probes, list) {
566 orig = container_of(pos, struct trace_kprobe, tp);
567 if (strcmp(trace_kprobe_symbol(orig),
568 trace_kprobe_symbol(comp)) ||
569 trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
570 continue;
571
572 /*
573 * trace_probe_compare_arg_type() ensured that nr_args and
574 * each argument name and type are same. Let's compare comm.
575 */
576 for (i = 0; i < orig->tp.nr_args; i++) {
577 if (strcmp(orig->tp.args[i].comm,
578 comp->tp.args[i].comm))
579 break;
580 }
581
582 if (i == orig->tp.nr_args)
583 return true;
584 }
585
586 return false;
587 }
588
589 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
590 {
591 int ret;
592
593 ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
594 if (ret) {
595 /* Note that the argument index starts at 2 */
596 trace_probe_log_set_index(ret + 1);
597 trace_probe_log_err(0, DIFF_ARG_TYPE);
598 return -EEXIST;
599 }
600 if (trace_kprobe_has_same_kprobe(to, tk)) {
601 trace_probe_log_set_index(0);
602 trace_probe_log_err(0, SAME_PROBE);
603 return -EEXIST;
604 }
605
606 /* Append to existing event */
607 ret = trace_probe_append(&tk->tp, &to->tp);
608 if (ret)
609 return ret;
610
611 /* Register k*probe */
612 ret = __register_trace_kprobe(tk);
613 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
614 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
615 ret = 0;
616 }
617
618 if (ret)
619 trace_probe_unlink(&tk->tp);
620 else
621 dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
622
623 return ret;
624 }
625
626 /* Register a trace_probe and probe_event */
627 static int register_trace_kprobe(struct trace_kprobe *tk)
628 {
629 struct trace_kprobe *old_tk;
630 int ret;
631
632 mutex_lock(&event_mutex);
633
634 old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
635 trace_probe_group_name(&tk->tp));
636 if (old_tk) {
637 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
638 trace_probe_log_set_index(0);
639 trace_probe_log_err(0, DIFF_PROBE_TYPE);
640 ret = -EEXIST;
641 } else {
642 ret = append_trace_kprobe(tk, old_tk);
643 }
644 goto end;
645 }
646
647 /* Register new event */
648 ret = register_kprobe_event(tk);
649 if (ret) {
650 if (ret == -EEXIST) {
651 trace_probe_log_set_index(0);
652 trace_probe_log_err(0, EVENT_EXIST);
653 } else
654 pr_warn("Failed to register probe event(%d)\n", ret);
655 goto end;
656 }
657
658 /* Register k*probe */
659 ret = __register_trace_kprobe(tk);
660 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
661 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
662 ret = 0;
663 }
664
665 if (ret < 0)
666 unregister_kprobe_event(tk);
667 else
668 dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
669
670 end:
671 mutex_unlock(&event_mutex);
672 return ret;
673 }
674
675 /* Module notifier call back, checking event on the module */
676 static int trace_kprobe_module_callback(struct notifier_block *nb,
677 unsigned long val, void *data)
678 {
679 struct module *mod = data;
680 struct dyn_event *pos;
681 struct trace_kprobe *tk;
682 int ret;
683
684 if (val != MODULE_STATE_COMING)
685 return NOTIFY_DONE;
686
687 /* Update probes on coming module */
688 mutex_lock(&event_mutex);
689 for_each_trace_kprobe(tk, pos) {
690 if (trace_kprobe_within_module(tk, mod)) {
691 /* Don't need to check busy - this should have gone. */
692 __unregister_trace_kprobe(tk);
693 ret = __register_trace_kprobe(tk);
694 if (ret)
695 pr_warn("Failed to re-register probe %s on %s: %d\n",
696 trace_probe_name(&tk->tp),
697 module_name(mod), ret);
698 }
699 }
700 mutex_unlock(&event_mutex);
701
702 return NOTIFY_DONE;
703 }
704
705 static struct notifier_block trace_kprobe_module_nb = {
706 .notifier_call = trace_kprobe_module_callback,
707 .priority = 1 /* Invoked after kprobe module callback */
708 };
709
710 static int __trace_kprobe_create(int argc, const char *argv[])
711 {
712 /*
713 * Argument syntax:
714 * - Add kprobe:
715 * p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
716 * - Add kretprobe:
717 * r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
718 * Or
719 * p[:[GRP/]EVENT] [MOD:]KSYM[+0]%return [FETCHARGS]
720 *
721 * Fetch args:
722 * $retval : fetch return value
723 * $stack : fetch stack address
724 * $stackN : fetch Nth of stack (N:0-)
725 * $comm : fetch current task comm
726 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
727 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
728 * %REG : fetch register REG
729 * Dereferencing memory fetch:
730 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
731 * Alias name of args:
732 * NAME=FETCHARG : set NAME as alias of FETCHARG.
733 * Type of args:
734 * FETCHARG:TYPE : use TYPE instead of unsigned long.
735 */
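/*
 * For example (an illustrative definition, not taken from this file),
 * writing the following to the tracefs "kprobe_events" file creates the
 * events "kprobes/myprobe" and "kprobes/myretprobe":
 *
 *	p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
 *	r:myretprobe do_sys_open $retval
 */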
736 struct trace_kprobe *tk = NULL;
737 int i, len, ret = 0;
738 bool is_return = false;
739 char *symbol = NULL, *tmp = NULL;
740 const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
741 enum probe_print_type ptype;
742 int maxactive = 0;
743 long offset = 0;
744 void *addr = NULL;
745 char buf[MAX_EVENT_NAME_LEN];
746 unsigned int flags = TPARG_FL_KERNEL;
747
748 switch (argv[0][0]) {
749 case 'r':
750 is_return = true;
751 break;
752 case 'p':
753 break;
754 default:
755 return -ECANCELED;
756 }
757 if (argc < 2)
758 return -ECANCELED;
759
760 trace_probe_log_init("trace_kprobe", argc, argv);
761
762 event = strchr(&argv[0][1], ':');
763 if (event)
764 event++;
765
766 if (isdigit(argv[0][1])) {
767 if (!is_return) {
768 trace_probe_log_err(1, MAXACT_NO_KPROBE);
769 goto parse_error;
770 }
771 if (event)
772 len = event - &argv[0][1] - 1;
773 else
774 len = strlen(&argv[0][1]);
775 if (len > MAX_EVENT_NAME_LEN - 1) {
776 trace_probe_log_err(1, BAD_MAXACT);
777 goto parse_error;
778 }
779 memcpy(buf, &argv[0][1], len);
780 buf[len] = '\0';
781 ret = kstrtouint(buf, 0, &maxactive);
782 if (ret || !maxactive) {
783 trace_probe_log_err(1, BAD_MAXACT);
784 goto parse_error;
785 }
786 /* kretprobe instances are iterated over via a list. The
787 * maximum should stay reasonable.
788 */
789 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
790 trace_probe_log_err(1, MAXACT_TOO_BIG);
791 goto parse_error;
792 }
793 }
794
795 /* Try to parse an address. If that fails, try to read the
796 * input as a symbol. */
797 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
798 trace_probe_log_set_index(1);
799 /* Check whether uprobe event specified */
800 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
801 ret = -ECANCELED;
802 goto error;
803 }
804 /* a symbol specified */
805 symbol = kstrdup(argv[1], GFP_KERNEL);
806 if (!symbol)
807 return -ENOMEM;
808
809 tmp = strchr(symbol, '%');
810 if (tmp) {
811 if (!strcmp(tmp, "%return")) {
812 *tmp = '\0';
813 is_return = true;
814 } else {
815 trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
816 goto parse_error;
817 }
818 }
819
820 /* TODO: support .init module functions */
821 ret = traceprobe_split_symbol_offset(symbol, &offset);
822 if (ret || offset < 0 || offset > UINT_MAX) {
823 trace_probe_log_err(0, BAD_PROBE_ADDR);
824 goto parse_error;
825 }
826 if (is_return)
827 flags |= TPARG_FL_RETURN;
828 ret = kprobe_on_func_entry(NULL, symbol, offset);
829 if (ret == 0)
830 flags |= TPARG_FL_FENTRY;
831 /* Defer the ENOENT case until registering the kprobe */
832 if (ret == -EINVAL && is_return) {
833 trace_probe_log_err(0, BAD_RETPROBE);
834 goto parse_error;
835 }
836 }
837
838 trace_probe_log_set_index(0);
839 if (event) {
840 ret = traceprobe_parse_event_name(&event, &group, buf,
841 event - argv[0]);
842 if (ret)
843 goto parse_error;
844 } else {
845 /* Make a new event name */
846 if (symbol)
847 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
848 is_return ? 'r' : 'p', symbol, offset);
849 else
850 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
851 is_return ? 'r' : 'p', addr);
852 sanitize_event_name(buf);
853 event = buf;
854 }
855
856 /* setup a probe */
857 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
858 argc - 2, is_return);
859 if (IS_ERR(tk)) {
860 ret = PTR_ERR(tk);
861 /* This must return -ENOMEM, else there is a bug */
862 WARN_ON_ONCE(ret != -ENOMEM);
863 goto out; /* We know tk is not allocated */
864 }
865 argc -= 2; argv += 2;
866
867 /* parse arguments */
868 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
869 trace_probe_log_set_index(i + 2);
870 ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], flags);
871 if (ret)
872 goto error; /* This can be -ENOMEM */
873 }
874
875 ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
876 ret = traceprobe_set_print_fmt(&tk->tp, ptype);
877 if (ret < 0)
878 goto error;
879
880 ret = register_trace_kprobe(tk);
881 if (ret) {
882 trace_probe_log_set_index(1);
883 if (ret == -EILSEQ)
884 trace_probe_log_err(0, BAD_INSN_BNDRY);
885 else if (ret == -ENOENT)
886 trace_probe_log_err(0, BAD_PROBE_ADDR);
887 else if (ret != -ENOMEM && ret != -EEXIST)
888 trace_probe_log_err(0, FAIL_REG_PROBE);
889 goto error;
890 }
891
892 out:
893 trace_probe_log_clear();
894 kfree(symbol);
895 return ret;
896
897 parse_error:
898 ret = -EINVAL;
899 error:
900 free_trace_kprobe(tk);
901 goto out;
902 }
903
904 static int trace_kprobe_create(const char *raw_command)
905 {
906 return trace_probe_create(raw_command, __trace_kprobe_create);
907 }
908
909 static int create_or_delete_trace_kprobe(const char *raw_command)
910 {
911 int ret;
912
913 if (raw_command[0] == '-')
914 return dyn_event_release(raw_command, &trace_kprobe_ops);
915
916 ret = trace_kprobe_create(raw_command);
917 return ret == -ECANCELED ? -EINVAL : ret;
918 }
919
920 static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
921 {
922 return create_or_delete_trace_kprobe(cmd->seq.buffer);
923 }
924
925 /**
926 * kprobe_event_cmd_init - Initialize a kprobe event command object
927 * @cmd: A pointer to the dynevent_cmd struct representing the new event
928 * @buf: A pointer to the buffer used to build the command
929 * @maxlen: The length of the buffer passed in @buf
930 *
931 * Initialize a kprobe event command object. Use this before
932 * calling any of the other kprobe_event functions.
933 */
934 void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
935 {
936 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
937 trace_kprobe_run_command);
938 }
939 EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
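
/*
 * A minimal usage sketch (assuming a caller-provided buffer of
 * MAX_DYNEVENT_CMD_LEN bytes, as the in-kernel API expects):
 *
 *	char buf[MAX_DYNEVENT_CMD_LEN];
 *	struct dynevent_cmd cmd;
 *
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 * The command can then be built up with kprobe_event_gen_cmd_start() and
 * kprobe_event_add_fields() below.
 */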
940
941 /**
942 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
943 * @cmd: A pointer to the dynevent_cmd struct representing the new event
944 * @name: The name of the kprobe event
945 * @loc: The location of the kprobe event
946 * @kretprobe: Is this a return probe?
947 * @args: Variable number of arg (pairs), one pair for each field
948 *
949 * NOTE: Users normally won't want to call this function directly, but
950 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
951 * adds a NULL to the end of the arg list. If this function is used
952 * directly, make sure the last arg in the variable arg list is NULL.
953 *
954 * Generate a kprobe event command to be executed by
955 * kprobe_event_gen_cmd_end(). This function can be used to generate the
956 * complete command or only the first part of it; in the latter case,
957 * kprobe_event_add_fields() can be used to add more fields following this.
958 *
959 * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
960 * returns -EINVAL if @loc == NULL.
961 *
962 * Return: 0 if successful, error otherwise.
963 */
964 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
965 const char *name, const char *loc, ...)
966 {
967 char buf[MAX_EVENT_NAME_LEN];
968 struct dynevent_arg arg;
969 va_list args;
970 int ret;
971
972 if (cmd->type != DYNEVENT_TYPE_KPROBE)
973 return -EINVAL;
974
975 if (!loc)
976 return -EINVAL;
977
978 if (kretprobe)
979 snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
980 else
981 snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
982
983 ret = dynevent_str_add(cmd, buf);
984 if (ret)
985 return ret;
986
987 dynevent_arg_init(&arg, 0);
988 arg.str = loc;
989 ret = dynevent_arg_add(cmd, &arg, NULL);
990 if (ret)
991 return ret;
992
993 va_start(args, loc);
994 for (;;) {
995 const char *field;
996
997 field = va_arg(args, const char *);
998 if (!field)
999 break;
1000
1001 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1002 ret = -EINVAL;
1003 break;
1004 }
1005
1006 arg.str = field;
1007 ret = dynevent_arg_add(cmd, &arg, NULL);
1008 if (ret)
1009 break;
1010 }
1011 va_end(args);
1012
1013 return ret;
1014 }
1015 EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
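
/*
 * A hedged usage sketch of the kprobe_event_gen_cmd_start() wrapper (the
 * event name, location and field pairs are illustrative only):
 *
 *	ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *
 * The wrapper appends the terminating NULL to the arg list automatically.
 */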
1016
1017 /**
1018 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1019 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1020 * @args: Variable number of arg (pairs), one pair for each field
1021 *
1022 * NOTE: Users normally won't want to call this function directly, but
1023 * rather use the kprobe_event_add_fields() wrapper, which
1024 * automatically adds a NULL to the end of the arg list. If this
1025 * function is used directly, make sure the last arg in the variable
1026 * arg list is NULL.
1027 *
1028 * Add probe fields to an existing kprobe command using a variable
1029 * list of args. Fields are added in the same order they're listed.
1030 *
1031 * Return: 0 if successful, error otherwise.
1032 */
1033 int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1034 {
1035 struct dynevent_arg arg;
1036 va_list args;
1037 int ret = 0;
1038
1039 if (cmd->type != DYNEVENT_TYPE_KPROBE)
1040 return -EINVAL;
1041
1042 dynevent_arg_init(&arg, 0);
1043
1044 va_start(args, cmd);
1045 for (;;) {
1046 const char *field;
1047
1048 field = va_arg(args, const char *);
1049 if (!field)
1050 break;
1051
1052 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1053 ret = -EINVAL;
1054 break;
1055 }
1056
1057 arg.str = field;
1058 ret = dynevent_arg_add(cmd, &arg, NULL);
1059 if (ret)
1060 break;
1061 }
1062 va_end(args);
1063
1064 return ret;
1065 }
1066 EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
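
/*
 * Continuing the sketch above (field pairs illustrative): further fields
 * can be appended and the command finally executed with
 * kprobe_event_gen_cmd_end(), which runs the command built in the buffer:
 *
 *	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */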
1067
1068 /**
1069 * kprobe_event_delete - Delete a kprobe event
1070 * @name: The name of the kprobe event to delete
1071 *
1072 * Delete a kprobe event with the given @name from kernel code rather
1073 * than directly from the command line.
1074 *
1075 * Return: 0 if successful, error otherwise.
1076 */
1077 int kprobe_event_delete(const char *name)
1078 {
1079 char buf[MAX_EVENT_NAME_LEN];
1080
1081 snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1082
1083 return create_or_delete_trace_kprobe(buf);
1084 }
1085 EXPORT_SYMBOL_GPL(kprobe_event_delete);
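
/*
 * For instance (illustrative name), an event created as "myprobe" through
 * the command interface above can later be removed with:
 *
 *	kprobe_event_delete("myprobe");
 */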
1086
1087 static int trace_kprobe_release(struct dyn_event *ev)
1088 {
1089 struct trace_kprobe *tk = to_trace_kprobe(ev);
1090 int ret = unregister_trace_kprobe(tk);
1091
1092 if (!ret)
1093 free_trace_kprobe(tk);
1094 return ret;
1095 }
1096
1097 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1098 {
1099 struct trace_kprobe *tk = to_trace_kprobe(ev);
1100 int i;
1101
1102 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1103 if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1104 seq_printf(m, "%d", tk->rp.maxactive);
1105 seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1106 trace_probe_name(&tk->tp));
1107
1108 if (!tk->symbol)
1109 seq_printf(m, " 0x%p", tk->rp.kp.addr);
1110 else if (tk->rp.kp.offset)
1111 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1112 tk->rp.kp.offset);
1113 else
1114 seq_printf(m, " %s", trace_kprobe_symbol(tk));
1115
1116 for (i = 0; i < tk->tp.nr_args; i++)
1117 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1118 seq_putc(m, '\n');
1119
1120 return 0;
1121 }
1122
1123 static int probes_seq_show(struct seq_file *m, void *v)
1124 {
1125 struct dyn_event *ev = v;
1126
1127 if (!is_trace_kprobe(ev))
1128 return 0;
1129
1130 return trace_kprobe_show(m, ev);
1131 }
1132
1133 static const struct seq_operations probes_seq_op = {
1134 .start = dyn_event_seq_start,
1135 .next = dyn_event_seq_next,
1136 .stop = dyn_event_seq_stop,
1137 .show = probes_seq_show
1138 };
1139
1140 static int probes_open(struct inode *inode, struct file *file)
1141 {
1142 int ret;
1143
1144 ret = security_locked_down(LOCKDOWN_TRACEFS);
1145 if (ret)
1146 return ret;
1147
1148 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1149 ret = dyn_events_release_all(&trace_kprobe_ops);
1150 if (ret < 0)
1151 return ret;
1152 }
1153
1154 return seq_open(file, &probes_seq_op);
1155 }
1156
1157 static ssize_t probes_write(struct file *file, const char __user *buffer,
1158 size_t count, loff_t *ppos)
1159 {
1160 return trace_parse_run_command(file, buffer, count, ppos,
1161 create_or_delete_trace_kprobe);
1162 }
1163
1164 static const struct file_operations kprobe_events_ops = {
1165 .owner = THIS_MODULE,
1166 .open = probes_open,
1167 .read = seq_read,
1168 .llseek = seq_lseek,
1169 .release = seq_release,
1170 .write = probes_write,
1171 };
1172
1173 /* Probes profiling interfaces */
1174 static int probes_profile_seq_show(struct seq_file *m, void *v)
1175 {
1176 struct dyn_event *ev = v;
1177 struct trace_kprobe *tk;
1178
1179 if (!is_trace_kprobe(ev))
1180 return 0;
1181
1182 tk = to_trace_kprobe(ev);
1183 seq_printf(m, " %-44s %15lu %15lu\n",
1184 trace_probe_name(&tk->tp),
1185 trace_kprobe_nhit(tk),
1186 tk->rp.kp.nmissed);
1187
1188 return 0;
1189 }
1190
1191 static const struct seq_operations profile_seq_op = {
1192 .start = dyn_event_seq_start,
1193 .next = dyn_event_seq_next,
1194 .stop = dyn_event_seq_stop,
1195 .show = probes_profile_seq_show
1196 };
1197
1198 static int profile_open(struct inode *inode, struct file *file)
1199 {
1200 int ret;
1201
1202 ret = security_locked_down(LOCKDOWN_TRACEFS);
1203 if (ret)
1204 return ret;
1205
1206 return seq_open(file, &profile_seq_op);
1207 }
1208
1209 static const struct file_operations kprobe_profile_ops = {
1210 .owner = THIS_MODULE,
1211 .open = profile_open,
1212 .read = seq_read,
1213 .llseek = seq_lseek,
1214 .release = seq_release,
1215 };
1216
1217 /* Kprobe specific fetch functions */
1218
1219 /* Return the length of the string, including the terminating NUL byte */
1220 static nokprobe_inline int
1221 fetch_store_strlen_user(unsigned long addr)
1222 {
1223 const void __user *uaddr = (__force const void __user *)addr;
1224
1225 return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
1226 }
1227
1228 /* Return the length of the string, including the terminating NUL byte */
1229 static nokprobe_inline int
1230 fetch_store_strlen(unsigned long addr)
1231 {
1232 int ret, len = 0;
1233 u8 c;
1234
1235 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1236 if (addr < TASK_SIZE)
1237 return fetch_store_strlen_user(addr);
1238 #endif
1239
1240 do {
1241 ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
1242 len++;
1243 } while (c && ret == 0 && len < MAX_STRING_SIZE);
1244
1245 return (ret < 0) ? ret : len;
1246 }
1247
1248 /*
1249 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
1250 * with max length and relative data location.
1251 */
1252 static nokprobe_inline int
1253 fetch_store_string_user(unsigned long addr, void *dest, void *base)
1254 {
1255 const void __user *uaddr = (__force const void __user *)addr;
1256 int maxlen = get_loc_len(*(u32 *)dest);
1257 void *__dest;
1258 long ret;
1259
1260 if (unlikely(!maxlen))
1261 return -ENOMEM;
1262
1263 __dest = get_loc_data(dest, base);
1264
1265 ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
1266 if (ret >= 0)
1267 *(u32 *)dest = make_data_loc(ret, __dest - base);
1268
1269 return ret;
1270 }
1271
1272 /*
1273 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
1274 * length and relative data location.
1275 */
1276 static nokprobe_inline int
1277 fetch_store_string(unsigned long addr, void *dest, void *base)
1278 {
1279 int maxlen = get_loc_len(*(u32 *)dest);
1280 void *__dest;
1281 long ret;
1282
1283 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1284 if ((unsigned long)addr < TASK_SIZE)
1285 return fetch_store_string_user(addr, dest, base);
1286 #endif
1287
1288 if (unlikely(!maxlen))
1289 return -ENOMEM;
1290
1291 __dest = get_loc_data(dest, base);
1292
1293 /*
1294 * Try to get string again, since the string can be changed while
1295 * probing.
1296 */
1297 ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
1298 if (ret >= 0)
1299 *(u32 *)dest = make_data_loc(ret, __dest - base);
1300
1301 return ret;
1302 }
1303
1304 static nokprobe_inline int
1305 probe_mem_read_user(void *dest, void *src, size_t size)
1306 {
1307 const void __user *uaddr = (__force const void __user *)src;
1308
1309 return copy_from_user_nofault(dest, uaddr, size);
1310 }
1311
1312 static nokprobe_inline int
1313 probe_mem_read(void *dest, void *src, size_t size)
1314 {
1315 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1316 if ((unsigned long)src < TASK_SIZE)
1317 return probe_mem_read_user(dest, src, size);
1318 #endif
1319 return copy_from_kernel_nofault(dest, src, size);
1320 }
1321
1322 /* Note that we don't verify it, since the code does not come from user space */
1323 static int
1324 process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
1325 void *base)
1326 {
1327 struct pt_regs *regs = rec;
1328 unsigned long val;
1329
1330 retry:
1331 /* 1st stage: get value from context */
1332 switch (code->op) {
1333 case FETCH_OP_REG:
1334 val = regs_get_register(regs, code->param);
1335 break;
1336 case FETCH_OP_STACK:
1337 val = regs_get_kernel_stack_nth(regs, code->param);
1338 break;
1339 case FETCH_OP_STACKP:
1340 val = kernel_stack_pointer(regs);
1341 break;
1342 case FETCH_OP_RETVAL:
1343 val = regs_return_value(regs);
1344 break;
1345 case FETCH_OP_IMM:
1346 val = code->immediate;
1347 break;
1348 case FETCH_OP_COMM:
1349 val = (unsigned long)current->comm;
1350 break;
1351 case FETCH_OP_DATA:
1352 val = (unsigned long)code->data;
1353 break;
1354 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1355 case FETCH_OP_ARG:
1356 val = regs_get_kernel_argument(regs, code->param);
1357 break;
1358 #endif
1359 case FETCH_NOP_SYMBOL: /* Ignore a place holder */
1360 code++;
1361 goto retry;
1362 default:
1363 return -EILSEQ;
1364 }
1365 code++;
1366
1367 return process_fetch_insn_bottom(code, val, dest, base);
1368 }
1369 NOKPROBE_SYMBOL(process_fetch_insn)
1370
1371 /* Kprobe handler */
1372 static nokprobe_inline void
1373 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1374 struct trace_event_file *trace_file)
1375 {
1376 struct kprobe_trace_entry_head *entry;
1377 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1378 struct trace_event_buffer fbuffer;
1379 int dsize;
1380
1381 WARN_ON(call != trace_file->event_call);
1382
1383 if (trace_trigger_soft_disabled(trace_file))
1384 return;
1385
1386 fbuffer.trace_ctx = tracing_gen_ctx();
1387 fbuffer.trace_file = trace_file;
1388
1389 dsize = __get_data_size(&tk->tp, regs);
1390
1391 fbuffer.event =
1392 trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
1393 call->event.type,
1394 sizeof(*entry) + tk->tp.size + dsize,
1395 fbuffer.trace_ctx);
1396 if (!fbuffer.event)
1397 return;
1398
1399 fbuffer.regs = regs;
1400 entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1401 entry->ip = (unsigned long)tk->rp.kp.addr;
1402 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1403
1404 trace_event_buffer_commit(&fbuffer);
1405 }
1406
1407 static void
1408 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1409 {
1410 struct event_file_link *link;
1411
1412 trace_probe_for_each_link_rcu(link, &tk->tp)
1413 __kprobe_trace_func(tk, regs, link->file);
1414 }
1415 NOKPROBE_SYMBOL(kprobe_trace_func);
1416
1417 /* Kretprobe handler */
1418 static nokprobe_inline void
1419 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1420 struct pt_regs *regs,
1421 struct trace_event_file *trace_file)
1422 {
1423 struct kretprobe_trace_entry_head *entry;
1424 struct trace_event_buffer fbuffer;
1425 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1426 int dsize;
1427
1428 WARN_ON(call != trace_file->event_call);
1429
1430 if (trace_trigger_soft_disabled(trace_file))
1431 return;
1432
1433 fbuffer.trace_ctx = tracing_gen_ctx();
1434 fbuffer.trace_file = trace_file;
1435
1436 dsize = __get_data_size(&tk->tp, regs);
1437 fbuffer.event =
1438 trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
1439 call->event.type,
1440 sizeof(*entry) + tk->tp.size + dsize,
1441 fbuffer.trace_ctx);
1442 if (!fbuffer.event)
1443 return;
1444
1445 fbuffer.regs = regs;
1446 entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1447 entry->func = (unsigned long)tk->rp.kp.addr;
1448 entry->ret_ip = (unsigned long)ri->ret_addr;
1449 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1450
1451 trace_event_buffer_commit(&fbuffer);
1452 }
1453
1454 static void
1455 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1456 struct pt_regs *regs)
1457 {
1458 struct event_file_link *link;
1459
1460 trace_probe_for_each_link_rcu(link, &tk->tp)
1461 __kretprobe_trace_func(tk, ri, regs, link->file);
1462 }
1463 NOKPROBE_SYMBOL(kretprobe_trace_func);
1464
1465 /* Event entry printers */
1466 static enum print_line_t
1467 print_kprobe_event(struct trace_iterator *iter, int flags,
1468 struct trace_event *event)
1469 {
1470 struct kprobe_trace_entry_head *field;
1471 struct trace_seq *s = &iter->seq;
1472 struct trace_probe *tp;
1473
1474 field = (struct kprobe_trace_entry_head *)iter->ent;
1475 tp = trace_probe_primary_from_call(
1476 container_of(event, struct trace_event_call, event));
1477 if (WARN_ON_ONCE(!tp))
1478 goto out;
1479
1480 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1481
1482 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1483 goto out;
1484
1485 trace_seq_putc(s, ')');
1486
1487 if (print_probe_args(s, tp->args, tp->nr_args,
1488 (u8 *)&field[1], field) < 0)
1489 goto out;
1490
1491 trace_seq_putc(s, '\n');
1492 out:
1493 return trace_handle_return(s);
1494 }
1495
1496 static enum print_line_t
1497 print_kretprobe_event(struct trace_iterator *iter, int flags,
1498 struct trace_event *event)
1499 {
1500 struct kretprobe_trace_entry_head *field;
1501 struct trace_seq *s = &iter->seq;
1502 struct trace_probe *tp;
1503
1504 field = (struct kretprobe_trace_entry_head *)iter->ent;
1505 tp = trace_probe_primary_from_call(
1506 container_of(event, struct trace_event_call, event));
1507 if (WARN_ON_ONCE(!tp))
1508 goto out;
1509
1510 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1511
1512 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1513 goto out;
1514
1515 trace_seq_puts(s, " <- ");
1516
1517 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1518 goto out;
1519
1520 trace_seq_putc(s, ')');
1521
1522 if (print_probe_args(s, tp->args, tp->nr_args,
1523 (u8 *)&field[1], field) < 0)
1524 goto out;
1525
1526 trace_seq_putc(s, '\n');
1527
1528 out:
1529 return trace_handle_return(s);
1530 }
1531
1532
1533 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1534 {
1535 int ret;
1536 struct kprobe_trace_entry_head field;
1537 struct trace_probe *tp;
1538
1539 tp = trace_probe_primary_from_call(event_call);
1540 if (WARN_ON_ONCE(!tp))
1541 return -ENOENT;
1542
1543 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1544
1545 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1546 }
1547
1548 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1549 {
1550 int ret;
1551 struct kretprobe_trace_entry_head field;
1552 struct trace_probe *tp;
1553
1554 tp = trace_probe_primary_from_call(event_call);
1555 if (WARN_ON_ONCE(!tp))
1556 return -ENOENT;
1557
1558 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1559 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1560
1561 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1562 }
1563
1564 #ifdef CONFIG_PERF_EVENTS
1565
1566 /* Kprobe profile handler */
1567 static int
1568 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1569 {
1570 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1571 struct kprobe_trace_entry_head *entry;
1572 struct hlist_head *head;
1573 int size, __size, dsize;
1574 int rctx;
1575
1576 if (bpf_prog_array_valid(call)) {
1577 unsigned long orig_ip = instruction_pointer(regs);
1578 int ret;
1579
1580 ret = trace_call_bpf(call, regs);
1581
1582 /*
1583 * We need to check and see if we modified the pc of the
1584 * pt_regs, and if so return 1 so that we don't do the
1585 * single stepping.
1586 */
1587 if (orig_ip != instruction_pointer(regs))
1588 return 1;
1589 if (!ret)
1590 return 0;
1591 }
1592
1593 head = this_cpu_ptr(call->perf_events);
1594 if (hlist_empty(head))
1595 return 0;
1596
1597 dsize = __get_data_size(&tk->tp, regs);
1598 __size = sizeof(*entry) + tk->tp.size + dsize;
1599 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1600 size -= sizeof(u32);
1601
1602 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1603 if (!entry)
1604 return 0;
1605
1606 entry->ip = (unsigned long)tk->rp.kp.addr;
1607 memset(&entry[1], 0, dsize);
1608 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1609 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1610 head, NULL);
1611 return 0;
1612 }
1613 NOKPROBE_SYMBOL(kprobe_perf_func);
1614
1615 /* Kretprobe profile handler */
1616 static void
1617 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1618 struct pt_regs *regs)
1619 {
1620 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1621 struct kretprobe_trace_entry_head *entry;
1622 struct hlist_head *head;
1623 int size, __size, dsize;
1624 int rctx;
1625
1626 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1627 return;
1628
1629 head = this_cpu_ptr(call->perf_events);
1630 if (hlist_empty(head))
1631 return;
1632
1633 dsize = __get_data_size(&tk->tp, regs);
1634 __size = sizeof(*entry) + tk->tp.size + dsize;
1635 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1636 size -= sizeof(u32);
1637
1638 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1639 if (!entry)
1640 return;
1641
1642 entry->func = (unsigned long)tk->rp.kp.addr;
1643 entry->ret_ip = (unsigned long)ri->ret_addr;
1644 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1645 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1646 head, NULL);
1647 }
1648 NOKPROBE_SYMBOL(kretprobe_perf_func);
1649
1650 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1651 const char **symbol, u64 *probe_offset,
1652 u64 *probe_addr, bool perf_type_tracepoint)
1653 {
1654 const char *pevent = trace_event_name(event->tp_event);
1655 const char *group = event->tp_event->class->system;
1656 struct trace_kprobe *tk;
1657
1658 if (perf_type_tracepoint)
1659 tk = find_trace_kprobe(pevent, group);
1660 else
1661 tk = trace_kprobe_primary_from_call(event->tp_event);
1662 if (!tk)
1663 return -EINVAL;
1664
1665 *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1666 : BPF_FD_TYPE_KPROBE;
1667 if (tk->symbol) {
1668 *symbol = tk->symbol;
1669 *probe_offset = tk->rp.kp.offset;
1670 *probe_addr = 0;
1671 } else {
1672 *symbol = NULL;
1673 *probe_offset = 0;
1674 *probe_addr = (unsigned long)tk->rp.kp.addr;
1675 }
1676 return 0;
1677 }
1678 #endif /* CONFIG_PERF_EVENTS */
1679
1680 /*
1681 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1682 *
1683 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1684 * lockless, but we can't race with this __init function.
1685 */
1686 static int kprobe_register(struct trace_event_call *event,
1687 enum trace_reg type, void *data)
1688 {
1689 struct trace_event_file *file = data;
1690
1691 switch (type) {
1692 case TRACE_REG_REGISTER:
1693 return enable_trace_kprobe(event, file);
1694 case TRACE_REG_UNREGISTER:
1695 return disable_trace_kprobe(event, file);
1696
1697 #ifdef CONFIG_PERF_EVENTS
1698 case TRACE_REG_PERF_REGISTER:
1699 return enable_trace_kprobe(event, NULL);
1700 case TRACE_REG_PERF_UNREGISTER:
1701 return disable_trace_kprobe(event, NULL);
1702 case TRACE_REG_PERF_OPEN:
1703 case TRACE_REG_PERF_CLOSE:
1704 case TRACE_REG_PERF_ADD:
1705 case TRACE_REG_PERF_DEL:
1706 return 0;
1707 #endif
1708 }
1709 return 0;
1710 }
1711
1712 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1713 {
1714 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1715 int ret = 0;
1716
1717 raw_cpu_inc(*tk->nhit);
1718
1719 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1720 kprobe_trace_func(tk, regs);
1721 #ifdef CONFIG_PERF_EVENTS
1722 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1723 ret = kprobe_perf_func(tk, regs);
1724 #endif
1725 return ret;
1726 }
1727 NOKPROBE_SYMBOL(kprobe_dispatcher);
1728
1729 static int
1730 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1731 {
1732 struct kretprobe *rp = get_kretprobe(ri);
1733 struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);
1734
1735 raw_cpu_inc(*tk->nhit);
1736
1737 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1738 kretprobe_trace_func(tk, ri, regs);
1739 #ifdef CONFIG_PERF_EVENTS
1740 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1741 kretprobe_perf_func(tk, ri, regs);
1742 #endif
1743 return 0; /* We don't tweak kernel, so just return 0 */
1744 }
1745 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1746
1747 static struct trace_event_functions kretprobe_funcs = {
1748 .trace = print_kretprobe_event
1749 };
1750
1751 static struct trace_event_functions kprobe_funcs = {
1752 .trace = print_kprobe_event
1753 };
1754
1755 static struct trace_event_fields kretprobe_fields_array[] = {
1756 { .type = TRACE_FUNCTION_TYPE,
1757 .define_fields = kretprobe_event_define_fields },
1758 {}
1759 };
1760
1761 static struct trace_event_fields kprobe_fields_array[] = {
1762 { .type = TRACE_FUNCTION_TYPE,
1763 .define_fields = kprobe_event_define_fields },
1764 {}
1765 };
1766
1767 static inline void init_trace_event_call(struct trace_kprobe *tk)
1768 {
1769 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1770
1771 if (trace_kprobe_is_return(tk)) {
1772 call->event.funcs = &kretprobe_funcs;
1773 call->class->fields_array = kretprobe_fields_array;
1774 } else {
1775 call->event.funcs = &kprobe_funcs;
1776 call->class->fields_array = kprobe_fields_array;
1777 }
1778
1779 call->flags = TRACE_EVENT_FL_KPROBE;
1780 call->class->reg = kprobe_register;
1781 }
1782
1783 static int register_kprobe_event(struct trace_kprobe *tk)
1784 {
1785 init_trace_event_call(tk);
1786
1787 return trace_probe_register_event_call(&tk->tp);
1788 }
1789
1790 static int unregister_kprobe_event(struct trace_kprobe *tk)
1791 {
1792 return trace_probe_unregister_event_call(&tk->tp);
1793 }
1794
1795 #ifdef CONFIG_PERF_EVENTS
1796 /* create a trace_kprobe, but don't add it to global lists */
1797 struct trace_event_call *
1798 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1799 bool is_return)
1800 {
1801 enum probe_print_type ptype;
1802 struct trace_kprobe *tk;
1803 int ret;
1804 char *event;
1805
1806 /*
1807 * local trace_kprobes are not added to dyn_event, so they are never
1808 * searched in find_trace_kprobe(). Therefore, there is no concern about
1809 * duplicated names here.
1810 */
1811 event = func ? func : "DUMMY_EVENT";
1812
1813 tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1814 offs, 0 /* maxactive */, 0 /* nargs */,
1815 is_return);
1816
1817 if (IS_ERR(tk)) {
1818 pr_info("Failed to allocate trace_probe.(%d)\n",
1819 (int)PTR_ERR(tk));
1820 return ERR_CAST(tk);
1821 }
1822
1823 init_trace_event_call(tk);
1824
1825 ptype = trace_kprobe_is_return(tk) ?
1826 PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1827 if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
1828 ret = -ENOMEM;
1829 goto error;
1830 }
1831
1832 ret = __register_trace_kprobe(tk);
1833 if (ret < 0)
1834 goto error;
1835
1836 return trace_probe_event_call(&tk->tp);
1837 error:
1838 free_trace_kprobe(tk);
1839 return ERR_PTR(ret);
1840 }
1841
1842 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1843 {
1844 struct trace_kprobe *tk;
1845
1846 tk = trace_kprobe_primary_from_call(event_call);
1847 if (unlikely(!tk))
1848 return;
1849
1850 if (trace_probe_is_enabled(&tk->tp)) {
1851 WARN_ON(1);
1852 return;
1853 }
1854
1855 __unregister_trace_kprobe(tk);
1856
1857 free_trace_kprobe(tk);
1858 }
1859 #endif /* CONFIG_PERF_EVENTS */
1860
1861 static __init void enable_boot_kprobe_events(void)
1862 {
1863 struct trace_array *tr = top_trace_array();
1864 struct trace_event_file *file;
1865 struct trace_kprobe *tk;
1866 struct dyn_event *pos;
1867
1868 mutex_lock(&event_mutex);
1869 for_each_trace_kprobe(tk, pos) {
1870 list_for_each_entry(file, &tr->events, list)
1871 if (file->event_call == trace_probe_event_call(&tk->tp))
1872 trace_event_enable_disable(file, 1, 0);
1873 }
1874 mutex_unlock(&event_mutex);
1875 }
1876
1877 static __init void setup_boot_kprobe_events(void)
1878 {
1879 char *p, *cmd = kprobe_boot_events_buf;
1880 int ret;
1881
1882 strreplace(kprobe_boot_events_buf, ',', ' ');
1883
1884 while (cmd && *cmd != '\0') {
1885 p = strchr(cmd, ';');
1886 if (p)
1887 *p++ = '\0';
1888
1889 ret = create_or_delete_trace_kprobe(cmd);
1890 if (ret)
1891 pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1892
1893 cmd = p;
1894 }
1895
1896 enable_boot_kprobe_events();
1897 }
1898
1899 /*
1900 * Register dynevent at core_initcall. This allows the kernel to set up
1901 * kprobe events in postcore_initcall without tracefs.
1902 */
1903 static __init int init_kprobe_trace_early(void)
1904 {
1905 int ret;
1906
1907 ret = dyn_event_register(&trace_kprobe_ops);
1908 if (ret)
1909 return ret;
1910
1911 if (register_module_notifier(&trace_kprobe_module_nb))
1912 return -EINVAL;
1913
1914 return 0;
1915 }
1916 core_initcall(init_kprobe_trace_early);
1917
1918 /* Make a tracefs interface for controlling probe points */
1919 static __init int init_kprobe_trace(void)
1920 {
1921 int ret;
1922 struct dentry *entry;
1923
1924 ret = tracing_init_dentry();
1925 if (ret)
1926 return 0;
1927
1928 entry = tracefs_create_file("kprobe_events", TRACE_MODE_WRITE,
1929 NULL, NULL, &kprobe_events_ops);
1930
1931 /* Event list interface */
1932 if (!entry)
1933 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1934
1935 /* Profile interface */
1936 entry = tracefs_create_file("kprobe_profile", TRACE_MODE_READ,
1937 NULL, NULL, &kprobe_profile_ops);
1938
1939 if (!entry)
1940 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1941
1942 setup_boot_kprobe_events();
1943
1944 return 0;
1945 }
1946 fs_initcall(init_kprobe_trace);
1947
1948
1949 #ifdef CONFIG_FTRACE_STARTUP_TEST
1950 static __init struct trace_event_file *
1951 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1952 {
1953 struct trace_event_file *file;
1954
1955 list_for_each_entry(file, &tr->events, list)
1956 if (file->event_call == trace_probe_event_call(&tk->tp))
1957 return file;
1958
1959 return NULL;
1960 }
1961
1962 /*
1963 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1964 * stage, so we can do this locklessly.
1965 */
1966 static __init int kprobe_trace_self_tests_init(void)
1967 {
1968 int ret, warn = 0;
1969 int (*target)(int, int, int, int, int, int);
1970 struct trace_kprobe *tk;
1971 struct trace_event_file *file;
1972
1973 if (tracing_is_disabled())
1974 return -ENODEV;
1975
1976 if (tracing_selftest_disabled)
1977 return 0;
1978
1979 target = kprobe_trace_selftest_target;
1980
1981 pr_info("Testing kprobe tracing: ");
1982
1983 ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
1984 if (WARN_ON_ONCE(ret)) {
1985 pr_warn("error on probing function entry.\n");
1986 warn++;
1987 } else {
1988 /* Enable trace point */
1989 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1990 if (WARN_ON_ONCE(tk == NULL)) {
1991 pr_warn("error on getting new probe.\n");
1992 warn++;
1993 } else {
1994 file = find_trace_probe_file(tk, top_trace_array());
1995 if (WARN_ON_ONCE(file == NULL)) {
1996 pr_warn("error on getting probe file.\n");
1997 warn++;
1998 } else
1999 enable_trace_kprobe(
2000 trace_probe_event_call(&tk->tp), file);
2001 }
2002 }
2003
2004 ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
2005 if (WARN_ON_ONCE(ret)) {
2006 pr_warn("error on probing function return.\n");
2007 warn++;
2008 } else {
2009 /* Enable trace point */
2010 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2011 if (WARN_ON_ONCE(tk == NULL)) {
2012 pr_warn("error on getting 2nd new probe.\n");
2013 warn++;
2014 } else {
2015 file = find_trace_probe_file(tk, top_trace_array());
2016 if (WARN_ON_ONCE(file == NULL)) {
2017 pr_warn("error on getting probe file.\n");
2018 warn++;
2019 } else
2020 enable_trace_kprobe(
2021 trace_probe_event_call(&tk->tp), file);
2022 }
2023 }
2024
2025 if (warn)
2026 goto end;
2027
2028 ret = target(1, 2, 3, 4, 5, 6);
2029
2030 /*
2031 * Not expecting an error here; the check is only to prevent the
2032 * optimizer from removing the call to target() as otherwise there
2033 * are no side-effects and the call is never performed.
2034 */
2035 if (ret != 21)
2036 warn++;
2037
2038 /* Disable trace points before removing it */
2039 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2040 if (WARN_ON_ONCE(tk == NULL)) {
2041 pr_warn("error on getting test probe.\n");
2042 warn++;
2043 } else {
2044 if (trace_kprobe_nhit(tk) != 1) {
2045 pr_warn("incorrect number of testprobe hits\n");
2046 warn++;
2047 }
2048
2049 file = find_trace_probe_file(tk, top_trace_array());
2050 if (WARN_ON_ONCE(file == NULL)) {
2051 pr_warn("error on getting probe file.\n");
2052 warn++;
2053 } else
2054 disable_trace_kprobe(
2055 trace_probe_event_call(&tk->tp), file);
2056 }
2057
2058 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2059 if (WARN_ON_ONCE(tk == NULL)) {
2060 pr_warn("error on getting 2nd test probe.\n");
2061 warn++;
2062 } else {
2063 if (trace_kprobe_nhit(tk) != 1) {
2064 pr_warn("incorrect number of testprobe2 hits\n");
2065 warn++;
2066 }
2067
2068 file = find_trace_probe_file(tk, top_trace_array());
2069 if (WARN_ON_ONCE(file == NULL)) {
2070 pr_warn("error on getting probe file.\n");
2071 warn++;
2072 } else
2073 disable_trace_kprobe(
2074 trace_probe_event_call(&tk->tp), file);
2075 }
2076
2077 ret = create_or_delete_trace_kprobe("-:testprobe");
2078 if (WARN_ON_ONCE(ret)) {
2079 pr_warn("error on deleting a probe.\n");
2080 warn++;
2081 }
2082
2083 ret = create_or_delete_trace_kprobe("-:testprobe2");
2084 if (WARN_ON_ONCE(ret)) {
2085 pr_warn("error on deleting a probe.\n");
2086 warn++;
2087 }
2088
2089 end:
2090 ret = dyn_events_release_all(&trace_kprobe_ops);
2091 if (WARN_ON_ONCE(ret)) {
2092 pr_warn("error on cleaning up probes.\n");
2093 warn++;
2094 }
2095 /*
2096 * Wait for the optimizer work to finish. Otherwise it might fiddle
2097 * with probes in already freed __init text.
2098 */
2099 wait_for_kprobe_optimizer();
2100 if (warn)
2101 pr_cont("NG: Some tests are failed. Please check them.\n");
2102 else
2103 pr_cont("OK\n");
2104 return 0;
2105 }
2106
2107 late_initcall(kprobe_trace_self_tests_init);
2108
2109 #endif
2110