1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * trace_output.c
4 *
5 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
6 *
7 */
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
11 #include <linux/kprobes.h>
12 #include <linux/sched/clock.h>
13 #include <linux/sched/mm.h>
14
15 #include "trace_output.h"
16
17 /* must be a power of 2 */
18 #define EVENT_HASHSIZE 128
19
20 DECLARE_RWSEM(trace_event_sem);
21
22 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
23
24 static int next_event_type = __TRACE_LAST_TYPE;
25
26 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
27 {
28 struct trace_seq *s = &iter->seq;
29 struct trace_entry *entry = iter->ent;
30 struct bputs_entry *field;
31
32 trace_assign_type(field, entry);
33
34 trace_seq_puts(s, field->str);
35
36 return trace_handle_return(s);
37 }
38
39 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
40 {
41 struct trace_seq *s = &iter->seq;
42 struct trace_entry *entry = iter->ent;
43 struct bprint_entry *field;
44
45 trace_assign_type(field, entry);
46
47 trace_seq_bprintf(s, field->fmt, field->buf);
48
49 return trace_handle_return(s);
50 }
51
52 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
53 {
54 struct trace_seq *s = &iter->seq;
55 struct trace_entry *entry = iter->ent;
56 struct print_entry *field;
57
58 trace_assign_type(field, entry);
59
60 trace_seq_puts(s, field->buf);
61
62 return trace_handle_return(s);
63 }
64
65 const char *
66 trace_print_flags_seq(struct trace_seq *p, const char *delim,
67 unsigned long flags,
68 const struct trace_print_flags *flag_array)
69 {
70 unsigned long mask;
71 const char *str;
72 const char *ret = trace_seq_buffer_ptr(p);
73 int i, first = 1;
74
75 for (i = 0; flag_array[i].name && flags; i++) {
76
77 mask = flag_array[i].mask;
78 if ((flags & mask) != mask)
79 continue;
80
81 str = flag_array[i].name;
82 flags &= ~mask;
83 if (!first && delim)
84 trace_seq_puts(p, delim);
85 else
86 first = 0;
87 trace_seq_puts(p, str);
88 }
89
90 /* check for leftover flags */
91 if (flags) {
92 if (!first && delim)
93 trace_seq_puts(p, delim);
94 trace_seq_printf(p, "0x%lx", flags);
95 }
96
97 trace_seq_putc(p, 0);
98
99 return ret;
100 }
101 EXPORT_SYMBOL(trace_print_flags_seq);
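/*
 * Illustrative example (not part of the original file; the flag table below
 * is hypothetical): trace_print_flags_seq() is the helper behind the
 * __print_flags() macro used in TP_printk().  Matched flag names are joined
 * with @delim and any leftover bits are printed in hex:
 *
 *	static const struct trace_print_flags demo_flags[] = {
 *		{ 0x01, "WAIT" },
 *		{ 0x02, "IO" },
 *		{ 0x10, "FS" },
 *		{ 0, NULL }
 *	};
 *
 *	trace_print_flags_seq(p, "|", 0x13, demo_flags);  -> "WAIT|IO|FS"
 *	trace_print_flags_seq(p, "|", 0x23, demo_flags);  -> "WAIT|IO|0x20"
 */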
102
103 const char *
104 trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
105 const struct trace_print_flags *symbol_array)
106 {
107 int i;
108 const char *ret = trace_seq_buffer_ptr(p);
109
110 for (i = 0; symbol_array[i].name; i++) {
111
112 if (val != symbol_array[i].mask)
113 continue;
114
115 trace_seq_puts(p, symbol_array[i].name);
116 break;
117 }
118
119 if (ret == (const char *)(trace_seq_buffer_ptr(p)))
120 trace_seq_printf(p, "0x%lx", val);
121
122 trace_seq_putc(p, 0);
123
124 return ret;
125 }
126 EXPORT_SYMBOL(trace_print_symbols_seq);
127
128 #if BITS_PER_LONG == 32
129 const char *
130 trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
131 unsigned long long flags,
132 const struct trace_print_flags_u64 *flag_array)
133 {
134 unsigned long long mask;
135 const char *str;
136 const char *ret = trace_seq_buffer_ptr(p);
137 int i, first = 1;
138
139 for (i = 0; flag_array[i].name && flags; i++) {
140
141 mask = flag_array[i].mask;
142 if ((flags & mask) != mask)
143 continue;
144
145 str = flag_array[i].name;
146 flags &= ~mask;
147 if (!first && delim)
148 trace_seq_puts(p, delim);
149 else
150 first = 0;
151 trace_seq_puts(p, str);
152 }
153
154 /* check for leftover flags */
155 if (flags) {
156 if (!first && delim)
157 trace_seq_puts(p, delim);
158 trace_seq_printf(p, "0x%llx", flags);
159 }
160
161 trace_seq_putc(p, 0);
162
163 return ret;
164 }
165 EXPORT_SYMBOL(trace_print_flags_seq_u64);
166
167 const char *
168 trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
169 const struct trace_print_flags_u64 *symbol_array)
170 {
171 int i;
172 const char *ret = trace_seq_buffer_ptr(p);
173
174 for (i = 0; symbol_array[i].name; i++) {
175
176 if (val != symbol_array[i].mask)
177 continue;
178
179 trace_seq_puts(p, symbol_array[i].name);
180 break;
181 }
182
183 if (ret == (const char *)(trace_seq_buffer_ptr(p)))
184 trace_seq_printf(p, "0x%llx", val);
185
186 trace_seq_putc(p, 0);
187
188 return ret;
189 }
190 EXPORT_SYMBOL(trace_print_symbols_seq_u64);
191 #endif
192
193 const char *
194 trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
195 unsigned int bitmask_size)
196 {
197 const char *ret = trace_seq_buffer_ptr(p);
198
199 trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
200 trace_seq_putc(p, 0);
201
202 return ret;
203 }
204 EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
205
206 /**
207 * trace_print_hex_seq - print buffer as hex sequence
208 * @p: trace seq struct to write to
209 * @buf: The buffer to print
210 * @buf_len: Length of @buf in bytes
211 * @concatenate: Print @buf as single hex string or with spacing
212 *
213 * Prints the passed buffer as a hex sequence, either as a single
214 * contiguous hex string if @concatenate is true, or with a space
215 * between bytes if @concatenate is false.
216 */
217 const char *
218 trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
219 bool concatenate)
220 {
221 int i;
222 const char *ret = trace_seq_buffer_ptr(p);
223 const char *fmt = concatenate ? "%*phN" : "%*ph";
224
225 for (i = 0; i < buf_len; i += 16)
226 trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
227 trace_seq_putc(p, 0);
228
229 return ret;
230 }
231 EXPORT_SYMBOL(trace_print_hex_seq);
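/*
 * Illustrative example (hypothetical buffer, not part of the original file):
 * for buf = { 0xde, 0xad, 0xbe, 0xef } and buf_len = 4,
 *
 *	trace_print_hex_seq(p, buf, 4, false) -> "de ad be ef"
 *	trace_print_hex_seq(p, buf, 4, true)  -> "deadbeef"
 *
 * The loop above emits at most 16 bytes per trace_seq_printf() call via the
 * "%*ph" / "%*phN" printk extensions.
 */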
232
233 const char *
234 trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
235 size_t el_size)
236 {
237 const char *ret = trace_seq_buffer_ptr(p);
238 const char *prefix = "";
239 void *ptr = (void *)buf;
240 size_t buf_len = count * el_size;
241
242 trace_seq_putc(p, '{');
243
244 while (ptr < buf + buf_len) {
245 switch (el_size) {
246 case 1:
247 trace_seq_printf(p, "%s0x%x", prefix,
248 *(u8 *)ptr);
249 break;
250 case 2:
251 trace_seq_printf(p, "%s0x%x", prefix,
252 *(u16 *)ptr);
253 break;
254 case 4:
255 trace_seq_printf(p, "%s0x%x", prefix,
256 *(u32 *)ptr);
257 break;
258 case 8:
259 trace_seq_printf(p, "%s0x%llx", prefix,
260 *(u64 *)ptr);
261 break;
262 default:
263 trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
264 *(u8 *)ptr);
265 el_size = 1;
266 }
267 prefix = ",";
268 ptr += el_size;
269 }
270
271 trace_seq_putc(p, '}');
272 trace_seq_putc(p, 0);
273
274 return ret;
275 }
276 EXPORT_SYMBOL(trace_print_array_seq);
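/*
 * Illustrative example (hypothetical data, not part of the original file):
 * a u16 array { 1, 2, 3, 4 } printed with count = 4 and el_size = 2 gives
 *
 *	"{0x1,0x2,0x3,0x4}"
 *
 * Unsupported element sizes emit a "BAD SIZE" marker and fall back to
 * byte-wise output.
 */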
277
278 const char *
279 trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
280 int prefix_type, int rowsize, int groupsize,
281 const void *buf, size_t len, bool ascii)
282 {
283 const char *ret = trace_seq_buffer_ptr(p);
284
285 trace_seq_putc(p, '\n');
286 trace_seq_hex_dump(p, prefix_str, prefix_type,
287 rowsize, groupsize, buf, len, ascii);
288 trace_seq_putc(p, 0);
289 return ret;
290 }
291 EXPORT_SYMBOL(trace_print_hex_dump_seq);
292
293 int trace_raw_output_prep(struct trace_iterator *iter,
294 struct trace_event *trace_event)
295 {
296 struct trace_event_call *event;
297 struct trace_seq *s = &iter->seq;
298 struct trace_seq *p = &iter->tmp_seq;
299 struct trace_entry *entry;
300
301 event = container_of(trace_event, struct trace_event_call, event);
302 entry = iter->ent;
303
304 if (entry->type != event->event.type) {
305 WARN_ON_ONCE(1);
306 return TRACE_TYPE_UNHANDLED;
307 }
308
309 trace_seq_init(p);
310 trace_seq_printf(s, "%s: ", trace_event_name(event));
311
312 return trace_handle_return(s);
313 }
314 EXPORT_SYMBOL(trace_raw_output_prep);
315
316 void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
317 {
318 va_list ap;
319
320 va_start(ap, fmt);
321 trace_check_vprintf(iter, trace_event_format(iter, fmt), ap);
322 va_end(ap);
323 }
324 EXPORT_SYMBOL(trace_event_printf);
325
326 static int trace_output_raw(struct trace_iterator *iter, char *name,
327 char *fmt, va_list ap)
328 {
329 struct trace_seq *s = &iter->seq;
330
331 trace_seq_printf(s, "%s: ", name);
332 trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
333
334 return trace_handle_return(s);
335 }
336
337 int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
338 {
339 va_list ap;
340 int ret;
341
342 va_start(ap, fmt);
343 ret = trace_output_raw(iter, name, fmt, ap);
344 va_end(ap);
345
346 return ret;
347 }
348 EXPORT_SYMBOL_GPL(trace_output_call);
349
350 static inline const char *kretprobed(const char *name, unsigned long addr)
351 {
352 if (is_kretprobe_trampoline(addr))
353 return "[unknown/kretprobe'd]";
354 return name;
355 }
356
357 void
358 trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
359 {
360 #ifdef CONFIG_KALLSYMS
361 char str[KSYM_SYMBOL_LEN];
362 const char *name;
363
364 if (offset)
365 sprint_symbol(str, address);
366 else
367 kallsyms_lookup(address, NULL, NULL, NULL, str);
368 name = kretprobed(str, address);
369
370 if (name && strlen(name)) {
371 trace_seq_puts(s, name);
372 return;
373 }
374 #endif
375 trace_seq_printf(s, "0x%08lx", address);
376 }
377
378 #ifndef CONFIG_64BIT
379 # define IP_FMT "%08lx"
380 #else
381 # define IP_FMT "%016lx"
382 #endif
383
384 static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
385 unsigned long ip, unsigned long sym_flags)
386 {
387 struct file *file = NULL;
388 unsigned long vmstart = 0;
389 int ret = 1;
390
391 if (s->full)
392 return 0;
393
394 if (mm) {
395 const struct vm_area_struct *vma;
396
397 mmap_read_lock(mm);
398 vma = find_vma(mm, ip);
399 if (vma) {
400 file = vma->vm_file;
401 vmstart = vma->vm_start;
402 }
403 if (file) {
404 ret = trace_seq_path(s, &file->f_path);
405 if (ret)
406 trace_seq_printf(s, "[+0x%lx]",
407 ip - vmstart);
408 }
409 mmap_read_unlock(mm);
410 }
411 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
412 trace_seq_printf(s, " <" IP_FMT ">", ip);
413 return !trace_seq_has_overflowed(s);
414 }
415
416 int
417 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
418 {
419 if (!ip) {
420 trace_seq_putc(s, '0');
421 goto out;
422 }
423
424 trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);
425
426 if (sym_flags & TRACE_ITER_SYM_ADDR)
427 trace_seq_printf(s, " <" IP_FMT ">", ip);
428
429 out:
430 return !trace_seq_has_overflowed(s);
431 }
432
433 /**
434 * trace_print_lat_fmt - print the irq, preempt and need-resched fields
435 * @s: trace seq struct to write to
436 * @entry: The trace entry field from the ring buffer
437 *
438 * Prints the generic latency fields: irqs-off state, need-resched flag,
439 * hardirq/softirq/NMI context and the preempt count.
440 */
441 int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
442 {
443 char hardsoft_irq;
444 char need_resched;
445 char irqs_off;
446 int hardirq;
447 int softirq;
448 int nmi;
449
450 nmi = entry->flags & TRACE_FLAG_NMI;
451 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
452 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
453
454 irqs_off =
455 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
456 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
457 '.';
458
459 switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
460 TRACE_FLAG_PREEMPT_RESCHED)) {
461 case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
462 need_resched = 'N';
463 break;
464 case TRACE_FLAG_NEED_RESCHED:
465 need_resched = 'n';
466 break;
467 case TRACE_FLAG_PREEMPT_RESCHED:
468 need_resched = 'p';
469 break;
470 default:
471 need_resched = '.';
472 break;
473 }
474
475 hardsoft_irq =
476 (nmi && hardirq) ? 'Z' :
477 nmi ? 'z' :
478 (hardirq && softirq) ? 'H' :
479 hardirq ? 'h' :
480 softirq ? 's' :
481 '.' ;
482
483 trace_seq_printf(s, "%c%c%c",
484 irqs_off, need_resched, hardsoft_irq);
485
486 if (entry->preempt_count & 0xf)
487 trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
488 else
489 trace_seq_putc(s, '.');
490
491 if (entry->preempt_count & 0xf0)
492 trace_seq_printf(s, "%x", entry->preempt_count >> 4);
493 else
494 trace_seq_putc(s, '.');
495
496 return !trace_seq_has_overflowed(s);
497 }
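/*
 * Illustrative example (hypothetical entry, not part of the original file):
 * an entry recorded with IRQs off, NEED_RESCHED set, in hardirq context and
 * with a preempt_count of 1 renders as the latency field
 *
 *	"dnh1."
 *
 * where the trailing '.' means the upper nibble of preempt_count is zero.
 */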
498
499 static int
500 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
501 {
502 char comm[TASK_COMM_LEN];
503
504 trace_find_cmdline(entry->pid, comm);
505
506 trace_seq_printf(s, "%8.8s-%-7d %3d",
507 comm, entry->pid, cpu);
508
509 return trace_print_lat_fmt(s, entry);
510 }
511
512 #undef MARK
513 #define MARK(v, s) {.val = v, .sym = s}
514 /* trace overhead mark */
515 static const struct trace_mark {
516 unsigned long long val; /* unit: nsec */
517 char sym;
518 } mark[] = {
519 MARK(1000000000ULL , '$'), /* 1 sec */
520 MARK(100000000ULL , '@'), /* 100 msec */
521 MARK(10000000ULL , '*'), /* 10 msec */
522 MARK(1000000ULL , '#'), /* 1000 usecs */
523 MARK(100000ULL , '!'), /* 100 usecs */
524 MARK(10000ULL , '+'), /* 10 usecs */
525 };
526 #undef MARK
527
528 char trace_find_mark(unsigned long long d)
529 {
530 int i;
531 int size = ARRAY_SIZE(mark);
532
533 for (i = 0; i < size; i++) {
534 if (d > mark[i].val)
535 break;
536 }
537
538 return (i == size) ? ' ' : mark[i].sym;
539 }
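/*
 * Worked example (illustrative, not part of the original file): a delta of
 * 150000 ns (150 usecs) exceeds the 100 usecs threshold but not the
 * 1000 usecs one, so trace_find_mark(150000ULL) returns '!'; deltas below
 * 10 usecs fall off the end of the table and return ' '.
 */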
540
541 static int
542 lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
543 {
544 struct trace_array *tr = iter->tr;
545 unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
546 unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
547 unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
548 unsigned long long rel_ts = next_ts - iter->ts;
549 struct trace_seq *s = &iter->seq;
550
551 if (in_ns) {
552 abs_ts = ns2usecs(abs_ts);
553 rel_ts = ns2usecs(rel_ts);
554 }
555
556 if (verbose && in_ns) {
557 unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
558 unsigned long abs_msec = (unsigned long)abs_ts;
559 unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
560 unsigned long rel_msec = (unsigned long)rel_ts;
561
562 trace_seq_printf(
563 s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
564 ns2usecs(iter->ts),
565 abs_msec, abs_usec,
566 rel_msec, rel_usec);
567
568 } else if (verbose && !in_ns) {
569 trace_seq_printf(
570 s, "[%016llx] %lld (+%lld): ",
571 iter->ts, abs_ts, rel_ts);
572
573 } else if (!verbose && in_ns) {
574 trace_seq_printf(
575 s, " %4lldus%c: ",
576 abs_ts,
577 trace_find_mark(rel_ts * NSEC_PER_USEC));
578
579 } else { /* !verbose && !in_ns */
580 trace_seq_printf(s, " %4lld: ", abs_ts);
581 }
582
583 return !trace_seq_has_overflowed(s);
584 }
585
586 static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
587 unsigned long long ts)
588 {
589 unsigned long secs, usec_rem;
590 unsigned long long t;
591
592 if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
593 t = ns2usecs(ts);
594 usec_rem = do_div(t, USEC_PER_SEC);
595 secs = (unsigned long)t;
596 trace_seq_printf(s, " %5lu.%06lu", secs, usec_rem);
597 } else
598 trace_seq_printf(s, " %12llu", ts);
599 }
600
601 int trace_print_context(struct trace_iterator *iter)
602 {
603 struct trace_array *tr = iter->tr;
604 struct trace_seq *s = &iter->seq;
605 struct trace_entry *entry = iter->ent;
606 char comm[TASK_COMM_LEN];
607
608 trace_find_cmdline(entry->pid, comm);
609
610 trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);
611
612 if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
613 unsigned int tgid = trace_find_tgid(entry->pid);
614
615 if (!tgid)
616 trace_seq_printf(s, "(-------) ");
617 else
618 trace_seq_printf(s, "(%7d) ", tgid);
619 }
620
621 trace_seq_printf(s, "[%03d] ", iter->cpu);
622
623 if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
624 trace_print_lat_fmt(s, entry);
625
626 trace_print_time(s, iter, iter->ts);
627 trace_seq_puts(s, ": ");
628
629 return !trace_seq_has_overflowed(s);
630 }
631
632 int trace_print_lat_context(struct trace_iterator *iter)
633 {
634 struct trace_entry *entry, *next_entry;
635 struct trace_array *tr = iter->tr;
636 struct trace_seq *s = &iter->seq;
637 unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);
638 u64 next_ts;
639
640 next_entry = trace_find_next_entry(iter, NULL, &next_ts);
641 if (!next_entry)
642 next_ts = iter->ts;
643
644 /* trace_find_next_entry() may change iter->ent */
645 entry = iter->ent;
646
647 if (verbose) {
648 char comm[TASK_COMM_LEN];
649
650 trace_find_cmdline(entry->pid, comm);
651
652 trace_seq_printf(
653 s, "%16s %7d %3d %d %08x %08lx ",
654 comm, entry->pid, iter->cpu, entry->flags,
655 entry->preempt_count & 0xf, iter->idx);
656 } else {
657 lat_print_generic(s, entry, iter->cpu);
658 }
659
660 lat_print_timestamp(iter, next_ts);
661
662 return !trace_seq_has_overflowed(s);
663 }
664
665 /**
666 * ftrace_find_event - find a registered event
667 * @type: the type of event to look for
668 *
669 * Returns the event registered for @type, or NULL if none exists.
670 * Called with trace_event_read_lock() held.
671 */
672 struct trace_event *ftrace_find_event(int type)
673 {
674 struct trace_event *event;
675 unsigned key;
676
677 key = type & (EVENT_HASHSIZE - 1);
678
679 hlist_for_each_entry(event, &event_hash[key], node) {
680 if (event->type == type)
681 return event;
682 }
683
684 return NULL;
685 }
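/*
 * Worked example (illustrative, not part of the original file): since
 * EVENT_HASHSIZE is a power of two, the bucket is simply the low bits of
 * the type, e.g. a dynamically assigned type of 300 lands in bucket
 * 300 & (128 - 1) == 44.
 */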
686
687 static LIST_HEAD(ftrace_event_list);
688
689 static int trace_search_list(struct list_head **list)
690 {
691 struct trace_event *e;
692 int next = __TRACE_LAST_TYPE;
693
694 if (list_empty(&ftrace_event_list)) {
695 *list = &ftrace_event_list;
696 return next;
697 }
698
699 /*
700 * We have used up all possible event types,
701 * let's see if somebody freed one.
702 */
703 list_for_each_entry(e, &ftrace_event_list, list) {
704 if (e->type != next)
705 break;
706 next++;
707 }
708
709 /* Did we use up all 65 thousand events? */
710 if (next > TRACE_EVENT_TYPE_MAX)
711 return 0;
712
713 *list = &e->list;
714 return next;
715 }
716
717 void trace_event_read_lock(void)
718 {
719 down_read(&trace_event_sem);
720 }
721
722 void trace_event_read_unlock(void)
723 {
724 up_read(&trace_event_sem);
725 }
726
727 /**
728 * register_trace_event - register output for an event type
729 * @event: the event type to register
730 *
731 * Event types are stored in a hash table which is used to
732 * look up how to print an event. If @event->type is already
733 * set then that type is used, otherwise a free type number
734 * is assigned.
735 *
736 * If you assign your own type, please make sure it is added
737 * to the trace_type enum in trace.h, to avoid collisions
738 * with the dynamic types.
739 *
740 * Returns the event type number or zero on error.
741 */
742 int register_trace_event(struct trace_event *event)
743 {
744 unsigned key;
745 int ret = 0;
746
747 down_write(&trace_event_sem);
748
749 if (WARN_ON(!event))
750 goto out;
751
752 if (WARN_ON(!event->funcs))
753 goto out;
754
755 INIT_LIST_HEAD(&event->list);
756
757 if (!event->type) {
758 struct list_head *list = NULL;
759
760 if (next_event_type > TRACE_EVENT_TYPE_MAX) {
761
762 event->type = trace_search_list(&list);
763 if (!event->type)
764 goto out;
765
766 } else {
767
768 event->type = next_event_type++;
769 list = &ftrace_event_list;
770 }
771
772 if (WARN_ON(ftrace_find_event(event->type)))
773 goto out;
774
775 list_add_tail(&event->list, list);
776
777 } else if (event->type > __TRACE_LAST_TYPE) {
778 printk(KERN_WARNING "Need to add type to trace.h\n");
779 WARN_ON(1);
780 goto out;
781 } else {
782 /* Is this event type already in use? */
783 if (ftrace_find_event(event->type))
784 goto out;
785 }
786
787 if (event->funcs->trace == NULL)
788 event->funcs->trace = trace_nop_print;
789 if (event->funcs->raw == NULL)
790 event->funcs->raw = trace_nop_print;
791 if (event->funcs->hex == NULL)
792 event->funcs->hex = trace_nop_print;
793 if (event->funcs->binary == NULL)
794 event->funcs->binary = trace_nop_print;
795
796 key = event->type & (EVENT_HASHSIZE - 1);
797
798 hlist_add_head(&event->node, &event_hash[key]);
799
800 ret = event->type;
801 out:
802 up_write(&trace_event_sem);
803
804 return ret;
805 }
806 EXPORT_SYMBOL_GPL(register_trace_event);
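/*
 * Minimal usage sketch (not part of the original file; my_event,
 * my_event_funcs and my_event_trace are hypothetical names).  Leaving
 * .type at zero asks register_trace_event() to hand out a dynamic type;
 * the static events further down instead pass fixed TRACE_* types.
 */
#if 0
static enum print_line_t my_event_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	trace_seq_puts(&iter->seq, "my event\n");
	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions my_event_funcs = {
	/* unset callbacks (raw/hex/binary) default to trace_nop_print */
	.trace	= my_event_trace,
};

static struct trace_event my_event = {
	.funcs	= &my_event_funcs,
};

static int __init my_event_init(void)
{
	/* register_trace_event() returns the assigned type, or 0 on error */
	return register_trace_event(&my_event) ? 0 : -EBUSY;
}
#endif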
807
808 /*
809 * Used by module code with the trace_event_sem held for write.
810 */
811 int __unregister_trace_event(struct trace_event *event)
812 {
813 hlist_del(&event->node);
814 list_del(&event->list);
815 return 0;
816 }
817
818 /**
819 * unregister_trace_event - remove a no longer used event
820 * @event: the event to remove
821 */
822 int unregister_trace_event(struct trace_event *event)
823 {
824 down_write(&trace_event_sem);
825 __unregister_trace_event(event);
826 up_write(&trace_event_sem);
827
828 return 0;
829 }
830 EXPORT_SYMBOL_GPL(unregister_trace_event);
831
832 /*
833 * Standard events
834 */
835
836 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
837 struct trace_event *event)
838 {
839 trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
840
841 return trace_handle_return(&iter->seq);
842 }
843
844 static void print_fn_trace(struct trace_seq *s, unsigned long ip,
845 unsigned long parent_ip, int flags)
846 {
847 seq_print_ip_sym(s, ip, flags);
848
849 if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
850 trace_seq_puts(s, " <-");
851 seq_print_ip_sym(s, parent_ip, flags);
852 }
853 }
854
855 /* TRACE_FN */
856 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
857 struct trace_event *event)
858 {
859 struct ftrace_entry *field;
860 struct trace_seq *s = &iter->seq;
861
862 trace_assign_type(field, iter->ent);
863
864 print_fn_trace(s, field->ip, field->parent_ip, flags);
865 trace_seq_putc(s, '\n');
866
867 return trace_handle_return(s);
868 }
869
870 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
871 struct trace_event *event)
872 {
873 struct ftrace_entry *field;
874
875 trace_assign_type(field, iter->ent);
876
877 trace_seq_printf(&iter->seq, "%lx %lx\n",
878 field->ip,
879 field->parent_ip);
880
881 return trace_handle_return(&iter->seq);
882 }
883
884 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
885 struct trace_event *event)
886 {
887 struct ftrace_entry *field;
888 struct trace_seq *s = &iter->seq;
889
890 trace_assign_type(field, iter->ent);
891
892 SEQ_PUT_HEX_FIELD(s, field->ip);
893 SEQ_PUT_HEX_FIELD(s, field->parent_ip);
894
895 return trace_handle_return(s);
896 }
897
898 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
899 struct trace_event *event)
900 {
901 struct ftrace_entry *field;
902 struct trace_seq *s = &iter->seq;
903
904 trace_assign_type(field, iter->ent);
905
906 SEQ_PUT_FIELD(s, field->ip);
907 SEQ_PUT_FIELD(s, field->parent_ip);
908
909 return trace_handle_return(s);
910 }
911
912 static struct trace_event_functions trace_fn_funcs = {
913 .trace = trace_fn_trace,
914 .raw = trace_fn_raw,
915 .hex = trace_fn_hex,
916 .binary = trace_fn_bin,
917 };
918
919 static struct trace_event trace_fn_event = {
920 .type = TRACE_FN,
921 .funcs = &trace_fn_funcs,
922 };
923
924 /* TRACE_CTX and TRACE_WAKE */
925 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
926 char *delim)
927 {
928 struct ctx_switch_entry *field;
929 char comm[TASK_COMM_LEN];
930 int S, T;
931
932
933 trace_assign_type(field, iter->ent);
934
935 T = task_index_to_char(field->next_state);
936 S = task_index_to_char(field->prev_state);
937 trace_find_cmdline(field->next_pid, comm);
938 trace_seq_printf(&iter->seq,
939 " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
940 field->prev_pid,
941 field->prev_prio,
942 S, delim,
943 field->next_cpu,
944 field->next_pid,
945 field->next_prio,
946 T, comm);
947
948 return trace_handle_return(&iter->seq);
949 }
950
951 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
952 struct trace_event *event)
953 {
954 return trace_ctxwake_print(iter, "==>");
955 }
956
957 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
958 int flags, struct trace_event *event)
959 {
960 return trace_ctxwake_print(iter, " +");
961 }
962
963 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
964 {
965 struct ctx_switch_entry *field;
966 int T;
967
968 trace_assign_type(field, iter->ent);
969
970 if (!S)
971 S = task_index_to_char(field->prev_state);
972 T = task_index_to_char(field->next_state);
973 trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
974 field->prev_pid,
975 field->prev_prio,
976 S,
977 field->next_cpu,
978 field->next_pid,
979 field->next_prio,
980 T);
981
982 return trace_handle_return(&iter->seq);
983 }
984
985 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
986 struct trace_event *event)
987 {
988 return trace_ctxwake_raw(iter, 0);
989 }
990
991 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
992 struct trace_event *event)
993 {
994 return trace_ctxwake_raw(iter, '+');
995 }
996
997
998 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
999 {
1000 struct ctx_switch_entry *field;
1001 struct trace_seq *s = &iter->seq;
1002 int T;
1003
1004 trace_assign_type(field, iter->ent);
1005
1006 if (!S)
1007 S = task_index_to_char(field->prev_state);
1008 T = task_index_to_char(field->next_state);
1009
1010 SEQ_PUT_HEX_FIELD(s, field->prev_pid);
1011 SEQ_PUT_HEX_FIELD(s, field->prev_prio);
1012 SEQ_PUT_HEX_FIELD(s, S);
1013 SEQ_PUT_HEX_FIELD(s, field->next_cpu);
1014 SEQ_PUT_HEX_FIELD(s, field->next_pid);
1015 SEQ_PUT_HEX_FIELD(s, field->next_prio);
1016 SEQ_PUT_HEX_FIELD(s, T);
1017
1018 return trace_handle_return(s);
1019 }
1020
1021 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
1022 struct trace_event *event)
1023 {
1024 return trace_ctxwake_hex(iter, 0);
1025 }
1026
1027 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
1028 struct trace_event *event)
1029 {
1030 return trace_ctxwake_hex(iter, '+');
1031 }
1032
1033 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
1034 int flags, struct trace_event *event)
1035 {
1036 struct ctx_switch_entry *field;
1037 struct trace_seq *s = &iter->seq;
1038
1039 trace_assign_type(field, iter->ent);
1040
1041 SEQ_PUT_FIELD(s, field->prev_pid);
1042 SEQ_PUT_FIELD(s, field->prev_prio);
1043 SEQ_PUT_FIELD(s, field->prev_state);
1044 SEQ_PUT_FIELD(s, field->next_cpu);
1045 SEQ_PUT_FIELD(s, field->next_pid);
1046 SEQ_PUT_FIELD(s, field->next_prio);
1047 SEQ_PUT_FIELD(s, field->next_state);
1048
1049 return trace_handle_return(s);
1050 }
1051
1052 static struct trace_event_functions trace_ctx_funcs = {
1053 .trace = trace_ctx_print,
1054 .raw = trace_ctx_raw,
1055 .hex = trace_ctx_hex,
1056 .binary = trace_ctxwake_bin,
1057 };
1058
1059 static struct trace_event trace_ctx_event = {
1060 .type = TRACE_CTX,
1061 .funcs = &trace_ctx_funcs,
1062 };
1063
1064 static struct trace_event_functions trace_wake_funcs = {
1065 .trace = trace_wake_print,
1066 .raw = trace_wake_raw,
1067 .hex = trace_wake_hex,
1068 .binary = trace_ctxwake_bin,
1069 };
1070
1071 static struct trace_event trace_wake_event = {
1072 .type = TRACE_WAKE,
1073 .funcs = &trace_wake_funcs,
1074 };
1075
1076 /* TRACE_STACK */
1077
1078 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1079 int flags, struct trace_event *event)
1080 {
1081 struct stack_entry *field;
1082 struct trace_seq *s = &iter->seq;
1083 unsigned long *p;
1084 unsigned long *end;
1085
1086 trace_assign_type(field, iter->ent);
1087 end = (unsigned long *)((long)iter->ent + iter->ent_size);
1088
1089 trace_seq_puts(s, "<stack trace>\n");
1090
1091 for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {
1092
1093 if (trace_seq_has_overflowed(s))
1094 break;
1095
1096 trace_seq_puts(s, " => ");
1097 seq_print_ip_sym(s, *p, flags);
1098 trace_seq_putc(s, '\n');
1099 }
1100
1101 return trace_handle_return(s);
1102 }
1103
1104 static struct trace_event_functions trace_stack_funcs = {
1105 .trace = trace_stack_print,
1106 };
1107
1108 static struct trace_event trace_stack_event = {
1109 .type = TRACE_STACK,
1110 .funcs = &trace_stack_funcs,
1111 };
1112
1113 /* TRACE_USER_STACK */
1114 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1115 int flags, struct trace_event *event)
1116 {
1117 struct trace_array *tr = iter->tr;
1118 struct userstack_entry *field;
1119 struct trace_seq *s = &iter->seq;
1120 struct mm_struct *mm = NULL;
1121 unsigned int i;
1122
1123 trace_assign_type(field, iter->ent);
1124
1125 trace_seq_puts(s, "<user stack trace>\n");
1126
1127 if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
1128 struct task_struct *task;
1129 /*
1130 * we do the lookup on the thread group leader,
1131 * since individual threads might have already quit!
1132 */
1133 rcu_read_lock();
1134 task = find_task_by_vpid(field->tgid);
1135 if (task)
1136 mm = get_task_mm(task);
1137 rcu_read_unlock();
1138 }
1139
1140 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1141 unsigned long ip = field->caller[i];
1142
1143 if (!ip || trace_seq_has_overflowed(s))
1144 break;
1145
1146 trace_seq_puts(s, " => ");
1147 seq_print_user_ip(s, mm, ip, flags);
1148 trace_seq_putc(s, '\n');
1149 }
1150
1151 if (mm)
1152 mmput(mm);
1153
1154 return trace_handle_return(s);
1155 }
1156
1157 static struct trace_event_functions trace_user_stack_funcs = {
1158 .trace = trace_user_stack_print,
1159 };
1160
1161 static struct trace_event trace_user_stack_event = {
1162 .type = TRACE_USER_STACK,
1163 .funcs = &trace_user_stack_funcs,
1164 };
1165
1166 /* TRACE_HWLAT */
1167 static enum print_line_t
1168 trace_hwlat_print(struct trace_iterator *iter, int flags,
1169 struct trace_event *event)
1170 {
1171 struct trace_entry *entry = iter->ent;
1172 struct trace_seq *s = &iter->seq;
1173 struct hwlat_entry *field;
1174
1175 trace_assign_type(field, entry);
1176
1177 trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld count:%d",
1178 field->seqnum,
1179 field->duration,
1180 field->outer_duration,
1181 (long long)field->timestamp.tv_sec,
1182 field->timestamp.tv_nsec, field->count);
1183
1184 if (field->nmi_count) {
1185 /*
1186 * The generic sched_clock() is not NMI safe, thus
1187 * we only record the count and not the time.
1188 */
1189 if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
1190 trace_seq_printf(s, " nmi-total:%llu",
1191 field->nmi_total_ts);
1192 trace_seq_printf(s, " nmi-count:%u",
1193 field->nmi_count);
1194 }
1195
1196 trace_seq_putc(s, '\n');
1197
1198 return trace_handle_return(s);
1199 }
1200
1201 static enum print_line_t
1202 trace_hwlat_raw(struct trace_iterator *iter, int flags,
1203 struct trace_event *event)
1204 {
1205 struct hwlat_entry *field;
1206 struct trace_seq *s = &iter->seq;
1207
1208 trace_assign_type(field, iter->ent);
1209
1210 trace_seq_printf(s, "%llu %lld %lld %09ld %u\n",
1211 field->duration,
1212 field->outer_duration,
1213 (long long)field->timestamp.tv_sec,
1214 field->timestamp.tv_nsec,
1215 field->seqnum);
1216
1217 return trace_handle_return(s);
1218 }
1219
1220 static struct trace_event_functions trace_hwlat_funcs = {
1221 .trace = trace_hwlat_print,
1222 .raw = trace_hwlat_raw,
1223 };
1224
1225 static struct trace_event trace_hwlat_event = {
1226 .type = TRACE_HWLAT,
1227 .funcs = &trace_hwlat_funcs,
1228 };
1229
1230 /* TRACE_OSNOISE */
1231 static enum print_line_t
1232 trace_osnoise_print(struct trace_iterator *iter, int flags,
1233 struct trace_event *event)
1234 {
1235 struct trace_entry *entry = iter->ent;
1236 struct trace_seq *s = &iter->seq;
1237 struct osnoise_entry *field;
1238 u64 ratio, ratio_dec;
1239 u64 net_runtime;
1240
1241 trace_assign_type(field, entry);
1242
1243 /*
1244 * Compute the available % of CPU time (runtime minus noise).
1245 */
1246 net_runtime = field->runtime - field->noise;
1247 ratio = net_runtime * 10000000;
1248 do_div(ratio, field->runtime);
1249 ratio_dec = do_div(ratio, 100000);
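/*
 * Worked example (illustrative values, not part of the original file):
 * runtime = 1000000 and noise = 2500 give net_runtime = 997500,
 * so ratio = 99 and ratio_dec = 75000, printed below as "99.75000".
 */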
1250
1251 trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
1252 field->runtime,
1253 field->noise,
1254 ratio, ratio_dec,
1255 field->max_sample);
1256
1257 trace_seq_printf(s, " %6u", field->hw_count);
1258 trace_seq_printf(s, " %6u", field->nmi_count);
1259 trace_seq_printf(s, " %6u", field->irq_count);
1260 trace_seq_printf(s, " %6u", field->softirq_count);
1261 trace_seq_printf(s, " %6u", field->thread_count);
1262
1263 trace_seq_putc(s, '\n');
1264
1265 return trace_handle_return(s);
1266 }
1267
1268 static enum print_line_t
1269 trace_osnoise_raw(struct trace_iterator *iter, int flags,
1270 struct trace_event *event)
1271 {
1272 struct osnoise_entry *field;
1273 struct trace_seq *s = &iter->seq;
1274
1275 trace_assign_type(field, iter->ent);
1276
1277 trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
1278 field->runtime,
1279 field->noise,
1280 field->max_sample,
1281 field->hw_count,
1282 field->nmi_count,
1283 field->irq_count,
1284 field->softirq_count,
1285 field->thread_count);
1286
1287 return trace_handle_return(s);
1288 }
1289
1290 static struct trace_event_functions trace_osnoise_funcs = {
1291 .trace = trace_osnoise_print,
1292 .raw = trace_osnoise_raw,
1293 };
1294
1295 static struct trace_event trace_osnoise_event = {
1296 .type = TRACE_OSNOISE,
1297 .funcs = &trace_osnoise_funcs,
1298 };
1299
1300 /* TRACE_TIMERLAT */
1301 static enum print_line_t
1302 trace_timerlat_print(struct trace_iterator *iter, int flags,
1303 struct trace_event *event)
1304 {
1305 struct trace_entry *entry = iter->ent;
1306 struct trace_seq *s = &iter->seq;
1307 struct timerlat_entry *field;
1308
1309 trace_assign_type(field, entry);
1310
1311 trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
1312 field->seqnum,
1313 field->context ? "thread" : "irq",
1314 field->timer_latency);
1315
1316 return trace_handle_return(s);
1317 }
1318
1319 static enum print_line_t
1320 trace_timerlat_raw(struct trace_iterator *iter, int flags,
1321 struct trace_event *event)
1322 {
1323 struct timerlat_entry *field;
1324 struct trace_seq *s = &iter->seq;
1325
1326 trace_assign_type(field, iter->ent);
1327
1328 trace_seq_printf(s, "%u %d %llu\n",
1329 field->seqnum,
1330 field->context,
1331 field->timer_latency);
1332
1333 return trace_handle_return(s);
1334 }
1335
1336 static struct trace_event_functions trace_timerlat_funcs = {
1337 .trace = trace_timerlat_print,
1338 .raw = trace_timerlat_raw,
1339 };
1340
1341 static struct trace_event trace_timerlat_event = {
1342 .type = TRACE_TIMERLAT,
1343 .funcs = &trace_timerlat_funcs,
1344 };
1345
1346 /* TRACE_BPUTS */
1347 static enum print_line_t
1348 trace_bputs_print(struct trace_iterator *iter, int flags,
1349 struct trace_event *event)
1350 {
1351 struct trace_entry *entry = iter->ent;
1352 struct trace_seq *s = &iter->seq;
1353 struct bputs_entry *field;
1354
1355 trace_assign_type(field, entry);
1356
1357 seq_print_ip_sym(s, field->ip, flags);
1358 trace_seq_puts(s, ": ");
1359 trace_seq_puts(s, field->str);
1360
1361 return trace_handle_return(s);
1362 }
1363
1364
1365 static enum print_line_t
1366 trace_bputs_raw(struct trace_iterator *iter, int flags,
1367 struct trace_event *event)
1368 {
1369 struct bputs_entry *field;
1370 struct trace_seq *s = &iter->seq;
1371
1372 trace_assign_type(field, iter->ent);
1373
1374 trace_seq_printf(s, ": %lx : ", field->ip);
1375 trace_seq_puts(s, field->str);
1376
1377 return trace_handle_return(s);
1378 }
1379
1380 static struct trace_event_functions trace_bputs_funcs = {
1381 .trace = trace_bputs_print,
1382 .raw = trace_bputs_raw,
1383 };
1384
1385 static struct trace_event trace_bputs_event = {
1386 .type = TRACE_BPUTS,
1387 .funcs = &trace_bputs_funcs,
1388 };
1389
1390 /* TRACE_BPRINT */
1391 static enum print_line_t
1392 trace_bprint_print(struct trace_iterator *iter, int flags,
1393 struct trace_event *event)
1394 {
1395 struct trace_entry *entry = iter->ent;
1396 struct trace_seq *s = &iter->seq;
1397 struct bprint_entry *field;
1398
1399 trace_assign_type(field, entry);
1400
1401 seq_print_ip_sym(s, field->ip, flags);
1402 trace_seq_puts(s, ": ");
1403 trace_seq_bprintf(s, field->fmt, field->buf);
1404
1405 return trace_handle_return(s);
1406 }
1407
1408
1409 static enum print_line_t
1410 trace_bprint_raw(struct trace_iterator *iter, int flags,
1411 struct trace_event *event)
1412 {
1413 struct bprint_entry *field;
1414 struct trace_seq *s = &iter->seq;
1415
1416 trace_assign_type(field, iter->ent);
1417
1418 trace_seq_printf(s, ": %lx : ", field->ip);
1419 trace_seq_bprintf(s, field->fmt, field->buf);
1420
1421 return trace_handle_return(s);
1422 }
1423
1424 static struct trace_event_functions trace_bprint_funcs = {
1425 .trace = trace_bprint_print,
1426 .raw = trace_bprint_raw,
1427 };
1428
1429 static struct trace_event trace_bprint_event = {
1430 .type = TRACE_BPRINT,
1431 .funcs = &trace_bprint_funcs,
1432 };
1433
1434 /* TRACE_PRINT */
1435 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1436 int flags, struct trace_event *event)
1437 {
1438 struct print_entry *field;
1439 struct trace_seq *s = &iter->seq;
1440
1441 trace_assign_type(field, iter->ent);
1442
1443 seq_print_ip_sym(s, field->ip, flags);
1444 trace_seq_printf(s, ": %s", field->buf);
1445
1446 return trace_handle_return(s);
1447 }
1448
1449 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
1450 struct trace_event *event)
1451 {
1452 struct print_entry *field;
1453
1454 trace_assign_type(field, iter->ent);
1455
1456 trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
1457
1458 return trace_handle_return(&iter->seq);
1459 }
1460
1461 static struct trace_event_functions trace_print_funcs = {
1462 .trace = trace_print_print,
1463 .raw = trace_print_raw,
1464 };
1465
1466 static struct trace_event trace_print_event = {
1467 .type = TRACE_PRINT,
1468 .funcs = &trace_print_funcs,
1469 };
1470
1471 static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
1472 struct trace_event *event)
1473 {
1474 struct raw_data_entry *field;
1475 int i;
1476
1477 trace_assign_type(field, iter->ent);
1478
1479 trace_seq_printf(&iter->seq, "# %x buf:", field->id);
1480
1481 for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
1482 trace_seq_printf(&iter->seq, " %02x",
1483 (unsigned char)field->buf[i]);
1484
1485 trace_seq_putc(&iter->seq, '\n');
1486
1487 return trace_handle_return(&iter->seq);
1488 }
1489
1490 static struct trace_event_functions trace_raw_data_funcs = {
1491 .trace = trace_raw_data,
1492 .raw = trace_raw_data,
1493 };
1494
1495 static struct trace_event trace_raw_data_event = {
1496 .type = TRACE_RAW_DATA,
1497 .funcs = &trace_raw_data_funcs,
1498 };
1499
1500 static enum print_line_t
1501 trace_func_repeats_raw(struct trace_iterator *iter, int flags,
1502 struct trace_event *event)
1503 {
1504 struct func_repeats_entry *field;
1505 struct trace_seq *s = &iter->seq;
1506
1507 trace_assign_type(field, iter->ent);
1508
1509 trace_seq_printf(s, "%lu %lu %u %llu\n",
1510 field->ip,
1511 field->parent_ip,
1512 field->count,
1513 FUNC_REPEATS_GET_DELTA_TS(field));
1514
1515 return trace_handle_return(s);
1516 }
1517
1518 static enum print_line_t
1519 trace_func_repeats_print(struct trace_iterator *iter, int flags,
1520 struct trace_event *event)
1521 {
1522 struct func_repeats_entry *field;
1523 struct trace_seq *s = &iter->seq;
1524
1525 trace_assign_type(field, iter->ent);
1526
1527 print_fn_trace(s, field->ip, field->parent_ip, flags);
1528 trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
1529 trace_print_time(s, iter,
1530 iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
1531 trace_seq_puts(s, ")\n");
1532
1533 return trace_handle_return(s);
1534 }
1535
1536 static struct trace_event_functions trace_func_repeats_funcs = {
1537 .trace = trace_func_repeats_print,
1538 .raw = trace_func_repeats_raw,
1539 };
1540
1541 static struct trace_event trace_func_repeats_event = {
1542 .type = TRACE_FUNC_REPEATS,
1543 .funcs = &trace_func_repeats_funcs,
1544 };
1545
1546 static struct trace_event *events[] __initdata = {
1547 &trace_fn_event,
1548 &trace_ctx_event,
1549 &trace_wake_event,
1550 &trace_stack_event,
1551 &trace_user_stack_event,
1552 &trace_bputs_event,
1553 &trace_bprint_event,
1554 &trace_print_event,
1555 &trace_hwlat_event,
1556 &trace_osnoise_event,
1557 &trace_timerlat_event,
1558 &trace_raw_data_event,
1559 &trace_func_repeats_event,
1560 NULL
1561 };
1562
1563 __init static int init_events(void)
1564 {
1565 struct trace_event *event;
1566 int i, ret;
1567
1568 for (i = 0; events[i]; i++) {
1569 event = events[i];
1570
1571 ret = register_trace_event(event);
1572 if (!ret) {
1573 printk(KERN_WARNING "event %d failed to register\n",
1574 event->type);
1575 WARN_ON_ONCE(1);
1576 }
1577 }
1578
1579 return 0;
1580 }
1581 early_initcall(init_events);
1582