0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * trace_output.c
0004  *
0005  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
0006  *
0007  */
0008 #include <linux/module.h>
0009 #include <linux/mutex.h>
0010 #include <linux/ftrace.h>
0011 #include <linux/kprobes.h>
0012 #include <linux/sched/clock.h>
0013 #include <linux/sched/mm.h>
0014 
0015 #include "trace_output.h"
0016 
0017 /* must be a power of 2 */
0018 #define EVENT_HASHSIZE  128
0019 
0020 DECLARE_RWSEM(trace_event_sem);
0021 
0022 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
0023 
0024 static int next_event_type = __TRACE_LAST_TYPE;
0025 
0026 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
0027 {
0028     struct trace_seq *s = &iter->seq;
0029     struct trace_entry *entry = iter->ent;
0030     struct bputs_entry *field;
0031 
0032     trace_assign_type(field, entry);
0033 
0034     trace_seq_puts(s, field->str);
0035 
0036     return trace_handle_return(s);
0037 }
0038 
0039 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
0040 {
0041     struct trace_seq *s = &iter->seq;
0042     struct trace_entry *entry = iter->ent;
0043     struct bprint_entry *field;
0044 
0045     trace_assign_type(field, entry);
0046 
0047     trace_seq_bprintf(s, field->fmt, field->buf);
0048 
0049     return trace_handle_return(s);
0050 }
0051 
0052 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
0053 {
0054     struct trace_seq *s = &iter->seq;
0055     struct trace_entry *entry = iter->ent;
0056     struct print_entry *field;
0057 
0058     trace_assign_type(field, entry);
0059 
0060     trace_seq_puts(s, field->buf);
0061 
0062     return trace_handle_return(s);
0063 }
0064 
0065 const char *
0066 trace_print_flags_seq(struct trace_seq *p, const char *delim,
0067               unsigned long flags,
0068               const struct trace_print_flags *flag_array)
0069 {
0070     unsigned long mask;
0071     const char *str;
0072     const char *ret = trace_seq_buffer_ptr(p);
0073     int i, first = 1;
0074 
0075     for (i = 0;  flag_array[i].name && flags; i++) {
0076 
0077         mask = flag_array[i].mask;
0078         if ((flags & mask) != mask)
0079             continue;
0080 
0081         str = flag_array[i].name;
0082         flags &= ~mask;
0083         if (!first && delim)
0084             trace_seq_puts(p, delim);
0085         else
0086             first = 0;
0087         trace_seq_puts(p, str);
0088     }
0089 
0090     /* check for leftover flags */
0091     if (flags) {
0092         if (!first && delim)
0093             trace_seq_puts(p, delim);
0094         trace_seq_printf(p, "0x%lx", flags);
0095     }
0096 
0097     trace_seq_putc(p, 0);
0098 
0099     return ret;
0100 }
0101 EXPORT_SYMBOL(trace_print_flags_seq);
0102 
0103 const char *
0104 trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
0105             const struct trace_print_flags *symbol_array)
0106 {
0107     int i;
0108     const char *ret = trace_seq_buffer_ptr(p);
0109 
0110     for (i = 0;  symbol_array[i].name; i++) {
0111 
0112         if (val != symbol_array[i].mask)
0113             continue;
0114 
0115         trace_seq_puts(p, symbol_array[i].name);
0116         break;
0117     }
0118 
0119     if (ret == (const char *)(trace_seq_buffer_ptr(p)))
0120         trace_seq_printf(p, "0x%lx", val);
0121 
0122     trace_seq_putc(p, 0);
0123 
0124     return ret;
0125 }
0126 EXPORT_SYMBOL(trace_print_symbols_seq);
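
/*
 * Example (illustrative sketch, not part of trace_output.c):
 * trace_print_flags_seq() and trace_print_symbols_seq() are usually
 * reached through the __print_flags()/__print_symbolic() helpers in a
 * TRACE_EVENT()'s TP_printk(); a direct call looks like the invented
 * example below.  @p is normally the iterator's tmp_seq, which
 * trace_raw_output_prep() has already initialized.
 */
static const struct trace_print_flags demo_flags[] = {
	{ 0x1, "READ"  },
	{ 0x2, "WRITE" },
	{ 0x4, "SYNC"  },
	{ -1,  NULL    },	/* a NULL name terminates the array */
};

static void demo_show_flags(struct trace_seq *p, unsigned long val)
{
	/* val == 0x3 yields "READ|WRITE"; leftover unknown bits print as hex */
	pr_info("flags: %s\n", trace_print_flags_seq(p, "|", val, demo_flags));
}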
0127 
0128 #if BITS_PER_LONG == 32
0129 const char *
0130 trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
0131               unsigned long long flags,
0132               const struct trace_print_flags_u64 *flag_array)
0133 {
0134     unsigned long long mask;
0135     const char *str;
0136     const char *ret = trace_seq_buffer_ptr(p);
0137     int i, first = 1;
0138 
0139     for (i = 0;  flag_array[i].name && flags; i++) {
0140 
0141         mask = flag_array[i].mask;
0142         if ((flags & mask) != mask)
0143             continue;
0144 
0145         str = flag_array[i].name;
0146         flags &= ~mask;
0147         if (!first && delim)
0148             trace_seq_puts(p, delim);
0149         else
0150             first = 0;
0151         trace_seq_puts(p, str);
0152     }
0153 
0154     /* check for leftover flags */
0155     if (flags) {
0156         if (!first && delim)
0157             trace_seq_puts(p, delim);
0158         trace_seq_printf(p, "0x%llx", flags);
0159     }
0160 
0161     trace_seq_putc(p, 0);
0162 
0163     return ret;
0164 }
0165 EXPORT_SYMBOL(trace_print_flags_seq_u64);
0166 
0167 const char *
0168 trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
0169              const struct trace_print_flags_u64 *symbol_array)
0170 {
0171     int i;
0172     const char *ret = trace_seq_buffer_ptr(p);
0173 
0174     for (i = 0;  symbol_array[i].name; i++) {
0175 
0176         if (val != symbol_array[i].mask)
0177             continue;
0178 
0179         trace_seq_puts(p, symbol_array[i].name);
0180         break;
0181     }
0182 
0183     if (ret == (const char *)(trace_seq_buffer_ptr(p)))
0184         trace_seq_printf(p, "0x%llx", val);
0185 
0186     trace_seq_putc(p, 0);
0187 
0188     return ret;
0189 }
0190 EXPORT_SYMBOL(trace_print_symbols_seq_u64);
0191 #endif
0192 
0193 const char *
0194 trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
0195             unsigned int bitmask_size)
0196 {
0197     const char *ret = trace_seq_buffer_ptr(p);
0198 
0199     trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
0200     trace_seq_putc(p, 0);
0201 
0202     return ret;
0203 }
0204 EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
0205 
0206 /**
0207  * trace_print_hex_seq - print buffer as hex sequence
0208  * @p: trace seq struct to write to
0209  * @buf: The buffer to print
0210  * @buf_len: Length of @buf in bytes
0211  * @concatenate: Print @buf as single hex string or with spacing
0212  *
0213  * Prints the passed buffer as a hex sequence: as one unbroken
0214  * hex string if @concatenate is true, or with the bytes separated
0215  * by spaces if @concatenate is false.
0216  */
0217 const char *
0218 trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
0219             bool concatenate)
0220 {
0221     int i;
0222     const char *ret = trace_seq_buffer_ptr(p);
0223     const char *fmt = concatenate ? "%*phN" : "%*ph";
0224 
0225     for (i = 0; i < buf_len; i += 16)
0226         trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
0227     trace_seq_putc(p, 0);
0228 
0229     return ret;
0230 }
0231 EXPORT_SYMBOL(trace_print_hex_seq);
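
/*
 * Example (illustrative sketch, not part of this file): the function
 * above backs the __print_hex() (spaced) and __print_hex_str()
 * (concatenated) TP_printk() helpers.  The buffer and names below are
 * invented for the example.
 */
static void demo_show_hex(struct trace_seq *p)
{
	static const unsigned char mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	const char *spaced, *packed;

	spaced = trace_print_hex_seq(p, mac, sizeof(mac), false); /* "00 1a 2b 3c 4d 5e" */
	packed = trace_print_hex_seq(p, mac, sizeof(mac), true);  /* "001a2b3c4d5e" */

	pr_info("%s vs %s\n", spaced, packed);
}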
0232 
0233 const char *
0234 trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
0235               size_t el_size)
0236 {
0237     const char *ret = trace_seq_buffer_ptr(p);
0238     const char *prefix = "";
0239     void *ptr = (void *)buf;
0240     size_t buf_len = count * el_size;
0241 
0242     trace_seq_putc(p, '{');
0243 
0244     while (ptr < buf + buf_len) {
0245         switch (el_size) {
0246         case 1:
0247             trace_seq_printf(p, "%s0x%x", prefix,
0248                      *(u8 *)ptr);
0249             break;
0250         case 2:
0251             trace_seq_printf(p, "%s0x%x", prefix,
0252                      *(u16 *)ptr);
0253             break;
0254         case 4:
0255             trace_seq_printf(p, "%s0x%x", prefix,
0256                      *(u32 *)ptr);
0257             break;
0258         case 8:
0259             trace_seq_printf(p, "%s0x%llx", prefix,
0260                      *(u64 *)ptr);
0261             break;
0262         default:
0263             trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
0264                      *(u8 *)ptr);
0265             el_size = 1;
0266         }
0267         prefix = ",";
0268         ptr += el_size;
0269     }
0270 
0271     trace_seq_putc(p, '}');
0272     trace_seq_putc(p, 0);
0273 
0274     return ret;
0275 }
0276 EXPORT_SYMBOL(trace_print_array_seq);
0277 
0278 const char *
0279 trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
0280              int prefix_type, int rowsize, int groupsize,
0281              const void *buf, size_t len, bool ascii)
0282 {
0283     const char *ret = trace_seq_buffer_ptr(p);
0284 
0285     trace_seq_putc(p, '\n');
0286     trace_seq_hex_dump(p, prefix_str, prefix_type,
0287                rowsize, groupsize, buf, len, ascii);
0288     trace_seq_putc(p, 0);
0289     return ret;
0290 }
0291 EXPORT_SYMBOL(trace_print_hex_dump_seq);
0292 
0293 int trace_raw_output_prep(struct trace_iterator *iter,
0294               struct trace_event *trace_event)
0295 {
0296     struct trace_event_call *event;
0297     struct trace_seq *s = &iter->seq;
0298     struct trace_seq *p = &iter->tmp_seq;
0299     struct trace_entry *entry;
0300 
0301     event = container_of(trace_event, struct trace_event_call, event);
0302     entry = iter->ent;
0303 
0304     if (entry->type != event->event.type) {
0305         WARN_ON_ONCE(1);
0306         return TRACE_TYPE_UNHANDLED;
0307     }
0308 
0309     trace_seq_init(p);
0310     trace_seq_printf(s, "%s: ", trace_event_name(event));
0311 
0312     return trace_handle_return(s);
0313 }
0314 EXPORT_SYMBOL(trace_raw_output_prep);
0315 
0316 void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
0317 {
0318     va_list ap;
0319 
0320     va_start(ap, fmt);
0321     trace_check_vprintf(iter, trace_event_format(iter, fmt), ap);
0322     va_end(ap);
0323 }
0324 EXPORT_SYMBOL(trace_event_printf);
0325 
0326 static int trace_output_raw(struct trace_iterator *iter, char *name,
0327                 char *fmt, va_list ap)
0328 {
0329     struct trace_seq *s = &iter->seq;
0330 
0331     trace_seq_printf(s, "%s: ", name);
0332     trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
0333 
0334     return trace_handle_return(s);
0335 }
0336 
0337 int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
0338 {
0339     va_list ap;
0340     int ret;
0341 
0342     va_start(ap, fmt);
0343     ret = trace_output_raw(iter, name, fmt, ap);
0344     va_end(ap);
0345 
0346     return ret;
0347 }
0348 EXPORT_SYMBOL_GPL(trace_output_call);
0349 
0350 static inline const char *kretprobed(const char *name, unsigned long addr)
0351 {
0352     if (is_kretprobe_trampoline(addr))
0353         return "[unknown/kretprobe'd]";
0354     return name;
0355 }
0356 
0357 void
0358 trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
0359 {
0360 #ifdef CONFIG_KALLSYMS
0361     char str[KSYM_SYMBOL_LEN];
0362     const char *name;
0363 
0364     if (offset)
0365         sprint_symbol(str, address);
0366     else
0367         kallsyms_lookup(address, NULL, NULL, NULL, str);
0368     name = kretprobed(str, address);
0369 
0370     if (name && strlen(name)) {
0371         trace_seq_puts(s, name);
0372         return;
0373     }
0374 #endif
0375     trace_seq_printf(s, "0x%08lx", address);
0376 }
0377 
0378 #ifndef CONFIG_64BIT
0379 # define IP_FMT "%08lx"
0380 #else
0381 # define IP_FMT "%016lx"
0382 #endif
0383 
0384 static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
0385                  unsigned long ip, unsigned long sym_flags)
0386 {
0387     struct file *file = NULL;
0388     unsigned long vmstart = 0;
0389     int ret = 1;
0390 
0391     if (s->full)
0392         return 0;
0393 
0394     if (mm) {
0395         const struct vm_area_struct *vma;
0396 
0397         mmap_read_lock(mm);
0398         vma = find_vma(mm, ip);
0399         if (vma) {
0400             file = vma->vm_file;
0401             vmstart = vma->vm_start;
0402         }
0403         if (file) {
0404             ret = trace_seq_path(s, &file->f_path);
0405             if (ret)
0406                 trace_seq_printf(s, "[+0x%lx]",
0407                          ip - vmstart);
0408         }
0409         mmap_read_unlock(mm);
0410     }
0411     if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
0412         trace_seq_printf(s, " <" IP_FMT ">", ip);
0413     return !trace_seq_has_overflowed(s);
0414 }
0415 
0416 int
0417 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
0418 {
0419     if (!ip) {
0420         trace_seq_putc(s, '0');
0421         goto out;
0422     }
0423 
0424     trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);
0425 
0426     if (sym_flags & TRACE_ITER_SYM_ADDR)
0427         trace_seq_printf(s, " <" IP_FMT ">", ip);
0428 
0429  out:
0430     return !trace_seq_has_overflowed(s);
0431 }
0432 
0433 /**
0434  * trace_print_lat_fmt - print the latency format fields
0435  * @s: trace seq struct to write to
0436  * @entry: The trace entry field from the ring buffer
0437  *
0438  * Prints the generic latency-format fields: irqs-off state, need-resched
0439  * state, hardirq/softirq context and preempt depth.
0440  */
0441 int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
0442 {
0443     char hardsoft_irq;
0444     char need_resched;
0445     char irqs_off;
0446     int hardirq;
0447     int softirq;
0448     int bh_off;
0449     int nmi;
0450 
0451     nmi = entry->flags & TRACE_FLAG_NMI;
0452     hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
0453     softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
0454     bh_off = entry->flags & TRACE_FLAG_BH_OFF;
0455 
0456     irqs_off =
0457         (entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
0458         (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
0459         bh_off ? 'b' :
0460         (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
0461         '.';
0462 
0463     switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
0464                 TRACE_FLAG_PREEMPT_RESCHED)) {
0465     case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
0466         need_resched = 'N';
0467         break;
0468     case TRACE_FLAG_NEED_RESCHED:
0469         need_resched = 'n';
0470         break;
0471     case TRACE_FLAG_PREEMPT_RESCHED:
0472         need_resched = 'p';
0473         break;
0474     default:
0475         need_resched = '.';
0476         break;
0477     }
0478 
0479     hardsoft_irq =
0480         (nmi && hardirq)     ? 'Z' :
0481         nmi                  ? 'z' :
0482         (hardirq && softirq) ? 'H' :
0483         hardirq              ? 'h' :
0484         softirq              ? 's' :
0485                                '.' ;
0486 
0487     trace_seq_printf(s, "%c%c%c",
0488              irqs_off, need_resched, hardsoft_irq);
0489 
0490     if (entry->preempt_count & 0xf)
0491         trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
0492     else
0493         trace_seq_putc(s, '.');
0494 
0495     if (entry->preempt_count & 0xf0)
0496         trace_seq_printf(s, "%x", entry->preempt_count >> 4);
0497     else
0498         trace_seq_putc(s, '.');
0499 
0500     return !trace_seq_has_overflowed(s);
0501 }
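
/*
 * Example of reading the output above (informal): the function emits the
 * 5-character latency field seen in trace headers, e.g. "dNh1." means:
 *   'd' - IRQs disabled ('D' if BH is also off, 'b' BH only, 'X' unsupported)
 *   'N' - both need-resched and preempt-resched set ('n'/'p' for just one)
 *   'h' - in hardirq ('s' softirq, 'H' hardirq+softirq, 'z'/'Z' NMI)
 *   '1' - preempt_count low nibble (preemption depth), '.' when zero
 *   '.' - preempt_count upper nibble is zero (printed in hex otherwise)
 */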
0502 
0503 static int
0504 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
0505 {
0506     char comm[TASK_COMM_LEN];
0507 
0508     trace_find_cmdline(entry->pid, comm);
0509 
0510     trace_seq_printf(s, "%8.8s-%-7d %3d",
0511              comm, entry->pid, cpu);
0512 
0513     return trace_print_lat_fmt(s, entry);
0514 }
0515 
0516 #undef MARK
0517 #define MARK(v, s) {.val = v, .sym = s}
0518 /* trace overhead mark */
0519 static const struct trace_mark {
0520     unsigned long long  val; /* unit: nsec */
0521     char            sym;
0522 } mark[] = {
0523     MARK(1000000000ULL  , '$'), /* 1 sec */
0524     MARK(100000000ULL   , '@'), /* 100 msec */
0525     MARK(10000000ULL    , '*'), /* 10 msec */
0526     MARK(1000000ULL     , '#'), /* 1000 usecs */
0527     MARK(100000ULL      , '!'), /* 100 usecs */
0528     MARK(10000ULL       , '+'), /* 10 usecs */
0529 };
0530 #undef MARK
0531 
0532 char trace_find_mark(unsigned long long d)
0533 {
0534     int i;
0535     int size = ARRAY_SIZE(mark);
0536 
0537     for (i = 0; i < size; i++) {
0538         if (d > mark[i].val)
0539             break;
0540     }
0541 
0542     return (i == size) ? ' ' : mark[i].sym;
0543 }
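
/*
 * Worked example (informal): for a delta of 250000 ns (250 us) the loop
 * above stops at the 100000 ns entry, so trace_find_mark() returns '!';
 * deltas of 10 us or less match no entry and return ' ' (no mark).
 */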
0544 
0545 static int
0546 lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
0547 {
0548     struct trace_array *tr = iter->tr;
0549     unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
0550     unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
0551     unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
0552     unsigned long long rel_ts = next_ts - iter->ts;
0553     struct trace_seq *s = &iter->seq;
0554 
0555     if (in_ns) {
0556         abs_ts = ns2usecs(abs_ts);
0557         rel_ts = ns2usecs(rel_ts);
0558     }
0559 
0560     if (verbose && in_ns) {
0561         unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
0562         unsigned long abs_msec = (unsigned long)abs_ts;
0563         unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
0564         unsigned long rel_msec = (unsigned long)rel_ts;
0565 
0566         trace_seq_printf(
0567             s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
0568             ns2usecs(iter->ts),
0569             abs_msec, abs_usec,
0570             rel_msec, rel_usec);
0571 
0572     } else if (verbose && !in_ns) {
0573         trace_seq_printf(
0574             s, "[%016llx] %lld (+%lld): ",
0575             iter->ts, abs_ts, rel_ts);
0576 
0577     } else if (!verbose && in_ns) {
0578         trace_seq_printf(
0579             s, " %4lldus%c: ",
0580             abs_ts,
0581             trace_find_mark(rel_ts * NSEC_PER_USEC));
0582 
0583     } else { /* !verbose && !in_ns */
0584         trace_seq_printf(s, " %4lld: ", abs_ts);
0585     }
0586 
0587     return !trace_seq_has_overflowed(s);
0588 }
0589 
0590 static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
0591                  unsigned long long ts)
0592 {
0593     unsigned long secs, usec_rem;
0594     unsigned long long t;
0595 
0596     if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
0597         t = ns2usecs(ts);
0598         usec_rem = do_div(t, USEC_PER_SEC);
0599         secs = (unsigned long)t;
0600         trace_seq_printf(s, " %5lu.%06lu", secs, usec_rem);
0601     } else
0602         trace_seq_printf(s, " %12llu", ts);
0603 }
0604 
0605 int trace_print_context(struct trace_iterator *iter)
0606 {
0607     struct trace_array *tr = iter->tr;
0608     struct trace_seq *s = &iter->seq;
0609     struct trace_entry *entry = iter->ent;
0610     char comm[TASK_COMM_LEN];
0611 
0612     trace_find_cmdline(entry->pid, comm);
0613 
0614     trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);
0615 
0616     if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
0617         unsigned int tgid = trace_find_tgid(entry->pid);
0618 
0619         if (!tgid)
0620             trace_seq_printf(s, "(-------) ");
0621         else
0622             trace_seq_printf(s, "(%7d) ", tgid);
0623     }
0624 
0625     trace_seq_printf(s, "[%03d] ", iter->cpu);
0626 
0627     if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
0628         trace_print_lat_fmt(s, entry);
0629 
0630     trace_print_time(s, iter, iter->ts);
0631     trace_seq_puts(s, ": ");
0632 
0633     return !trace_seq_has_overflowed(s);
0634 }
0635 
0636 int trace_print_lat_context(struct trace_iterator *iter)
0637 {
0638     struct trace_entry *entry, *next_entry;
0639     struct trace_array *tr = iter->tr;
0640     struct trace_seq *s = &iter->seq;
0641     unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);
0642     u64 next_ts;
0643 
0644     next_entry = trace_find_next_entry(iter, NULL, &next_ts);
0645     if (!next_entry)
0646         next_ts = iter->ts;
0647 
0648     /* trace_find_next_entry() may change iter->ent */
0649     entry = iter->ent;
0650 
0651     if (verbose) {
0652         char comm[TASK_COMM_LEN];
0653 
0654         trace_find_cmdline(entry->pid, comm);
0655 
0656         trace_seq_printf(
0657             s, "%16s %7d %3d %d %08x %08lx ",
0658             comm, entry->pid, iter->cpu, entry->flags,
0659             entry->preempt_count & 0xf, iter->idx);
0660     } else {
0661         lat_print_generic(s, entry, iter->cpu);
0662     }
0663 
0664     lat_print_timestamp(iter, next_ts);
0665 
0666     return !trace_seq_has_overflowed(s);
0667 }
0668 
0669 /**
0670  * ftrace_find_event - find a registered event
0671  * @type: the type of event to look for
0672  *
0673  * Returns the event registered for @type, or NULL if none exists.
0674  * Called with trace_event_read_lock() held.
0675  */
0676 struct trace_event *ftrace_find_event(int type)
0677 {
0678     struct trace_event *event;
0679     unsigned key;
0680 
0681     key = type & (EVENT_HASHSIZE - 1);
0682 
0683     hlist_for_each_entry(event, &event_hash[key], node) {
0684         if (event->type == type)
0685             return event;
0686     }
0687 
0688     return NULL;
0689 }
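
/*
 * Example (illustrative sketch, not part of this file): the hash is
 * protected by trace_event_sem, so looking up an event and using the
 * result should both happen under the read lock, e.g.:
 */
static void demo_print_current_entry(struct trace_iterator *iter)
{
	struct trace_event *event;

	trace_event_read_lock();
	event = ftrace_find_event(iter->ent->type);
	if (event)
		event->funcs->trace(iter, 0, event);	/* callbacks are never NULL */
	trace_event_read_unlock();
}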
0690 
0691 static LIST_HEAD(ftrace_event_list);
0692 
0693 static int trace_search_list(struct list_head **list)
0694 {
0695     struct trace_event *e = NULL, *iter;
0696     int next = __TRACE_LAST_TYPE;
0697 
0698     if (list_empty(&ftrace_event_list)) {
0699         *list = &ftrace_event_list;
0700         return next;
0701     }
0702 
0703     /*
0704      * We have used up all possible event types,
0705      * let's see if somebody freed one.
0706      */
0707     list_for_each_entry(iter, &ftrace_event_list, list) {
0708         if (iter->type != next) {
0709             e = iter;
0710             break;
0711         }
0712         next++;
0713     }
0714 
0715     /* Did we use up all 65 thousand event types? */
0716     if (next > TRACE_EVENT_TYPE_MAX)
0717         return 0;
0718 
0719     if (e)
0720         *list = &e->list;
0721     else
0722         *list = &ftrace_event_list;
0723     return next;
0724 }
0725 
0726 void trace_event_read_lock(void)
0727 {
0728     down_read(&trace_event_sem);
0729 }
0730 
0731 void trace_event_read_unlock(void)
0732 {
0733     up_read(&trace_event_sem);
0734 }
0735 
0736 /**
0737  * register_trace_event - register output for an event type
0738  * @event: the event type to register
0739  *
0740  * Event types are stored in a hash table that is used to look up
0741  * how to print an event. If @event->type is already set, that type
0742  * is used; otherwise this function assigns the next available
0743  * type.
0744  *
0745  * If you assign your own type, please make sure it is added
0746  * to the trace_type enum in trace.h, to avoid collisions
0747  * with the dynamic types.
0748  *
0749  * Returns the event type number or zero on error.
0750  */
0751 int register_trace_event(struct trace_event *event)
0752 {
0753     unsigned key;
0754     int ret = 0;
0755 
0756     down_write(&trace_event_sem);
0757 
0758     if (WARN_ON(!event))
0759         goto out;
0760 
0761     if (WARN_ON(!event->funcs))
0762         goto out;
0763 
0764     INIT_LIST_HEAD(&event->list);
0765 
0766     if (!event->type) {
0767         struct list_head *list = NULL;
0768 
0769         if (next_event_type > TRACE_EVENT_TYPE_MAX) {
0770 
0771             event->type = trace_search_list(&list);
0772             if (!event->type)
0773                 goto out;
0774 
0775         } else {
0776 
0777             event->type = next_event_type++;
0778             list = &ftrace_event_list;
0779         }
0780 
0781         if (WARN_ON(ftrace_find_event(event->type)))
0782             goto out;
0783 
0784         list_add_tail(&event->list, list);
0785 
0786     } else if (WARN(event->type > __TRACE_LAST_TYPE,
0787             "Need to add type to trace.h")) {
0788         goto out;
0789     } else {
0790         /* Is this event type already in use? */
0791         if (ftrace_find_event(event->type))
0792             goto out;
0793     }
0794 
0795     if (event->funcs->trace == NULL)
0796         event->funcs->trace = trace_nop_print;
0797     if (event->funcs->raw == NULL)
0798         event->funcs->raw = trace_nop_print;
0799     if (event->funcs->hex == NULL)
0800         event->funcs->hex = trace_nop_print;
0801     if (event->funcs->binary == NULL)
0802         event->funcs->binary = trace_nop_print;
0803 
0804     key = event->type & (EVENT_HASHSIZE - 1);
0805 
0806     hlist_add_head(&event->node, &event_hash[key]);
0807 
0808     ret = event->type;
0809  out:
0810     up_write(&trace_event_sem);
0811 
0812     return ret;
0813 }
0814 EXPORT_SYMBOL_GPL(register_trace_event);
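
/*
 * Example (illustrative sketch, not part of this file): registering
 * output callbacks for a new entry type follows the same pattern as the
 * static events further down (trace_fn_event etc.).  Everything named
 * "demo_*" is invented for the example.
 */
static enum print_line_t demo_output(struct trace_iterator *iter, int flags,
				     struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "demo entry, type %d\n", iter->ent->type);
	return trace_handle_return(s);
}

static struct trace_event_functions demo_funcs = {
	.trace	= demo_output,
	/* .raw/.hex/.binary left NULL: filled in with trace_nop_print */
};

static struct trace_event demo_event = {
	.type	= 0,		/* 0: let register_trace_event() assign one */
	.funcs	= &demo_funcs,
};

static int __init demo_register(void)
{
	/*
	 * Returns the (possibly newly assigned) type, or 0 on error;
	 * pair with unregister_trace_event(&demo_event) on teardown.
	 */
	if (!register_trace_event(&demo_event))
		pr_warn("demo_event failed to register\n");
	return 0;
}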
0815 
0816 /*
0817  * Used by module code with the trace_event_sem held for write.
0818  */
0819 int __unregister_trace_event(struct trace_event *event)
0820 {
0821     hlist_del(&event->node);
0822     list_del(&event->list);
0823     return 0;
0824 }
0825 
0826 /**
0827  * unregister_trace_event - remove a no longer used event
0828  * @event: the event to remove
0829  */
0830 int unregister_trace_event(struct trace_event *event)
0831 {
0832     down_write(&trace_event_sem);
0833     __unregister_trace_event(event);
0834     up_write(&trace_event_sem);
0835 
0836     return 0;
0837 }
0838 EXPORT_SYMBOL_GPL(unregister_trace_event);
0839 
0840 /*
0841  * Standard events
0842  */
0843 
0844 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
0845                   struct trace_event *event)
0846 {
0847     trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
0848 
0849     return trace_handle_return(&iter->seq);
0850 }
0851 
0852 static void print_fn_trace(struct trace_seq *s, unsigned long ip,
0853                unsigned long parent_ip, int flags)
0854 {
0855     seq_print_ip_sym(s, ip, flags);
0856 
0857     if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
0858         trace_seq_puts(s, " <-");
0859         seq_print_ip_sym(s, parent_ip, flags);
0860     }
0861 }
0862 
0863 /* TRACE_FN */
0864 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
0865                     struct trace_event *event)
0866 {
0867     struct ftrace_entry *field;
0868     struct trace_seq *s = &iter->seq;
0869 
0870     trace_assign_type(field, iter->ent);
0871 
0872     print_fn_trace(s, field->ip, field->parent_ip, flags);
0873     trace_seq_putc(s, '\n');
0874 
0875     return trace_handle_return(s);
0876 }
0877 
0878 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
0879                       struct trace_event *event)
0880 {
0881     struct ftrace_entry *field;
0882 
0883     trace_assign_type(field, iter->ent);
0884 
0885     trace_seq_printf(&iter->seq, "%lx %lx\n",
0886              field->ip,
0887              field->parent_ip);
0888 
0889     return trace_handle_return(&iter->seq);
0890 }
0891 
0892 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
0893                       struct trace_event *event)
0894 {
0895     struct ftrace_entry *field;
0896     struct trace_seq *s = &iter->seq;
0897 
0898     trace_assign_type(field, iter->ent);
0899 
0900     SEQ_PUT_HEX_FIELD(s, field->ip);
0901     SEQ_PUT_HEX_FIELD(s, field->parent_ip);
0902 
0903     return trace_handle_return(s);
0904 }
0905 
0906 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
0907                       struct trace_event *event)
0908 {
0909     struct ftrace_entry *field;
0910     struct trace_seq *s = &iter->seq;
0911 
0912     trace_assign_type(field, iter->ent);
0913 
0914     SEQ_PUT_FIELD(s, field->ip);
0915     SEQ_PUT_FIELD(s, field->parent_ip);
0916 
0917     return trace_handle_return(s);
0918 }
0919 
0920 static struct trace_event_functions trace_fn_funcs = {
0921     .trace      = trace_fn_trace,
0922     .raw        = trace_fn_raw,
0923     .hex        = trace_fn_hex,
0924     .binary     = trace_fn_bin,
0925 };
0926 
0927 static struct trace_event trace_fn_event = {
0928     .type       = TRACE_FN,
0929     .funcs      = &trace_fn_funcs,
0930 };
0931 
0932 /* TRACE_CTX and TRACE_WAKE */
0933 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
0934                          char *delim)
0935 {
0936     struct ctx_switch_entry *field;
0937     char comm[TASK_COMM_LEN];
0938     int S, T;
0939 
0940 
0941     trace_assign_type(field, iter->ent);
0942 
0943     T = task_index_to_char(field->next_state);
0944     S = task_index_to_char(field->prev_state);
0945     trace_find_cmdline(field->next_pid, comm);
0946     trace_seq_printf(&iter->seq,
0947              " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
0948              field->prev_pid,
0949              field->prev_prio,
0950              S, delim,
0951              field->next_cpu,
0952              field->next_pid,
0953              field->next_prio,
0954              T, comm);
0955 
0956     return trace_handle_return(&iter->seq);
0957 }
0958 
0959 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
0960                      struct trace_event *event)
0961 {
0962     return trace_ctxwake_print(iter, "==>");
0963 }
0964 
0965 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
0966                       int flags, struct trace_event *event)
0967 {
0968     return trace_ctxwake_print(iter, "  +");
0969 }
0970 
0971 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
0972 {
0973     struct ctx_switch_entry *field;
0974     int T;
0975 
0976     trace_assign_type(field, iter->ent);
0977 
0978     if (!S)
0979         S = task_index_to_char(field->prev_state);
0980     T = task_index_to_char(field->next_state);
0981     trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
0982              field->prev_pid,
0983              field->prev_prio,
0984              S,
0985              field->next_cpu,
0986              field->next_pid,
0987              field->next_prio,
0988              T);
0989 
0990     return trace_handle_return(&iter->seq);
0991 }
0992 
0993 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
0994                        struct trace_event *event)
0995 {
0996     return trace_ctxwake_raw(iter, 0);
0997 }
0998 
0999 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
1000                     struct trace_event *event)
1001 {
1002     return trace_ctxwake_raw(iter, '+');
1003 }
1004 
1005 
1006 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
1007 {
1008     struct ctx_switch_entry *field;
1009     struct trace_seq *s = &iter->seq;
1010     int T;
1011 
1012     trace_assign_type(field, iter->ent);
1013 
1014     if (!S)
1015         S = task_index_to_char(field->prev_state);
1016     T = task_index_to_char(field->next_state);
1017 
1018     SEQ_PUT_HEX_FIELD(s, field->prev_pid);
1019     SEQ_PUT_HEX_FIELD(s, field->prev_prio);
1020     SEQ_PUT_HEX_FIELD(s, S);
1021     SEQ_PUT_HEX_FIELD(s, field->next_cpu);
1022     SEQ_PUT_HEX_FIELD(s, field->next_pid);
1023     SEQ_PUT_HEX_FIELD(s, field->next_prio);
1024     SEQ_PUT_HEX_FIELD(s, T);
1025 
1026     return trace_handle_return(s);
1027 }
1028 
1029 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
1030                        struct trace_event *event)
1031 {
1032     return trace_ctxwake_hex(iter, 0);
1033 }
1034 
1035 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
1036                     struct trace_event *event)
1037 {
1038     return trace_ctxwake_hex(iter, '+');
1039 }
1040 
1041 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
1042                        int flags, struct trace_event *event)
1043 {
1044     struct ctx_switch_entry *field;
1045     struct trace_seq *s = &iter->seq;
1046 
1047     trace_assign_type(field, iter->ent);
1048 
1049     SEQ_PUT_FIELD(s, field->prev_pid);
1050     SEQ_PUT_FIELD(s, field->prev_prio);
1051     SEQ_PUT_FIELD(s, field->prev_state);
1052     SEQ_PUT_FIELD(s, field->next_cpu);
1053     SEQ_PUT_FIELD(s, field->next_pid);
1054     SEQ_PUT_FIELD(s, field->next_prio);
1055     SEQ_PUT_FIELD(s, field->next_state);
1056 
1057     return trace_handle_return(s);
1058 }
1059 
1060 static struct trace_event_functions trace_ctx_funcs = {
1061     .trace      = trace_ctx_print,
1062     .raw        = trace_ctx_raw,
1063     .hex        = trace_ctx_hex,
1064     .binary     = trace_ctxwake_bin,
1065 };
1066 
1067 static struct trace_event trace_ctx_event = {
1068     .type       = TRACE_CTX,
1069     .funcs      = &trace_ctx_funcs,
1070 };
1071 
1072 static struct trace_event_functions trace_wake_funcs = {
1073     .trace      = trace_wake_print,
1074     .raw        = trace_wake_raw,
1075     .hex        = trace_wake_hex,
1076     .binary     = trace_ctxwake_bin,
1077 };
1078 
1079 static struct trace_event trace_wake_event = {
1080     .type       = TRACE_WAKE,
1081     .funcs      = &trace_wake_funcs,
1082 };
1083 
1084 /* TRACE_STACK */
1085 
1086 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1087                        int flags, struct trace_event *event)
1088 {
1089     struct stack_entry *field;
1090     struct trace_seq *s = &iter->seq;
1091     unsigned long *p;
1092     unsigned long *end;
1093 
1094     trace_assign_type(field, iter->ent);
1095     end = (unsigned long *)((long)iter->ent + iter->ent_size);
1096 
1097     trace_seq_puts(s, "<stack trace>\n");
1098 
1099     for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {
1100 
1101         if (trace_seq_has_overflowed(s))
1102             break;
1103 
1104         trace_seq_puts(s, " => ");
1105         seq_print_ip_sym(s, *p, flags);
1106         trace_seq_putc(s, '\n');
1107     }
1108 
1109     return trace_handle_return(s);
1110 }
1111 
1112 static struct trace_event_functions trace_stack_funcs = {
1113     .trace      = trace_stack_print,
1114 };
1115 
1116 static struct trace_event trace_stack_event = {
1117     .type       = TRACE_STACK,
1118     .funcs      = &trace_stack_funcs,
1119 };
1120 
1121 /* TRACE_USER_STACK */
1122 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1123                         int flags, struct trace_event *event)
1124 {
1125     struct trace_array *tr = iter->tr;
1126     struct userstack_entry *field;
1127     struct trace_seq *s = &iter->seq;
1128     struct mm_struct *mm = NULL;
1129     unsigned int i;
1130 
1131     trace_assign_type(field, iter->ent);
1132 
1133     trace_seq_puts(s, "<user stack trace>\n");
1134 
1135     if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
1136         struct task_struct *task;
1137         /*
1138          * we do the lookup on the thread group leader,
1139          * since individual threads might have already quit!
1140          */
1141         rcu_read_lock();
1142         task = find_task_by_vpid(field->tgid);
1143         if (task)
1144             mm = get_task_mm(task);
1145         rcu_read_unlock();
1146     }
1147 
1148     for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1149         unsigned long ip = field->caller[i];
1150 
1151         if (!ip || trace_seq_has_overflowed(s))
1152             break;
1153 
1154         trace_seq_puts(s, " => ");
1155         seq_print_user_ip(s, mm, ip, flags);
1156         trace_seq_putc(s, '\n');
1157     }
1158 
1159     if (mm)
1160         mmput(mm);
1161 
1162     return trace_handle_return(s);
1163 }
1164 
1165 static struct trace_event_functions trace_user_stack_funcs = {
1166     .trace      = trace_user_stack_print,
1167 };
1168 
1169 static struct trace_event trace_user_stack_event = {
1170     .type       = TRACE_USER_STACK,
1171     .funcs      = &trace_user_stack_funcs,
1172 };
1173 
1174 /* TRACE_HWLAT */
1175 static enum print_line_t
1176 trace_hwlat_print(struct trace_iterator *iter, int flags,
1177           struct trace_event *event)
1178 {
1179     struct trace_entry *entry = iter->ent;
1180     struct trace_seq *s = &iter->seq;
1181     struct hwlat_entry *field;
1182 
1183     trace_assign_type(field, entry);
1184 
1185     trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld count:%d",
1186              field->seqnum,
1187              field->duration,
1188              field->outer_duration,
1189              (long long)field->timestamp.tv_sec,
1190              field->timestamp.tv_nsec, field->count);
1191 
1192     if (field->nmi_count) {
1193         /*
1194          * The generic sched_clock() is not NMI safe, thus
1195          * we only record the count and not the time.
1196          */
1197         if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
1198             trace_seq_printf(s, " nmi-total:%llu",
1199                      field->nmi_total_ts);
1200         trace_seq_printf(s, " nmi-count:%u",
1201                  field->nmi_count);
1202     }
1203 
1204     trace_seq_putc(s, '\n');
1205 
1206     return trace_handle_return(s);
1207 }
1208 
1209 static enum print_line_t
1210 trace_hwlat_raw(struct trace_iterator *iter, int flags,
1211         struct trace_event *event)
1212 {
1213     struct hwlat_entry *field;
1214     struct trace_seq *s = &iter->seq;
1215 
1216     trace_assign_type(field, iter->ent);
1217 
1218     trace_seq_printf(s, "%llu %lld %lld %09ld %u\n",
1219              field->duration,
1220              field->outer_duration,
1221              (long long)field->timestamp.tv_sec,
1222              field->timestamp.tv_nsec,
1223              field->seqnum);
1224 
1225     return trace_handle_return(s);
1226 }
1227 
1228 static struct trace_event_functions trace_hwlat_funcs = {
1229     .trace      = trace_hwlat_print,
1230     .raw        = trace_hwlat_raw,
1231 };
1232 
1233 static struct trace_event trace_hwlat_event = {
1234     .type       = TRACE_HWLAT,
1235     .funcs      = &trace_hwlat_funcs,
1236 };
1237 
1238 /* TRACE_OSNOISE */
1239 static enum print_line_t
1240 trace_osnoise_print(struct trace_iterator *iter, int flags,
1241             struct trace_event *event)
1242 {
1243     struct trace_entry *entry = iter->ent;
1244     struct trace_seq *s = &iter->seq;
1245     struct osnoise_entry *field;
1246     u64 ratio, ratio_dec;
1247     u64 net_runtime;
1248 
1249     trace_assign_type(field, entry);
1250 
1251     /*
1252      * compute the available % of cpu time.
1253      */
1254     net_runtime = field->runtime - field->noise;
1255     ratio = net_runtime * 10000000;
1256     do_div(ratio, field->runtime);
1257     ratio_dec = do_div(ratio, 100000);
1258 
1259     trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
1260              field->runtime,
1261              field->noise,
1262              ratio, ratio_dec,
1263              field->max_sample);
1264 
1265     trace_seq_printf(s, " %6u", field->hw_count);
1266     trace_seq_printf(s, " %6u", field->nmi_count);
1267     trace_seq_printf(s, " %6u", field->irq_count);
1268     trace_seq_printf(s, " %6u", field->softirq_count);
1269     trace_seq_printf(s, " %6u", field->thread_count);
1270 
1271     trace_seq_putc(s, '\n');
1272 
1273     return trace_handle_return(s);
1274 }
1275 
1276 static enum print_line_t
1277 trace_osnoise_raw(struct trace_iterator *iter, int flags,
1278           struct trace_event *event)
1279 {
1280     struct osnoise_entry *field;
1281     struct trace_seq *s = &iter->seq;
1282 
1283     trace_assign_type(field, iter->ent);
1284 
1285     trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
1286              field->runtime,
1287              field->noise,
1288              field->max_sample,
1289              field->hw_count,
1290              field->nmi_count,
1291              field->irq_count,
1292              field->softirq_count,
1293              field->thread_count);
1294 
1295     return trace_handle_return(s);
1296 }
1297 
1298 static struct trace_event_functions trace_osnoise_funcs = {
1299     .trace      = trace_osnoise_print,
1300     .raw        = trace_osnoise_raw,
1301 };
1302 
1303 static struct trace_event trace_osnoise_event = {
1304     .type       = TRACE_OSNOISE,
1305     .funcs      = &trace_osnoise_funcs,
1306 };
1307 
1308 /* TRACE_TIMERLAT */
1309 static enum print_line_t
1310 trace_timerlat_print(struct trace_iterator *iter, int flags,
1311              struct trace_event *event)
1312 {
1313     struct trace_entry *entry = iter->ent;
1314     struct trace_seq *s = &iter->seq;
1315     struct timerlat_entry *field;
1316 
1317     trace_assign_type(field, entry);
1318 
1319     trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
1320              field->seqnum,
1321              field->context ? "thread" : "irq",
1322              field->timer_latency);
1323 
1324     return trace_handle_return(s);
1325 }
1326 
1327 static enum print_line_t
1328 trace_timerlat_raw(struct trace_iterator *iter, int flags,
1329            struct trace_event *event)
1330 {
1331     struct timerlat_entry *field;
1332     struct trace_seq *s = &iter->seq;
1333 
1334     trace_assign_type(field, iter->ent);
1335 
1336     trace_seq_printf(s, "%u %d %llu\n",
1337              field->seqnum,
1338              field->context,
1339              field->timer_latency);
1340 
1341     return trace_handle_return(s);
1342 }
1343 
1344 static struct trace_event_functions trace_timerlat_funcs = {
1345     .trace      = trace_timerlat_print,
1346     .raw        = trace_timerlat_raw,
1347 };
1348 
1349 static struct trace_event trace_timerlat_event = {
1350     .type       = TRACE_TIMERLAT,
1351     .funcs      = &trace_timerlat_funcs,
1352 };
1353 
1354 /* TRACE_BPUTS */
1355 static enum print_line_t
1356 trace_bputs_print(struct trace_iterator *iter, int flags,
1357            struct trace_event *event)
1358 {
1359     struct trace_entry *entry = iter->ent;
1360     struct trace_seq *s = &iter->seq;
1361     struct bputs_entry *field;
1362 
1363     trace_assign_type(field, entry);
1364 
1365     seq_print_ip_sym(s, field->ip, flags);
1366     trace_seq_puts(s, ": ");
1367     trace_seq_puts(s, field->str);
1368 
1369     return trace_handle_return(s);
1370 }
1371 
1372 
1373 static enum print_line_t
1374 trace_bputs_raw(struct trace_iterator *iter, int flags,
1375         struct trace_event *event)
1376 {
1377     struct bputs_entry *field;
1378     struct trace_seq *s = &iter->seq;
1379 
1380     trace_assign_type(field, iter->ent);
1381 
1382     trace_seq_printf(s, ": %lx : ", field->ip);
1383     trace_seq_puts(s, field->str);
1384 
1385     return trace_handle_return(s);
1386 }
1387 
1388 static struct trace_event_functions trace_bputs_funcs = {
1389     .trace      = trace_bputs_print,
1390     .raw        = trace_bputs_raw,
1391 };
1392 
1393 static struct trace_event trace_bputs_event = {
1394     .type       = TRACE_BPUTS,
1395     .funcs      = &trace_bputs_funcs,
1396 };
1397 
1398 /* TRACE_BPRINT */
1399 static enum print_line_t
1400 trace_bprint_print(struct trace_iterator *iter, int flags,
1401            struct trace_event *event)
1402 {
1403     struct trace_entry *entry = iter->ent;
1404     struct trace_seq *s = &iter->seq;
1405     struct bprint_entry *field;
1406 
1407     trace_assign_type(field, entry);
1408 
1409     seq_print_ip_sym(s, field->ip, flags);
1410     trace_seq_puts(s, ": ");
1411     trace_seq_bprintf(s, field->fmt, field->buf);
1412 
1413     return trace_handle_return(s);
1414 }
1415 
1416 
1417 static enum print_line_t
1418 trace_bprint_raw(struct trace_iterator *iter, int flags,
1419          struct trace_event *event)
1420 {
1421     struct bprint_entry *field;
1422     struct trace_seq *s = &iter->seq;
1423 
1424     trace_assign_type(field, iter->ent);
1425 
1426     trace_seq_printf(s, ": %lx : ", field->ip);
1427     trace_seq_bprintf(s, field->fmt, field->buf);
1428 
1429     return trace_handle_return(s);
1430 }
1431 
1432 static struct trace_event_functions trace_bprint_funcs = {
1433     .trace      = trace_bprint_print,
1434     .raw        = trace_bprint_raw,
1435 };
1436 
1437 static struct trace_event trace_bprint_event = {
1438     .type       = TRACE_BPRINT,
1439     .funcs      = &trace_bprint_funcs,
1440 };
1441 
1442 /* TRACE_PRINT */
1443 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1444                        int flags, struct trace_event *event)
1445 {
1446     struct print_entry *field;
1447     struct trace_seq *s = &iter->seq;
1448 
1449     trace_assign_type(field, iter->ent);
1450 
1451     seq_print_ip_sym(s, field->ip, flags);
1452     trace_seq_printf(s, ": %s", field->buf);
1453 
1454     return trace_handle_return(s);
1455 }
1456 
1457 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
1458                      struct trace_event *event)
1459 {
1460     struct print_entry *field;
1461 
1462     trace_assign_type(field, iter->ent);
1463 
1464     trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
1465 
1466     return trace_handle_return(&iter->seq);
1467 }
1468 
1469 static struct trace_event_functions trace_print_funcs = {
1470     .trace      = trace_print_print,
1471     .raw        = trace_print_raw,
1472 };
1473 
1474 static struct trace_event trace_print_event = {
1475     .type       = TRACE_PRINT,
1476     .funcs      = &trace_print_funcs,
1477 };
1478 
1479 static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
1480                      struct trace_event *event)
1481 {
1482     struct raw_data_entry *field;
1483     int i;
1484 
1485     trace_assign_type(field, iter->ent);
1486 
1487     trace_seq_printf(&iter->seq, "# %x buf:", field->id);
1488 
1489     for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
1490         trace_seq_printf(&iter->seq, " %02x",
1491                  (unsigned char)field->buf[i]);
1492 
1493     trace_seq_putc(&iter->seq, '\n');
1494 
1495     return trace_handle_return(&iter->seq);
1496 }
1497 
1498 static struct trace_event_functions trace_raw_data_funcs = {
1499     .trace      = trace_raw_data,
1500     .raw        = trace_raw_data,
1501 };
1502 
1503 static struct trace_event trace_raw_data_event = {
1504     .type       = TRACE_RAW_DATA,
1505     .funcs      = &trace_raw_data_funcs,
1506 };
1507 
1508 static enum print_line_t
1509 trace_func_repeats_raw(struct trace_iterator *iter, int flags,
1510              struct trace_event *event)
1511 {
1512     struct func_repeats_entry *field;
1513     struct trace_seq *s = &iter->seq;
1514 
1515     trace_assign_type(field, iter->ent);
1516 
1517     trace_seq_printf(s, "%lu %lu %u %llu\n",
1518              field->ip,
1519              field->parent_ip,
1520              field->count,
1521              FUNC_REPEATS_GET_DELTA_TS(field));
1522 
1523     return trace_handle_return(s);
1524 }
1525 
1526 static enum print_line_t
1527 trace_func_repeats_print(struct trace_iterator *iter, int flags,
1528              struct trace_event *event)
1529 {
1530     struct func_repeats_entry *field;
1531     struct trace_seq *s = &iter->seq;
1532 
1533     trace_assign_type(field, iter->ent);
1534 
1535     print_fn_trace(s, field->ip, field->parent_ip, flags);
1536     trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
1537     trace_print_time(s, iter,
1538              iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
1539     trace_seq_puts(s, ")\n");
1540 
1541     return trace_handle_return(s);
1542 }
1543 
1544 static struct trace_event_functions trace_func_repeats_funcs = {
1545     .trace      = trace_func_repeats_print,
1546     .raw        = trace_func_repeats_raw,
1547 };
1548 
1549 static struct trace_event trace_func_repeats_event = {
1550     .type       = TRACE_FUNC_REPEATS,
1551     .funcs      = &trace_func_repeats_funcs,
1552 };
1553 
1554 static struct trace_event *events[] __initdata = {
1555     &trace_fn_event,
1556     &trace_ctx_event,
1557     &trace_wake_event,
1558     &trace_stack_event,
1559     &trace_user_stack_event,
1560     &trace_bputs_event,
1561     &trace_bprint_event,
1562     &trace_print_event,
1563     &trace_hwlat_event,
1564     &trace_osnoise_event,
1565     &trace_timerlat_event,
1566     &trace_raw_data_event,
1567     &trace_func_repeats_event,
1568     NULL
1569 };
1570 
1571 __init static int init_events(void)
1572 {
1573     struct trace_event *event;
1574     int i, ret;
1575 
1576     for (i = 0; events[i]; i++) {
1577         event = events[i];
1578         ret = register_trace_event(event);
1579         WARN_ONCE(!ret, "event %d failed to register", event->type);
1580     }
1581 
1582     return 0;
1583 }
1584 early_initcall(init_events);