// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
    pid_t       last_pid;
    int     depth;
    int     depth_irq;
    int     ignore;
    unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
    struct fgraph_cpu_data __percpu *cpu_data;

    /* Place to preserve last processed entry. */
    struct ftrace_graph_ent_entry   ent;
    struct ftrace_graph_ret_entry   ret;
    int             failed;
    int             cpu;
};

#define TRACE_GRAPH_INDENT  2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
    /* Display overruns? (for self-debugging purposes) */
    { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
    /* Display CPU ? */
    { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
    /* Display Overhead ? */
    { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
    /* Display proc name/pid */
    { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
    /* Display duration of execution */
    { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
    /* Display absolute time of an entry */
    { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
    /* Display interrupts */
    { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
    /* Display function name after trailing } */
    { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
    /* Include sleep time (scheduled out) between entry and return */
    { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
    /* Include time within nested functions */
    { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

    { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
    /* Don't display overruns, proc, or tail by default */
    .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
           TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
           TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
    .opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
    FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
    FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
    FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
             struct trace_seq *s, u32 flags);

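/*
 * Reserve a TRACE_GRAPH_ENT event on the ring buffer, copy the
 * function-entry record into it, and commit it unless event filters
 * discard it. Returns 1 on success, 0 if the reservation failed.
 */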
int __trace_graph_entry(struct trace_array *tr,
                struct ftrace_graph_ent *trace,
                unsigned int trace_ctx)
{
    struct trace_event_call *call = &event_funcgraph_entry;
    struct ring_buffer_event *event;
    struct trace_buffer *buffer = tr->array_buffer.buffer;
    struct ftrace_graph_ent_entry *entry;

    event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                      sizeof(*entry), trace_ctx);
    if (!event)
        return 0;
    entry   = ring_buffer_event_data(event);
    entry->graph_ent            = *trace;
    if (!call_filter_check_discard(call, entry, buffer, event))
        trace_buffer_unlock_commit_nostack(buffer, event);

    return 1;
}

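/*
 * Decide whether the current function should be skipped as irq code:
 * only when the funcgraph-irqs option is off, the TRACE_IRQ_BIT
 * recursion flag is not already set, and we are in hard-irq context.
 */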
static inline int ftrace_graph_ignore_irqs(void)
{
    if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
        return 0;

    return in_hardirq();
}

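/*
 * Entry callback hooked up through fgraph_ops. The per-CPU 'disabled'
 * counter acts as a reentrancy guard: only the outermost nesting level
 * (disabled == 1) writes the entry event to the ring buffer.
 */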
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
    struct trace_array *tr = graph_array;
    struct trace_array_cpu *data;
    unsigned long flags;
    unsigned int trace_ctx;
    long disabled;
    int ret;
    int cpu;

    if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
        return 0;

    /*
     * Do not trace a function if it's filtered by set_graph_notrace.
     * Make the index of ret stack negative to indicate that it should
     * ignore further functions.  But it needs its own ret stack entry
     * to recover the original index in order to continue tracing after
     * returning from the function.
     */
    if (ftrace_graph_notrace_addr(trace->func)) {
        trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
        /*
         * Need to return 1 to have the return called
         * that will clear the NOTRACE bit.
         */
        return 1;
    }

    if (!ftrace_trace_task(tr))
        return 0;

    if (ftrace_graph_ignore_func(trace))
        return 0;

    if (ftrace_graph_ignore_irqs())
        return 0;

    /*
     * Stop here if tracing_thresh is set. We only write function return
     * events to the ring buffer.
     */
    if (tracing_thresh)
        return 1;

    local_irq_save(flags);
    cpu = raw_smp_processor_id();
    data = per_cpu_ptr(tr->array_buffer.data, cpu);
    disabled = atomic_inc_return(&data->disabled);
    if (likely(disabled == 1)) {
        trace_ctx = tracing_gen_ctx_flags(flags);
        ret = __trace_graph_entry(tr, trace, trace_ctx);
    } else {
        ret = 0;
    }

    atomic_dec(&data->disabled);
    local_irq_restore(flags);

    return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
        unsigned long ip, unsigned int trace_ctx)
{
    u64 time = trace_clock_local();
    struct ftrace_graph_ent ent = {
        .func  = ip,
        .depth = 0,
    };
    struct ftrace_graph_ret ret = {
        .func     = ip,
        .depth    = 0,
        .calltime = time,
        .rettime  = time,
    };

    __trace_graph_entry(tr, &ent, trace_ctx);
    __trace_graph_return(tr, &ret, trace_ctx);
}

void
trace_graph_function(struct trace_array *tr,
        unsigned long ip, unsigned long parent_ip,
        unsigned int trace_ctx)
{
    __trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
                struct ftrace_graph_ret *trace,
                unsigned int trace_ctx)
{
    struct trace_event_call *call = &event_funcgraph_exit;
    struct ring_buffer_event *event;
    struct trace_buffer *buffer = tr->array_buffer.buffer;
    struct ftrace_graph_ret_entry *entry;

    event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                      sizeof(*entry), trace_ctx);
    if (!event)
        return;
    entry   = ring_buffer_event_data(event);
    entry->ret              = *trace;
    if (!call_filter_check_discard(call, entry, buffer, event))
        trace_buffer_unlock_commit_nostack(buffer, event);
}

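/*
 * Return callback hooked up through fgraph_ops. It mirrors
 * trace_graph_entry(): the per-CPU 'disabled' counter ensures only the
 * outermost nesting level writes the return event to the ring buffer.
 */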
void trace_graph_return(struct ftrace_graph_ret *trace)
{
    struct trace_array *tr = graph_array;
    struct trace_array_cpu *data;
    unsigned long flags;
    unsigned int trace_ctx;
    long disabled;
    int cpu;

    ftrace_graph_addr_finish(trace);

    if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
        trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
        return;
    }

    local_irq_save(flags);
    cpu = raw_smp_processor_id();
    data = per_cpu_ptr(tr->array_buffer.data, cpu);
    disabled = atomic_inc_return(&data->disabled);
    if (likely(disabled == 1)) {
        trace_ctx = tracing_gen_ctx_flags(flags);
        __trace_graph_return(tr, trace, trace_ctx);
    }
    atomic_dec(&data->disabled);
    local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
    graph_array = tr;

    /* Make graph_array visible before we start tracing */

    smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
    ftrace_graph_addr_finish(trace);

    if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
        trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
        return;
    }

    if (tracing_thresh &&
        (trace->rettime - trace->calltime < tracing_thresh))
        return;
    else
        trace_graph_return(trace);
}

static struct fgraph_ops funcgraph_thresh_ops = {
    .entryfunc = &trace_graph_entry,
    .retfunc = &trace_graph_thresh_return,
};

static struct fgraph_ops funcgraph_ops = {
    .entryfunc = &trace_graph_entry,
    .retfunc = &trace_graph_return,
};

static int graph_trace_init(struct trace_array *tr)
{
    int ret;

    set_graph_array(tr);
    if (tracing_thresh)
        ret = register_ftrace_graph(&funcgraph_thresh_ops);
    else
        ret = register_ftrace_graph(&funcgraph_ops);
    if (ret)
        return ret;
    tracing_start_cmdline_record();

    return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
    tracing_stop_cmdline_record();
    if (tracing_thresh)
        unregister_ftrace_graph(&funcgraph_thresh_ops);
    else
        unregister_ftrace_graph(&funcgraph_ops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
    graph_trace_reset(tr);
    return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
    /*
     * Start with a space character - to make it stand out
     * to the right a bit when trace output is pasted into
     * email:
     */
    trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH 14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
    char comm[TASK_COMM_LEN];
    /* sign + log10(MAX_INT) + '\0' */
    char pid_str[11];
    int spaces = 0;
    int len;
    int i;

    trace_find_cmdline(pid, comm);
    comm[7] = '\0';
    sprintf(pid_str, "%d", pid);

    /* 1 stands for the "-" character */
    len = strlen(comm) + strlen(pid_str) + 1;

    if (len < TRACE_GRAPH_PROCINFO_LENGTH)
        spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

    /* First spaces to align center */
    for (i = 0; i < spaces / 2; i++)
        trace_seq_putc(s, ' ');

    trace_seq_printf(s, "%s-%s", comm, pid_str);

    /* Last spaces to align center */
    for (i = 0; i < spaces - (spaces / 2); i++)
        trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
    trace_seq_putc(s, ' ');
    trace_print_lat_fmt(s, entry);
    trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
    pid_t prev_pid;
    pid_t *last_pid;

    if (!data)
        return;

    last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

    if (*last_pid == pid)
        return;

    prev_pid = *last_pid;
    *last_pid = pid;

    if (prev_pid == -1)
        return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
    trace_seq_puts(s, " ------------------------------------------\n");
    print_graph_cpu(s, cpu);
    print_graph_proc(s, prev_pid);
    trace_seq_puts(s, " => ");
    print_graph_proc(s, pid);
    trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

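/*
 * Peek at the event that follows @curr. If it is the matching
 * TRACE_GRAPH_RET for the same pid and function, @curr is a leaf
 * call that can be printed on a single line; advance the iterator
 * past the return event and hand it back. Otherwise return NULL.
 */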
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
        struct ftrace_graph_ent_entry *curr)
{
    struct fgraph_data *data = iter->private;
    struct ring_buffer_iter *ring_iter = NULL;
    struct ring_buffer_event *event;
    struct ftrace_graph_ret_entry *next;

    /*
     * If the previous output failed to write to the seq buffer,
     * then we just reuse the data from before.
     */
    if (data && data->failed) {
        curr = &data->ent;
        next = &data->ret;
    } else {

        ring_iter = trace_buffer_iter(iter, iter->cpu);

        /* First peek to compare current entry and the next one */
        if (ring_iter)
            event = ring_buffer_iter_peek(ring_iter, NULL);
        else {
            /*
             * We need to consume the current entry to see
             * the next one.
             */
            ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
                        NULL, NULL);
            event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
                         NULL, NULL);
        }

        if (!event)
            return NULL;

        next = ring_buffer_event_data(event);

        if (data) {
            /*
             * Save current and next entries for later reference
             * if the output fails.
             */
            data->ent = *curr;
            /*
             * If the next event is not a return type, then
             * we only care about what type it is. Otherwise we can
             * safely copy the entire event.
             */
            if (next->ent.type == TRACE_GRAPH_RET)
                data->ret = *next;
            else
                data->ret.ent.type = next->ent.type;
        }
    }

    if (next->ent.type != TRACE_GRAPH_RET)
        return NULL;

    if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
        return NULL;

    /* this is a leaf, now advance the iterator */
    if (ring_iter)
        ring_buffer_iter_advance(ring_iter);

    return next;
}

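/* Print the event's absolute timestamp as seconds.microseconds */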
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
    unsigned long usecs_rem;

    usecs_rem = do_div(t, NSEC_PER_SEC);
    usecs_rem /= 1000;

    trace_seq_printf(s, "%5lu.%06lu |  ",
             (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
    unsigned long long usecs;

    usecs = iter->ts - iter->array_buffer->time_start;
    do_div(usecs, NSEC_PER_USEC);

    trace_seq_printf(s, "%9llu us |  ", usecs);
}

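/*
 * If @addr falls inside the irq entry text section, annotate the trace
 * with "==========>" on irq entry and "<==========" on irq exit so the
 * reader can see where interrupt handling begins and ends.
 */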
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
        enum trace_type type, int cpu, pid_t pid, u32 flags)
{
    struct trace_array *tr = iter->tr;
    struct trace_seq *s = &iter->seq;
    struct trace_entry *ent = iter->ent;

    if (addr < (unsigned long)__irqentry_text_start ||
        addr >= (unsigned long)__irqentry_text_end)
        return;

    if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
            print_graph_abs_time(iter->ts, s);

        /* Relative time */
        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
            print_graph_rel_time(iter, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
            print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
            print_graph_proc(s, pid);
            trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
            print_graph_lat_fmt(s, ent);
    }

    /* No overhead */
    print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

    if (type == TRACE_GRAPH_ENT)
        trace_seq_puts(s, "==========>");
    else
        trace_seq_puts(s, "<==========");

    print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
    trace_seq_putc(s, '\n');
}

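/*
 * Format @duration, given in nanoseconds, as microseconds with up to
 * three fractional digits, padded to the fixed DURATION column width.
 */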
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
    unsigned long nsecs_rem = do_div(duration, 1000);
    /* log10(ULONG_MAX) + '\0' */
    char usecs_str[21];
    char nsecs_str[5];
    int len;
    int i;

    sprintf(usecs_str, "%lu", (unsigned long) duration);

    /* Print usecs */
    trace_seq_printf(s, "%s", usecs_str);

    len = strlen(usecs_str);

    /* Print nsecs (we don't want to exceed 7 numbers) */
    if (len < 7) {
        size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

        snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
        trace_seq_printf(s, ".%s", nsecs_str);
        len += strlen(nsecs_str) + 1;
    }

    trace_seq_puts(s, " us ");

    /* Print remaining spaces to fit the row's width */
    for (i = len; i < 8; i++)
        trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
             struct trace_seq *s, u32 flags)
{
    if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
        !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
        return;

    /* No real data, just filling the column with spaces */
    switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
    case FLAGS_FILL_FULL:
        trace_seq_puts(s, "              |  ");
        return;
    case FLAGS_FILL_START:
        trace_seq_puts(s, "  ");
        return;
    case FLAGS_FILL_END:
        trace_seq_puts(s, " |");
        return;
    }

    /* Signal the overhead of execution time to the output */
    if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
        trace_seq_printf(s, "%c ", trace_find_mark(duration));
    else
        trace_seq_puts(s, "  ");

    trace_print_graph_duration(duration, s);
    trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
        struct ftrace_graph_ent_entry *entry,
        struct ftrace_graph_ret_entry *ret_entry,
        struct trace_seq *s, u32 flags)
{
    struct fgraph_data *data = iter->private;
    struct trace_array *tr = iter->tr;
    struct ftrace_graph_ret *graph_ret;
    struct ftrace_graph_ent *call;
    unsigned long long duration;
    int cpu = iter->cpu;
    int i;

    graph_ret = &ret_entry->ret;
    call = &entry->graph_ent;
    duration = graph_ret->rettime - graph_ret->calltime;

    if (data) {
        struct fgraph_cpu_data *cpu_data;

        cpu_data = per_cpu_ptr(data->cpu_data, cpu);

        /*
         * Comments display at depth + 1. Since this is a
         * leaf function, keep the comments equal to this
         * depth.
         */
        cpu_data->depth = call->depth - 1;

        /* No need to keep this function around for this depth */
        if (call->depth < FTRACE_RETFUNC_DEPTH &&
            !WARN_ON_ONCE(call->depth < 0))
            cpu_data->enter_funcs[call->depth] = 0;
    }

    /* Overhead and duration */
    print_graph_duration(tr, duration, s, flags);

    /* Function */
    for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
        trace_seq_putc(s, ' ');

    trace_seq_printf(s, "%ps();\n", (void *)call->func);

    print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
            cpu, iter->ent->pid, flags);

    return trace_handle_return(s);
}

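/*
 * Case of a nested (non-leaf) function entry: print "func() {" with an
 * empty duration column; the matching TRACE_GRAPH_RET event will print
 * the closing brace and the duration later.
 */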
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
             struct ftrace_graph_ent_entry *entry,
             struct trace_seq *s, int cpu, u32 flags)
{
    struct ftrace_graph_ent *call = &entry->graph_ent;
    struct fgraph_data *data = iter->private;
    struct trace_array *tr = iter->tr;
    int i;

    if (data) {
        struct fgraph_cpu_data *cpu_data;
        int cpu = iter->cpu;

        cpu_data = per_cpu_ptr(data->cpu_data, cpu);
        cpu_data->depth = call->depth;

        /* Save this function pointer to see if the exit matches */
        if (call->depth < FTRACE_RETFUNC_DEPTH &&
            !WARN_ON_ONCE(call->depth < 0))
            cpu_data->enter_funcs[call->depth] = call->func;
    }

    /* No time */
    print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

    /* Function */
    for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
        trace_seq_putc(s, ' ');

    trace_seq_printf(s, "%ps() {\n", (void *)call->func);

    if (trace_seq_has_overflowed(s))
        return TRACE_TYPE_PARTIAL_LINE;

    /*
     * we already consumed the current entry to check the next one
     * and see if this is a leaf.
     */
    return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
             int type, unsigned long addr, u32 flags)
{
    struct fgraph_data *data = iter->private;
    struct trace_entry *ent = iter->ent;
    struct trace_array *tr = iter->tr;
    int cpu = iter->cpu;

    /* Pid */
    verif_pid(s, ent->pid, cpu, data);

    if (type)
        /* Interrupt */
        print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

    if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
        return;

    /* Absolute time */
    if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
        print_graph_abs_time(iter->ts, s);

    /* Relative time */
    if (flags & TRACE_GRAPH_PRINT_REL_TIME)
        print_graph_rel_time(iter, s);

    /* Cpu */
    if (flags & TRACE_GRAPH_PRINT_CPU)
        print_graph_cpu(s, cpu);

    /* Proc */
    if (flags & TRACE_GRAPH_PRINT_PROC) {
        print_graph_proc(s, ent->pid);
        trace_seq_puts(s, " | ");
    }

    /* Latency format */
    if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
        print_graph_lat_fmt(s, ent);

    return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
        unsigned long addr, int depth)
{
    int cpu = iter->cpu;
    int *depth_irq;
    struct fgraph_data *data = iter->private;

    /*
     * If we are either displaying irqs, or we got called as
     * a graph event and private data does not exist,
     * then we bypass the irq check.
     */
    if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
        (!data))
        return 0;

    depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

    /*
     * We are inside the irq code
     */
    if (*depth_irq >= 0)
        return 1;

    if ((addr < (unsigned long)__irqentry_text_start) ||
        (addr >= (unsigned long)__irqentry_text_end))
        return 0;

    /*
     * We are entering irq code.
     */
    *depth_irq = depth;
    return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
    int cpu = iter->cpu;
    int *depth_irq;
    struct fgraph_data *data = iter->private;

    /*
     * If we are either displaying irqs, or we got called as
     * a graph event and private data does not exist,
     * then we bypass the irq check.
     */
    if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
        (!data))
        return 0;

    depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

    /*
     * We are not inside the irq code.
     */
    if (*depth_irq == -1)
        return 0;

    /*
     * We are inside the irq code, and this is the returning entry.
     * Don't trace it, and clear the entry depth, since we are now
     * out of irq code.
     *
     * This condition ensures that we 'leave the irq code' once
     * we are out of the entry depth, protecting us from losing
     * the RETURN entry.
     */
    if (*depth_irq >= depth) {
        *depth_irq = -1;
        return 1;
    }

    /*
     * We are inside the irq code, and this is not the entry.
     */
    return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
            struct trace_iterator *iter, u32 flags)
{
    struct fgraph_data *data = iter->private;
    struct ftrace_graph_ent *call = &field->graph_ent;
    struct ftrace_graph_ret_entry *leaf_ret;
    static enum print_line_t ret;
    int cpu = iter->cpu;

    if (check_irq_entry(iter, flags, call->func, call->depth))
        return TRACE_TYPE_HANDLED;

    print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

    leaf_ret = get_return_for_leaf(iter, field);
    if (leaf_ret)
        ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
    else
        ret = print_graph_entry_nested(iter, field, s, cpu, flags);

    if (data) {
        /*
         * If we failed to write our output, then we need to make
         * note of it, because we already consumed our entry.
         */
        if (s->full) {
            data->failed = 1;
            data->cpu = cpu;
        } else
            data->failed = 0;
    }

    return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
           struct trace_entry *ent, struct trace_iterator *iter,
           u32 flags)
{
    unsigned long long duration = trace->rettime - trace->calltime;
    struct fgraph_data *data = iter->private;
    struct trace_array *tr = iter->tr;
    pid_t pid = ent->pid;
    int cpu = iter->cpu;
    int func_match = 1;
    int i;

    if (check_irq_return(iter, flags, trace->depth))
        return TRACE_TYPE_HANDLED;

    if (data) {
        struct fgraph_cpu_data *cpu_data;
        int cpu = iter->cpu;

        cpu_data = per_cpu_ptr(data->cpu_data, cpu);

        /*
         * Comments display at depth + 1. This is the return
         * from a function, so we now want the comments to
         * display at the same level as the bracket.
         */
        cpu_data->depth = trace->depth - 1;

        if (trace->depth < FTRACE_RETFUNC_DEPTH &&
            !WARN_ON_ONCE(trace->depth < 0)) {
            if (cpu_data->enter_funcs[trace->depth] != trace->func)
                func_match = 0;
            cpu_data->enter_funcs[trace->depth] = 0;
        }
    }

    print_graph_prologue(iter, s, 0, 0, flags);

    /* Overhead and duration */
    print_graph_duration(tr, duration, s, flags);

    /* Closing brace */
    for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
        trace_seq_putc(s, ' ');

    /*
     * If the return function does not have a matching entry,
     * then the entry was lost. Instead of just printing
     * the '}' and letting the user guess what function this
     * belongs to, write out the function name. Always do
     * that if the funcgraph-tail option is enabled.
     */
    if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
        trace_seq_puts(s, "}\n");
    else
        trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

    /* Overrun */
    if (flags & TRACE_GRAPH_PRINT_OVERRUN)
        trace_seq_printf(s, " (Overruns: %lu)\n",
                 trace->overrun);

    print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
            cpu, pid, flags);

    return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
            struct trace_iterator *iter, u32 flags)
{
    struct trace_array *tr = iter->tr;
    unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
    struct fgraph_data *data = iter->private;
    struct trace_event *event;
    int depth = 0;
    int ret;
    int i;

    if (data)
        depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

    print_graph_prologue(iter, s, 0, 0, flags);

    /* No time */
    print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

    /* Indentation */
    if (depth > 0)
        for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
            trace_seq_putc(s, ' ');

    /* The comment */
    trace_seq_puts(s, "/* ");

    switch (iter->ent->type) {
    case TRACE_BPUTS:
        ret = trace_print_bputs_msg_only(iter);
        if (ret != TRACE_TYPE_HANDLED)
            return ret;
        break;
    case TRACE_BPRINT:
        ret = trace_print_bprintk_msg_only(iter);
        if (ret != TRACE_TYPE_HANDLED)
            return ret;
        break;
    case TRACE_PRINT:
        ret = trace_print_printk_msg_only(iter);
        if (ret != TRACE_TYPE_HANDLED)
            return ret;
        break;
    default:
        event = ftrace_find_event(ent->type);
        if (!event)
            return TRACE_TYPE_UNHANDLED;

        ret = event->funcs->trace(iter, sym_flags, event);
        if (ret != TRACE_TYPE_HANDLED)
            return ret;
    }

    if (trace_seq_has_overflowed(s))
        goto out;

    /* Strip ending newline */
    if (s->buffer[s->seq.len - 1] == '\n') {
        s->buffer[s->seq.len - 1] = '\0';
        s->seq.len--;
    }

    trace_seq_puts(s, " */\n");
 out:
    return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
    struct ftrace_graph_ent_entry *field;
    struct fgraph_data *data = iter->private;
    struct trace_entry *entry = iter->ent;
    struct trace_seq *s = &iter->seq;
    int cpu = iter->cpu;
    int ret;

    if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
        per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
        return TRACE_TYPE_HANDLED;
    }

    /*
     * If the last output failed, there's a possibility we need
     * to print out the missing entry, which would otherwise
     * never be printed.
     */
    if (data && data->failed) {
        field = &data->ent;
        iter->cpu = data->cpu;
        ret = print_graph_entry(field, s, iter, flags);
        if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
            per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
            ret = TRACE_TYPE_NO_CONSUME;
        }
        iter->cpu = cpu;
        return ret;
    }

    switch (entry->type) {
    case TRACE_GRAPH_ENT: {
        /*
         * print_graph_entry() may consume the current event,
         * thus @field may become invalid, so we need to save it.
         * sizeof(struct ftrace_graph_ent_entry) is very small,
         * so it can be safely saved on the stack.
         */
        struct ftrace_graph_ent_entry saved;
        trace_assign_type(field, entry);
        saved = *field;
        return print_graph_entry(&saved, s, iter, flags);
    }
    case TRACE_GRAPH_RET: {
        struct ftrace_graph_ret_entry *field;
        trace_assign_type(field, entry);
        return print_graph_return(&field->ret, s, entry, iter, flags);
    }
    case TRACE_STACK:
    case TRACE_FN:
        /* don't trace stack and functions as comments */
        return TRACE_TYPE_UNHANDLED;

    default:
        return print_graph_comment(s, entry, iter, flags);
    }

    return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
    return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
               struct trace_event *event)
{
    return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
    static const char spaces[] = "                " /* 16 spaces */
        "    "                  /* 4 spaces */
        "                 ";            /* 17 spaces */
    int size = 0;

    if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
        size += 16;
    if (flags & TRACE_GRAPH_PRINT_REL_TIME)
        size += 16;
    if (flags & TRACE_GRAPH_PRINT_CPU)
        size += 4;
    if (flags & TRACE_GRAPH_PRINT_PROC)
        size += 17;

    seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
    seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
    seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
    seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
    seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                    struct seq_file *s, u32 flags)
{
    int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

    if (lat)
        print_lat_header(s, flags);

    /* 1st line */
    seq_putc(s, '#');
    if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
        seq_puts(s, "     TIME       ");
    if (flags & TRACE_GRAPH_PRINT_REL_TIME)
        seq_puts(s, "   REL TIME     ");
    if (flags & TRACE_GRAPH_PRINT_CPU)
        seq_puts(s, " CPU");
    if (flags & TRACE_GRAPH_PRINT_PROC)
        seq_puts(s, "  TASK/PID       ");
    if (lat)
        seq_puts(s, "||||   ");
    if (flags & TRACE_GRAPH_PRINT_DURATION)
        seq_puts(s, "  DURATION   ");
    seq_puts(s, "               FUNCTION CALLS\n");

    /* 2nd line */
    seq_putc(s, '#');
    if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
        seq_puts(s, "      |         ");
    if (flags & TRACE_GRAPH_PRINT_REL_TIME)
        seq_puts(s, "      |         ");
    if (flags & TRACE_GRAPH_PRINT_CPU)
        seq_puts(s, " |  ");
    if (flags & TRACE_GRAPH_PRINT_PROC)
        seq_puts(s, "   |    |        ");
    if (lat)
        seq_puts(s, "||||   ");
    if (flags & TRACE_GRAPH_PRINT_DURATION)
        seq_puts(s, "   |   |      ");
    seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
    print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
    struct trace_iterator *iter = s->private;
    struct trace_array *tr = iter->tr;

    if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
        return;

    if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
        /* print nothing if the buffers are empty */
        if (trace_empty(iter))
            return;

        print_trace_header(s, iter);
    }

    __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
    /* pid and depth on the last trace processed */
    struct fgraph_data *data;
    gfp_t gfpflags;
    int cpu;

    iter->private = NULL;

    /* We can be called in atomic context via ftrace_dump() */
    gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

    data = kzalloc(sizeof(*data), gfpflags);
    if (!data)
        goto out_err;

    data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
    if (!data->cpu_data)
        goto out_err_free;

    for_each_possible_cpu(cpu) {
        pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
        int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
        int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
        int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        *pid = -1;
        *depth = 0;
        *ignore = 0;
        *depth_irq = -1;
    }

    iter->private = data;

    return;

 out_err_free:
    kfree(data);
 out_err:
    pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
    struct fgraph_data *data = iter->private;

    if (data) {
        free_percpu(data->cpu_data);
        kfree(data);
    }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
    if (bit == TRACE_GRAPH_PRINT_IRQS)
        ftrace_graph_skip_irqs = !set;

    if (bit == TRACE_GRAPH_SLEEP_TIME)
        ftrace_graph_sleep_time_control(set);

    if (bit == TRACE_GRAPH_GRAPH_TIME)
        ftrace_graph_graph_time_control(set);

    return 0;
}

static struct trace_event_functions graph_functions = {
    .trace      = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
    .type       = TRACE_GRAPH_ENT,
    .funcs      = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
    .type       = TRACE_GRAPH_RET,
    .funcs      = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
    .name       = "function_graph",
    .update_thresh  = graph_trace_update_thresh,
    .open       = graph_trace_open,
    .pipe_open  = graph_trace_open,
    .close      = graph_trace_close,
    .pipe_close = graph_trace_close,
    .init       = graph_trace_init,
    .reset      = graph_trace_reset,
    .print_line = print_graph_function,
    .print_header   = print_graph_headers,
    .flags      = &tracer_flags,
    .set_flag   = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
    .selftest   = trace_selftest_startup_function_graph,
#endif
};

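/*
 * "max_graph_depth" limits how deep the tracer follows nested calls;
 * a value of 0 means no limit. For example (the tracefs mount point
 * may vary, typically /sys/kernel/tracing):
 *
 *   # echo 3 > /sys/kernel/tracing/max_graph_depth
 *   # echo function_graph > /sys/kernel/tracing/current_tracer
 */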
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
          loff_t *ppos)
{
    unsigned long val;
    int ret;

    ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    if (ret)
        return ret;

    fgraph_max_depth = val;

    *ppos += cnt;

    return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
         loff_t *ppos)
{
    char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
    int n;

    n = sprintf(buf, "%d\n", fgraph_max_depth);

    return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
    .open       = tracing_open_generic,
    .write      = graph_depth_write,
    .read       = graph_depth_read,
    .llseek     = generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
    int ret;

    ret = tracing_init_dentry();
    if (ret)
        return 0;

    trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
              NULL, &graph_depth_fops);

    return 0;
}
fs_initcall(init_graph_tracefs);

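/*
 * Register the graph entry/return trace events and the tracer itself.
 * The snprintf(NULL, 0, ...) idiom returns the number of characters a
 * formatted string would need, which gives the column width required
 * to right-align the largest possible CPU id in the output.
 */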
static __init int init_graph_trace(void)
{
    max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

    if (!register_trace_event(&graph_trace_entry_event)) {
        pr_warn("Warning: could not register graph trace events\n");
        return 1;
    }

    if (!register_trace_event(&graph_trace_ret_event)) {
        pr_warn("Warning: could not register graph trace events\n");
        return 1;
    }

    return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);