Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * ring buffer based function tracer
0004  *
0005  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
0006  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
0007  *
0008  * Based on code from the latency_tracer, that is:
0009  *
0010  *  Copyright (C) 2004-2006 Ingo Molnar
0011  *  Copyright (C) 2004 Nadia Yvette Chambers
0012  */
0013 #include <linux/ring_buffer.h>
0014 #include <linux/debugfs.h>
0015 #include <linux/uaccess.h>
0016 #include <linux/ftrace.h>
0017 #include <linux/slab.h>
0018 #include <linux/fs.h>
0019 
0020 #include "trace.h"
0021 
0022 static void tracing_start_function_trace(struct trace_array *tr);
0023 static void tracing_stop_function_trace(struct trace_array *tr);
0024 static void
0025 function_trace_call(unsigned long ip, unsigned long parent_ip,
0026             struct ftrace_ops *op, struct ftrace_regs *fregs);
0027 static void
0028 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
0029               struct ftrace_ops *op, struct ftrace_regs *fregs);
0030 static void
0031 function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
0032                    struct ftrace_ops *op, struct ftrace_regs *fregs);
0033 static void
0034 function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
0035                      struct ftrace_ops *op,
0036                      struct ftrace_regs *fregs);
0037 static struct tracer_flags func_flags;
0038 
0039 /* Our option */
0040 enum {
0041 
0042     TRACE_FUNC_NO_OPTS      = 0x0, /* No flags set. */
0043     TRACE_FUNC_OPT_STACK        = 0x1,
0044     TRACE_FUNC_OPT_NO_REPEATS   = 0x2,
0045 
0046     /* Update this to next highest bit. */
0047     TRACE_FUNC_OPT_HIGHEST_BIT  = 0x4
0048 };
0049 
0050 #define TRACE_FUNC_OPT_MASK (TRACE_FUNC_OPT_HIGHEST_BIT - 1)
0051 
0052 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
0053 {
0054     struct ftrace_ops *ops;
0055 
0056     /* The top level array uses the "global_ops" */
0057     if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
0058         return 0;
0059 
0060     ops = kzalloc(sizeof(*ops), GFP_KERNEL);
0061     if (!ops)
0062         return -ENOMEM;
0063 
0064     /* Currently only the non stack version is supported */
0065     ops->func = function_trace_call;
0066     ops->flags = FTRACE_OPS_FL_PID;
0067 
0068     tr->ops = ops;
0069     ops->private = tr;
0070 
0071     return 0;
0072 }
0073 
0074 void ftrace_free_ftrace_ops(struct trace_array *tr)
0075 {
0076     kfree(tr->ops);
0077     tr->ops = NULL;
0078 }
0079 
0080 int ftrace_create_function_files(struct trace_array *tr,
0081                  struct dentry *parent)
0082 {
0083     /*
0084      * The top level array uses the "global_ops", and the files are
0085      * created on boot up.
0086      */
0087     if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
0088         return 0;
0089 
0090     if (!tr->ops)
0091         return -EINVAL;
0092 
0093     ftrace_create_filter_files(tr->ops, parent);
0094 
0095     return 0;
0096 }
0097 
0098 void ftrace_destroy_function_files(struct trace_array *tr)
0099 {
0100     ftrace_destroy_filter_files(tr->ops);
0101     ftrace_free_ftrace_ops(tr);
0102 }
0103 
0104 static ftrace_func_t select_trace_function(u32 flags_val)
0105 {
0106     switch (flags_val & TRACE_FUNC_OPT_MASK) {
0107     case TRACE_FUNC_NO_OPTS:
0108         return function_trace_call;
0109     case TRACE_FUNC_OPT_STACK:
0110         return function_stack_trace_call;
0111     case TRACE_FUNC_OPT_NO_REPEATS:
0112         return function_no_repeats_trace_call;
0113     case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
0114         return function_stack_no_repeats_trace_call;
0115     default:
0116         return NULL;
0117     }
0118 }
0119 
0120 static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
0121 {
0122     if (!tr->last_func_repeats &&
0123         (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
0124         tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
0125         if (!tr->last_func_repeats)
0126             return false;
0127     }
0128 
0129     return true;
0130 }
0131 
0132 static int function_trace_init(struct trace_array *tr)
0133 {
0134     ftrace_func_t func;
0135     /*
0136      * Instance trace_arrays get their ops allocated
0137      * at instance creation. Unless it failed
0138      * the allocation.
0139      */
0140     if (!tr->ops)
0141         return -ENOMEM;
0142 
0143     func = select_trace_function(func_flags.val);
0144     if (!func)
0145         return -EINVAL;
0146 
0147     if (!handle_func_repeats(tr, func_flags.val))
0148         return -ENOMEM;
0149 
0150     ftrace_init_array_ops(tr, func);
0151 
0152     tr->array_buffer.cpu = raw_smp_processor_id();
0153 
0154     tracing_start_cmdline_record();
0155     tracing_start_function_trace(tr);
0156     return 0;
0157 }
0158 
0159 static void function_trace_reset(struct trace_array *tr)
0160 {
0161     tracing_stop_function_trace(tr);
0162     tracing_stop_cmdline_record();
0163     ftrace_reset_array_ops(tr);
0164 }
0165 
0166 static void function_trace_start(struct trace_array *tr)
0167 {
0168     tracing_reset_online_cpus(&tr->array_buffer);
0169 }
0170 
0171 static void
0172 function_trace_call(unsigned long ip, unsigned long parent_ip,
0173             struct ftrace_ops *op, struct ftrace_regs *fregs)
0174 {
0175     struct trace_array *tr = op->private;
0176     struct trace_array_cpu *data;
0177     unsigned int trace_ctx;
0178     int bit;
0179     int cpu;
0180 
0181     if (unlikely(!tr->function_enabled))
0182         return;
0183 
0184     bit = ftrace_test_recursion_trylock(ip, parent_ip);
0185     if (bit < 0)
0186         return;
0187 
0188     trace_ctx = tracing_gen_ctx();
0189 
0190     cpu = smp_processor_id();
0191     data = per_cpu_ptr(tr->array_buffer.data, cpu);
0192     if (!atomic_read(&data->disabled))
0193         trace_function(tr, ip, parent_ip, trace_ctx);
0194 
0195     ftrace_test_recursion_unlock(bit);
0196 }
0197 
0198 #ifdef CONFIG_UNWINDER_ORC
0199 /*
0200  * Skip 2:
0201  *
0202  *   function_stack_trace_call()
0203  *   ftrace_call()
0204  */
0205 #define STACK_SKIP 2
0206 #else
0207 /*
0208  * Skip 3:
0209  *   __trace_stack()
0210  *   function_stack_trace_call()
0211  *   ftrace_call()
0212  */
0213 #define STACK_SKIP 3
0214 #endif
0215 
0216 static void
0217 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
0218               struct ftrace_ops *op, struct ftrace_regs *fregs)
0219 {
0220     struct trace_array *tr = op->private;
0221     struct trace_array_cpu *data;
0222     unsigned long flags;
0223     long disabled;
0224     int cpu;
0225     unsigned int trace_ctx;
0226 
0227     if (unlikely(!tr->function_enabled))
0228         return;
0229 
0230     /*
0231      * Need to use raw, since this must be called before the
0232      * recursive protection is performed.
0233      */
0234     local_irq_save(flags);
0235     cpu = raw_smp_processor_id();
0236     data = per_cpu_ptr(tr->array_buffer.data, cpu);
0237     disabled = atomic_inc_return(&data->disabled);
0238 
0239     if (likely(disabled == 1)) {
0240         trace_ctx = tracing_gen_ctx_flags(flags);
0241         trace_function(tr, ip, parent_ip, trace_ctx);
0242         __trace_stack(tr, trace_ctx, STACK_SKIP);
0243     }
0244 
0245     atomic_dec(&data->disabled);
0246     local_irq_restore(flags);
0247 }
0248 
0249 static inline bool is_repeat_check(struct trace_array *tr,
0250                    struct trace_func_repeats *last_info,
0251                    unsigned long ip, unsigned long parent_ip)
0252 {
0253     if (last_info->ip == ip &&
0254         last_info->parent_ip == parent_ip &&
0255         last_info->count < U16_MAX) {
0256         last_info->ts_last_call =
0257             ring_buffer_time_stamp(tr->array_buffer.buffer);
0258         last_info->count++;
0259         return true;
0260     }
0261 
0262     return false;
0263 }
0264 
0265 static inline void process_repeats(struct trace_array *tr,
0266                    unsigned long ip, unsigned long parent_ip,
0267                    struct trace_func_repeats *last_info,
0268                    unsigned int trace_ctx)
0269 {
0270     if (last_info->count) {
0271         trace_last_func_repeats(tr, last_info, trace_ctx);
0272         last_info->count = 0;
0273     }
0274 
0275     last_info->ip = ip;
0276     last_info->parent_ip = parent_ip;
0277 }
0278 
0279 static void
0280 function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
0281                    struct ftrace_ops *op,
0282                    struct ftrace_regs *fregs)
0283 {
0284     struct trace_func_repeats *last_info;
0285     struct trace_array *tr = op->private;
0286     struct trace_array_cpu *data;
0287     unsigned int trace_ctx;
0288     unsigned long flags;
0289     int bit;
0290     int cpu;
0291 
0292     if (unlikely(!tr->function_enabled))
0293         return;
0294 
0295     bit = ftrace_test_recursion_trylock(ip, parent_ip);
0296     if (bit < 0)
0297         return;
0298 
0299     cpu = smp_processor_id();
0300     data = per_cpu_ptr(tr->array_buffer.data, cpu);
0301     if (atomic_read(&data->disabled))
0302         goto out;
0303 
0304     /*
0305      * An interrupt may happen at any place here. But as far as I can see,
0306      * the only damage that this can cause is to mess up the repetition
0307      * counter without valuable data being lost.
0308      * TODO: think about a solution that is better than just hoping to be
0309      * lucky.
0310      */
0311     last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
0312     if (is_repeat_check(tr, last_info, ip, parent_ip))
0313         goto out;
0314 
0315     local_save_flags(flags);
0316     trace_ctx = tracing_gen_ctx_flags(flags);
0317     process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
0318 
0319     trace_function(tr, ip, parent_ip, trace_ctx);
0320 
0321 out:
0322     ftrace_test_recursion_unlock(bit);
0323 }
0324 
0325 static void
0326 function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
0327                      struct ftrace_ops *op,
0328                      struct ftrace_regs *fregs)
0329 {
0330     struct trace_func_repeats *last_info;
0331     struct trace_array *tr = op->private;
0332     struct trace_array_cpu *data;
0333     unsigned long flags;
0334     long disabled;
0335     int cpu;
0336     unsigned int trace_ctx;
0337 
0338     if (unlikely(!tr->function_enabled))
0339         return;
0340 
0341     /*
0342      * Need to use raw, since this must be called before the
0343      * recursive protection is performed.
0344      */
0345     local_irq_save(flags);
0346     cpu = raw_smp_processor_id();
0347     data = per_cpu_ptr(tr->array_buffer.data, cpu);
0348     disabled = atomic_inc_return(&data->disabled);
0349 
0350     if (likely(disabled == 1)) {
0351         last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
0352         if (is_repeat_check(tr, last_info, ip, parent_ip))
0353             goto out;
0354 
0355         trace_ctx = tracing_gen_ctx_flags(flags);
0356         process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
0357 
0358         trace_function(tr, ip, parent_ip, trace_ctx);
0359         __trace_stack(tr, trace_ctx, STACK_SKIP);
0360     }
0361 
0362  out:
0363     atomic_dec(&data->disabled);
0364     local_irq_restore(flags);
0365 }
0366 
0367 static struct tracer_opt func_opts[] = {
0368 #ifdef CONFIG_STACKTRACE
0369     { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
0370 #endif
0371     { TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
0372     { } /* Always set a last empty entry */
0373 };
0374 
0375 static struct tracer_flags func_flags = {
0376     .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
0377     .opts = func_opts
0378 };
0379 
0380 static void tracing_start_function_trace(struct trace_array *tr)
0381 {
0382     tr->function_enabled = 0;
0383     register_ftrace_function(tr->ops);
0384     tr->function_enabled = 1;
0385 }
0386 
0387 static void tracing_stop_function_trace(struct trace_array *tr)
0388 {
0389     tr->function_enabled = 0;
0390     unregister_ftrace_function(tr->ops);
0391 }
0392 
0393 static struct tracer function_trace;
0394 
0395 static int
0396 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
0397 {
0398     ftrace_func_t func;
0399     u32 new_flags;
0400 
0401     /* Do nothing if already set. */
0402     if (!!set == !!(func_flags.val & bit))
0403         return 0;
0404 
0405     /* We can change this flag only when not running. */
0406     if (tr->current_trace != &function_trace)
0407         return 0;
0408 
0409     new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
0410     func = select_trace_function(new_flags);
0411     if (!func)
0412         return -EINVAL;
0413 
0414     /* Check if there's anything to change. */
0415     if (tr->ops->func == func)
0416         return 0;
0417 
0418     if (!handle_func_repeats(tr, new_flags))
0419         return -ENOMEM;
0420 
0421     unregister_ftrace_function(tr->ops);
0422     tr->ops->func = func;
0423     register_ftrace_function(tr->ops);
0424 
0425     return 0;
0426 }
0427 
0428 static struct tracer function_trace __tracer_data =
0429 {
0430     .name       = "function",
0431     .init       = function_trace_init,
0432     .reset      = function_trace_reset,
0433     .start      = function_trace_start,
0434     .flags      = &func_flags,
0435     .set_flag   = func_set_flag,
0436     .allow_instances = true,
0437 #ifdef CONFIG_FTRACE_SELFTEST
0438     .selftest   = trace_selftest_startup_function,
0439 #endif
0440 };
0441 
0442 #ifdef CONFIG_DYNAMIC_FTRACE
0443 static void update_traceon_count(struct ftrace_probe_ops *ops,
0444                  unsigned long ip,
0445                  struct trace_array *tr, bool on,
0446                  void *data)
0447 {
0448     struct ftrace_func_mapper *mapper = data;
0449     long *count;
0450     long old_count;
0451 
0452     /*
0453      * Tracing gets disabled (or enabled) once per count.
0454      * This function can be called at the same time on multiple CPUs.
0455      * It is fine if both disable (or enable) tracing, as disabling
0456      * (or enabling) the second time doesn't do anything as the
0457      * state of the tracer is already disabled (or enabled).
0458      * What needs to be synchronized in this case is that the count
0459      * only gets decremented once, even if the tracer is disabled
0460      * (or enabled) twice, as the second one is really a nop.
0461      *
0462      * The memory barriers guarantee that we only decrement the
0463      * counter once. First the count is read to a local variable
0464      * and a read barrier is used to make sure that it is loaded
0465      * before checking if the tracer is in the state we want.
0466      * If the tracer is not in the state we want, then the count
0467      * is guaranteed to be the old count.
0468      *
0469      * Next the tracer is set to the state we want (disabled or enabled)
0470      * then a write memory barrier is used to make sure that
0471      * the new state is visible before changing the counter by
0472      * one minus the old counter. This guarantees that another CPU
0473      * executing this code will see the new state before seeing
0474      * the new counter value, and would not do anything if the new
0475      * counter is seen.
0476      *
0477      * Note, there is no synchronization between this and a user
0478      * setting the tracing_on file. But we currently don't care
0479      * about that.
0480      */
0481     count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
0482     old_count = *count;
0483 
0484     if (old_count <= 0)
0485         return;
0486 
0487     /* Make sure we see count before checking tracing state */
0488     smp_rmb();
0489 
0490     if (on == !!tracer_tracing_is_on(tr))
0491         return;
0492 
0493     if (on)
0494         tracer_tracing_on(tr);
0495     else
0496         tracer_tracing_off(tr);
0497 
0498     /* Make sure tracing state is visible before updating count */
0499     smp_wmb();
0500 
0501     *count = old_count - 1;
0502 }
0503 
0504 static void
0505 ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
0506              struct trace_array *tr, struct ftrace_probe_ops *ops,
0507              void *data)
0508 {
0509     update_traceon_count(ops, ip, tr, 1, data);
0510 }
0511 
0512 static void
0513 ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
0514               struct trace_array *tr, struct ftrace_probe_ops *ops,
0515               void *data)
0516 {
0517     update_traceon_count(ops, ip, tr, 0, data);
0518 }
0519 
0520 static void
0521 ftrace_traceon(unsigned long ip, unsigned long parent_ip,
0522            struct trace_array *tr, struct ftrace_probe_ops *ops,
0523            void *data)
0524 {
0525     if (tracer_tracing_is_on(tr))
0526         return;
0527 
0528     tracer_tracing_on(tr);
0529 }
0530 
0531 static void
0532 ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
0533         struct trace_array *tr, struct ftrace_probe_ops *ops,
0534         void *data)
0535 {
0536     if (!tracer_tracing_is_on(tr))
0537         return;
0538 
0539     tracer_tracing_off(tr);
0540 }
0541 
0542 #ifdef CONFIG_UNWINDER_ORC
0543 /*
0544  * Skip 3:
0545  *
0546  *   function_trace_probe_call()
0547  *   ftrace_ops_assist_func()
0548  *   ftrace_call()
0549  */
0550 #define FTRACE_STACK_SKIP 3
0551 #else
0552 /*
0553  * Skip 5:
0554  *
0555  *   __trace_stack()
0556  *   ftrace_stacktrace()
0557  *   function_trace_probe_call()
0558  *   ftrace_ops_assist_func()
0559  *   ftrace_call()
0560  */
0561 #define FTRACE_STACK_SKIP 5
0562 #endif
0563 
0564 static __always_inline void trace_stack(struct trace_array *tr)
0565 {
0566     unsigned int trace_ctx;
0567 
0568     trace_ctx = tracing_gen_ctx();
0569 
0570     __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
0571 }
0572 
0573 static void
0574 ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
0575           struct trace_array *tr, struct ftrace_probe_ops *ops,
0576           void *data)
0577 {
0578     trace_stack(tr);
0579 }
0580 
0581 static void
0582 ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
0583             struct trace_array *tr, struct ftrace_probe_ops *ops,
0584             void *data)
0585 {
0586     struct ftrace_func_mapper *mapper = data;
0587     long *count;
0588     long old_count;
0589     long new_count;
0590 
0591     if (!tracing_is_on())
0592         return;
0593 
0594     /* unlimited? */
0595     if (!mapper) {
0596         trace_stack(tr);
0597         return;
0598     }
0599 
0600     count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
0601 
0602     /*
0603      * Stack traces should only execute the number of times the
0604      * user specified in the counter.
0605      */
0606     do {
0607         old_count = *count;
0608 
0609         if (!old_count)
0610             return;
0611 
0612         new_count = old_count - 1;
0613         new_count = cmpxchg(count, old_count, new_count);
0614         if (new_count == old_count)
0615             trace_stack(tr);
0616 
0617         if (!tracing_is_on())
0618             return;
0619 
0620     } while (new_count != old_count);
0621 }
0622 
0623 static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
0624             void *data)
0625 {
0626     struct ftrace_func_mapper *mapper = data;
0627     long *count = NULL;
0628 
0629     if (mapper)
0630         count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
0631 
0632     if (count) {
0633         if (*count <= 0)
0634             return 0;
0635         (*count)--;
0636     }
0637 
0638     return 1;
0639 }
0640 
0641 static void
0642 ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
0643           struct trace_array *tr, struct ftrace_probe_ops *ops,
0644           void *data)
0645 {
0646     if (update_count(ops, ip, data))
0647         ftrace_dump(DUMP_ALL);
0648 }
0649 
0650 /* Only dump the current CPU buffer. */
0651 static void
0652 ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
0653              struct trace_array *tr, struct ftrace_probe_ops *ops,
0654              void *data)
0655 {
0656     if (update_count(ops, ip, data))
0657         ftrace_dump(DUMP_ORIG);
0658 }
0659 
0660 static int
0661 ftrace_probe_print(const char *name, struct seq_file *m,
0662            unsigned long ip, struct ftrace_probe_ops *ops,
0663            void *data)
0664 {
0665     struct ftrace_func_mapper *mapper = data;
0666     long *count = NULL;
0667 
0668     seq_printf(m, "%ps:%s", (void *)ip, name);
0669 
0670     if (mapper)
0671         count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
0672 
0673     if (count)
0674         seq_printf(m, ":count=%ld\n", *count);
0675     else
0676         seq_puts(m, ":unlimited\n");
0677 
0678     return 0;
0679 }
0680 
0681 static int
0682 ftrace_traceon_print(struct seq_file *m, unsigned long ip,
0683              struct ftrace_probe_ops *ops,
0684              void *data)
0685 {
0686     return ftrace_probe_print("traceon", m, ip, ops, data);
0687 }
0688 
0689 static int
0690 ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
0691              struct ftrace_probe_ops *ops, void *data)
0692 {
0693     return ftrace_probe_print("traceoff", m, ip, ops, data);
0694 }
0695 
0696 static int
0697 ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
0698             struct ftrace_probe_ops *ops, void *data)
0699 {
0700     return ftrace_probe_print("stacktrace", m, ip, ops, data);
0701 }
0702 
0703 static int
0704 ftrace_dump_print(struct seq_file *m, unsigned long ip,
0705             struct ftrace_probe_ops *ops, void *data)
0706 {
0707     return ftrace_probe_print("dump", m, ip, ops, data);
0708 }
0709 
0710 static int
0711 ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
0712             struct ftrace_probe_ops *ops, void *data)
0713 {
0714     return ftrace_probe_print("cpudump", m, ip, ops, data);
0715 }
0716 
0717 
0718 static int
0719 ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
0720           unsigned long ip, void *init_data, void **data)
0721 {
0722     struct ftrace_func_mapper *mapper = *data;
0723 
0724     if (!mapper) {
0725         mapper = allocate_ftrace_func_mapper();
0726         if (!mapper)
0727             return -ENOMEM;
0728         *data = mapper;
0729     }
0730 
0731     return ftrace_func_mapper_add_ip(mapper, ip, init_data);
0732 }
0733 
0734 static void
0735 ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
0736           unsigned long ip, void *data)
0737 {
0738     struct ftrace_func_mapper *mapper = data;
0739 
0740     if (!ip) {
0741         free_ftrace_func_mapper(mapper, NULL);
0742         return;
0743     }
0744 
0745     ftrace_func_mapper_remove_ip(mapper, ip);
0746 }
0747 
0748 static struct ftrace_probe_ops traceon_count_probe_ops = {
0749     .func           = ftrace_traceon_count,
0750     .print          = ftrace_traceon_print,
0751     .init           = ftrace_count_init,
0752     .free           = ftrace_count_free,
0753 };
0754 
0755 static struct ftrace_probe_ops traceoff_count_probe_ops = {
0756     .func           = ftrace_traceoff_count,
0757     .print          = ftrace_traceoff_print,
0758     .init           = ftrace_count_init,
0759     .free           = ftrace_count_free,
0760 };
0761 
0762 static struct ftrace_probe_ops stacktrace_count_probe_ops = {
0763     .func           = ftrace_stacktrace_count,
0764     .print          = ftrace_stacktrace_print,
0765     .init           = ftrace_count_init,
0766     .free           = ftrace_count_free,
0767 };
0768 
0769 static struct ftrace_probe_ops dump_probe_ops = {
0770     .func           = ftrace_dump_probe,
0771     .print          = ftrace_dump_print,
0772     .init           = ftrace_count_init,
0773     .free           = ftrace_count_free,
0774 };
0775 
0776 static struct ftrace_probe_ops cpudump_probe_ops = {
0777     .func           = ftrace_cpudump_probe,
0778     .print          = ftrace_cpudump_print,
0779 };
0780 
0781 static struct ftrace_probe_ops traceon_probe_ops = {
0782     .func           = ftrace_traceon,
0783     .print          = ftrace_traceon_print,
0784 };
0785 
0786 static struct ftrace_probe_ops traceoff_probe_ops = {
0787     .func           = ftrace_traceoff,
0788     .print          = ftrace_traceoff_print,
0789 };
0790 
0791 static struct ftrace_probe_ops stacktrace_probe_ops = {
0792     .func           = ftrace_stacktrace,
0793     .print          = ftrace_stacktrace_print,
0794 };
0795 
0796 static int
0797 ftrace_trace_probe_callback(struct trace_array *tr,
0798                 struct ftrace_probe_ops *ops,
0799                 struct ftrace_hash *hash, char *glob,
0800                 char *cmd, char *param, int enable)
0801 {
0802     void *count = (void *)-1;
0803     char *number;
0804     int ret;
0805 
0806     /* hash funcs only work with set_ftrace_filter */
0807     if (!enable)
0808         return -EINVAL;
0809 
0810     if (glob[0] == '!')
0811         return unregister_ftrace_function_probe_func(glob+1, tr, ops);
0812 
0813     if (!param)
0814         goto out_reg;
0815 
0816     number = strsep(&param, ":");
0817 
0818     if (!strlen(number))
0819         goto out_reg;
0820 
0821     /*
0822      * We use the callback data field (which is a pointer)
0823      * as our counter.
0824      */
0825     ret = kstrtoul(number, 0, (unsigned long *)&count);
0826     if (ret)
0827         return ret;
0828 
0829  out_reg:
0830     ret = register_ftrace_function_probe(glob, tr, ops, count);
0831 
0832     return ret < 0 ? ret : 0;
0833 }
0834 
0835 static int
0836 ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
0837                 char *glob, char *cmd, char *param, int enable)
0838 {
0839     struct ftrace_probe_ops *ops;
0840 
0841     if (!tr)
0842         return -ENODEV;
0843 
0844     /* we register both traceon and traceoff to this callback */
0845     if (strcmp(cmd, "traceon") == 0)
0846         ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
0847     else
0848         ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
0849 
0850     return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
0851                        param, enable);
0852 }
0853 
0854 static int
0855 ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
0856                char *glob, char *cmd, char *param, int enable)
0857 {
0858     struct ftrace_probe_ops *ops;
0859 
0860     if (!tr)
0861         return -ENODEV;
0862 
0863     ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
0864 
0865     return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
0866                        param, enable);
0867 }
0868 
0869 static int
0870 ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
0871                char *glob, char *cmd, char *param, int enable)
0872 {
0873     struct ftrace_probe_ops *ops;
0874 
0875     if (!tr)
0876         return -ENODEV;
0877 
0878     ops = &dump_probe_ops;
0879 
0880     /* Only dump once. */
0881     return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
0882                        "1", enable);
0883 }
0884 
0885 static int
0886 ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
0887                char *glob, char *cmd, char *param, int enable)
0888 {
0889     struct ftrace_probe_ops *ops;
0890 
0891     if (!tr)
0892         return -ENODEV;
0893 
0894     ops = &cpudump_probe_ops;
0895 
0896     /* Only dump once. */
0897     return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
0898                        "1", enable);
0899 }
0900 
0901 static struct ftrace_func_command ftrace_traceon_cmd = {
0902     .name           = "traceon",
0903     .func           = ftrace_trace_onoff_callback,
0904 };
0905 
0906 static struct ftrace_func_command ftrace_traceoff_cmd = {
0907     .name           = "traceoff",
0908     .func           = ftrace_trace_onoff_callback,
0909 };
0910 
0911 static struct ftrace_func_command ftrace_stacktrace_cmd = {
0912     .name           = "stacktrace",
0913     .func           = ftrace_stacktrace_callback,
0914 };
0915 
0916 static struct ftrace_func_command ftrace_dump_cmd = {
0917     .name           = "dump",
0918     .func           = ftrace_dump_callback,
0919 };
0920 
0921 static struct ftrace_func_command ftrace_cpudump_cmd = {
0922     .name           = "cpudump",
0923     .func           = ftrace_cpudump_callback,
0924 };
0925 
0926 static int __init init_func_cmd_traceon(void)
0927 {
0928     int ret;
0929 
0930     ret = register_ftrace_command(&ftrace_traceoff_cmd);
0931     if (ret)
0932         return ret;
0933 
0934     ret = register_ftrace_command(&ftrace_traceon_cmd);
0935     if (ret)
0936         goto out_free_traceoff;
0937 
0938     ret = register_ftrace_command(&ftrace_stacktrace_cmd);
0939     if (ret)
0940         goto out_free_traceon;
0941 
0942     ret = register_ftrace_command(&ftrace_dump_cmd);
0943     if (ret)
0944         goto out_free_stacktrace;
0945 
0946     ret = register_ftrace_command(&ftrace_cpudump_cmd);
0947     if (ret)
0948         goto out_free_dump;
0949 
0950     return 0;
0951 
0952  out_free_dump:
0953     unregister_ftrace_command(&ftrace_dump_cmd);
0954  out_free_stacktrace:
0955     unregister_ftrace_command(&ftrace_stacktrace_cmd);
0956  out_free_traceon:
0957     unregister_ftrace_command(&ftrace_traceon_cmd);
0958  out_free_traceoff:
0959     unregister_ftrace_command(&ftrace_traceoff_cmd);
0960 
0961     return ret;
0962 }
0963 #else
0964 static inline int init_func_cmd_traceon(void)
0965 {
0966     return 0;
0967 }
0968 #endif /* CONFIG_DYNAMIC_FTRACE */
0969 
0970 __init int init_function_trace(void)
0971 {
0972     init_func_cmd_traceon();
0973     return register_tracer(&function_trace);
0974 }