// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}

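/*
 * Map a combination of the option flags to the matching trace callback.
 * Flag combinations outside TRACE_FUNC_OPT_MASK have no handler and
 * yield NULL.
 */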
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

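/*
 * Lazily allocate the per-CPU state needed by the "func-no-repeats"
 * option. Returns false only if the allocation fails.
 */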
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

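/* Default callback: record one function entry per call, no extras. */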
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

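/*
 * Return true if this call repeats the previous (ip, parent_ip) pair;
 * in that case only the per-CPU repeat count and timestamp are updated
 * and no event is written.
 */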
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

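/*
 * Flush the pending repeat event, if any, and remember the new
 * (ip, parent_ip) pair as the last function that was called.
 */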
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can
	 * see, the only damage that it can cause is to mess up the
	 * repetition counter without valuable data being lost.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

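/*
 * Toggle the func_stack_trace / func-no-repeats options at runtime:
 * select the callback matching the new flag combination and swap it in
 * by re-registering this instance's ftrace_ops.
 */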
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count. This function
	 * can be called at the same time on multiple CPUs, and it is fine
	 * if more than one CPU flips the tracing state, as the second
	 * switch is a nop. What must be synchronized is the count: it may
	 * only be decremented once per state change.
	 *
	 * The count is read first, and smp_rmb() makes sure it is loaded
	 * before the tracing state is checked. The state is then switched,
	 * and smp_wmb() makes the new state visible before the count is
	 * decremented, so the counter is decremented exactly once per
	 * switch.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *     ftrace_ops_assist_func()
 *       ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *     ftrace_ops_assist_func()
 *       ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

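/*
 * Decrement the per-ip counter if one exists. Returns 1 if the probe
 * body should still run (no counter, or the count is not yet
 * exhausted), 0 otherwise.
 */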
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

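/* Called when the probe is unregistered; ip == 0 means "remove all". */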
static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump the current CPU buffer. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

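/*
 * Register the traceon/traceoff/stacktrace/dump/cpudump commands for
 * set_ftrace_filter, unwinding the already-registered ones in reverse
 * order if any registration fails.
 */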
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}