// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
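
/*
 * Note (added commentary, not in the original source): both macros evaluate
 * @cond, warn if it is non-zero, and then call ftrace_kill() to permanently
 * shut down all function tracing, since a failed sanity check here means
 * live kernel text may be in an unknown state. They return the condition so
 * callers can bail out, as done later in this file:
 *
 *	if (FTRACE_WARN_ON(!ops || !ops->trampoline))
 *		return (unsigned long)FTRACE_ADDR;
 */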

/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic or RCU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset it when unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE						\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
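
/*
 * Sizing example (added commentary, not in the original source): with 4K
 * pages on a 64-bit kernel and CONFIG_FUNCTION_GRAPH_TRACER enabled,
 * sizeof(struct ftrace_profile) is 48 bytes (16 for the hlist_node plus
 * four 8-byte counters) and the page header is 16 bytes, giving
 * PROFILES_PER_PAGE = (4096 - 16) / 48 = 85 records per page.
 */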

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	trace_create_file("function_profile_enabled",
			  TRACE_MODE_WRITE, d_tracer, NULL,
			  &ftrace_profile_fops);
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
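
/*
 * Sizing example (added commentary, not in the original source): on x86-64,
 * where struct dyn_ftrace is just two unsigned longs (ip and flags), i.e.
 * 16 bytes, ENTRIES_PER_PAGE is 4096 / 16 = 256 records per 4K page. Other
 * architectures may add a dyn_arch_ftrace field and get fewer entries. The
 * 'order' field above records the power-of-two size of each records block.
 */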

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}
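
/*
 * Illustrative truth table (added commentary, not in the original source)
 * for hash_contains_ip() above:
 *
 *	filter_hash	notrace_hash	result
 *	empty		empty		match (trace everything)
 *	contains ip	empty		match
 *	empty		contains ip	no match (notrace wins)
 *	contains ip	contains ip	no match (notrace wins)
 *	lacks ip	-		no match
 */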

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	/* The search key overloads 'flags' to hold the end of its range */
	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * If @ip matches the ftrace location, return @ip.
 * If @ip matches sym+0, return sym's ftrace location.
 * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	struct dyn_ftrace *rec;
	unsigned long offset;
	unsigned long size;

	rec = lookup_rec(ip, ip);
	if (!rec) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			goto out;

		/* map sym+0 to __fentry__ */
		if (!offset)
			rec = lookup_rec(ip, ip + size - 1);
	}

	if (rec)
		return rec->ip;

out:
	return 0;
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * The TRAMP needs to be set only if rec count
			 * is decremented to one, and the ops that is
			 * left has a trampoline. As TRAMP can only be
			 * enabled if there is only a single ops attached
			 * to it.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any_other(rec, ops))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no-needed to update, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash has below meanings
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 *
 * DIRECT ops does not have IPMODIFY flag, but we still need to check it
 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
 * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
 * the return value to the caller and eventually to the owner of the DIRECT
 * ops.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;
	bool is_ipmodify, is_direct;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
	is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;

	/* neither IPMODIFY nor DIRECT, skip */
	if (!is_ipmodify && !is_direct)
		return 0;

	if (WARN_ON_ONCE(is_ipmodify && is_direct))
		return 0;

	/*
	 * Since the IPMODIFY and DIRECT are very address sensitive
	 * actions, we do not allow ftrace_ops to set all functions to new
	 * hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			if (rec->flags & FTRACE_FL_IPMODIFY) {
				int ret;

				/* Cannot have two ipmodify on same rec */
				if (is_ipmodify)
					goto rollback;

				FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);

				/*
				 * Another ops with IPMODIFY is already
				 * attached. We are now attaching a direct
				 * ops. Run SHARE_IPMODIFY_SELF, to check
				 * whether sharing is supported.
				 */
				if (!ops->ops_func)
					return -EBUSY;
				ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
				if (ret)
					return ret;
			} else if (is_ipmodify) {
				rec->flags |= FTRACE_FL_IPMODIFY;
			}
		} else if (is_ipmodify) {
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		}
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	char ins[MCOUNT_INSN_SIZE];
	int i;

	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
		return;
	}

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
}

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	pr_info("------------[ ftrace bug ]------------\n");

	switch (failed) {
	case -EFAULT:
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(KERN_INFO, ip);
		break;
	case -EINVAL:
		pr_info("ftrace failed to modify ");
		print_ip_sym(KERN_INFO, ip);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		pr_info("ftrace faulted on writing ");
		print_ip_sym(KERN_INFO, ip);
		break;
	default:
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(KERN_INFO, ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}

	FTRACE_WARN_ON_ONCE(1);
}

static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure its disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 * Same for direct calls.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;

		/*
		 * Direct calls are special, as count matters.
		 * We must test the record for direct, if the
		 * DIRECT and DIRECT_EN do not match, but only
		 * if the count is 1. That's because, if the
		 * count is something other than one, we do not
		 * want the direct enabled (it will be done via the
		 * direct helper). But if DIRECT_EN is set, and
		 * the count is not one, we need to clear it.
		 */
		if (ftrace_rec_count(rec) == 1) {
			if (!(rec->flags & FTRACE_FL_DIRECT) !=
			    !(rec->flags & FTRACE_FL_DIRECT_EN))
				flag |= FTRACE_FL_DIRECT;
		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
			flag |= FTRACE_FL_DIRECT;
		}
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}

			if (flag & FTRACE_FL_DIRECT) {
				/*
				 * If there's only one user (direct_ops helper)
				 * then we can call the direct function
				 * directly (no ftrace trampoline).
				 */
				if (ftrace_rec_count(rec) == 1) {
					if (rec->flags & FTRACE_FL_DIRECT)
						rec->flags |= FTRACE_FL_DIRECT_EN;
					else
						rec->flags &= ~FTRACE_FL_DIRECT_EN;
				} else {
					/*
					 * Can only call directly if there's
					 * only one callback to the
					 * function.
					 */
					rec->flags &= ~FTRACE_FL_DIRECT_EN;
				}
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to update from the save
		 *   regs to a non-save regs function or vice versa,
		 *   or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
			return FTRACE_UPDATE_MAKE_CALL;
		}

		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there's no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
	}

	ftrace_bug_type = FTRACE_BUG_NOP;
	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record - set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to true if the record is tracing, false to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
{
	return ftrace_check_record(rec, enable, true);
}

/**
 * ftrace_test_record - check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to true to check if enabled, false if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
{
	return ftrace_check_record(rec, enable, false);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (op == op_exclude || !op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
			   struct ftrace_ops *op)
{
	unsigned long ip = rec->ip;

	while_for_each_ftrace_op(op) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	}

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;

		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/* Protected by rcu_tasks for reading, and direct_mutex for writing */
static struct ftrace_hash *direct_functions = EMPTY_HASH;
static DEFINE_MUTEX(direct_mutex);
int ftrace_direct_func_count;

/*
 * Search the direct_functions hash to see if the given instruction pointer
 * has a direct caller attached to it.
 */
unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = __ftrace_lookup_ip(direct_functions, ip);
	if (!entry)
		return 0;

	return entry->direct;
}

static struct ftrace_func_entry*
ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
		      struct ftrace_hash **free_hash)
{
	struct ftrace_func_entry *entry;

	if (ftrace_hash_empty(direct_functions) ||
	    direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
		struct ftrace_hash *new_hash;
		int size = ftrace_hash_empty(direct_functions) ? 0 :
			direct_functions->count + 1;

		if (size < 32)
			size = 32;

		new_hash = dup_hash(direct_functions, size);
		if (!new_hash)
			return NULL;

		*free_hash = direct_functions;
		direct_functions = new_hash;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->ip = ip;
	entry->direct = addr;
	__add_hash_entry(direct_functions, entry);
	return entry;
}

static void call_direct_funcs(unsigned long ip, unsigned long pip,
			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);
	unsigned long addr;

	addr = ftrace_find_rec_direct(ip);
	if (!addr)
		return;

	arch_ftrace_set_direct_caller(regs, addr);
}

struct ftrace_ops direct_ops = {
	.func		= call_direct_funcs,
	.flags		= FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
			  | FTRACE_OPS_FL_PERMANENT,
	/*
	 * By declaring the main trampoline as this trampoline
	 * it will never have one allocated for it. Allocated
	 * trampolines should not call direct functions.
	 * The direct_ops should only be called by the builtin
	 * ftrace_regs_caller trampoline.
	 */
	.trampoline	= FTRACE_REGS_ADDR,
};
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	unsigned long addr;

	if ((rec->flags & FTRACE_FL_DIRECT) &&
	    (ftrace_rec_count(rec) == 1)) {
		addr = ftrace_find_rec_direct(rec->ip);
		if (addr)
			return addr;
		WARN_ON_ONCE(1);
	}

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}
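
/*
 * Illustrative summary (added commentary, not in the original source): the
 * address chosen for a call site follows this precedence, mirroring the
 * checks in ftrace_get_addr_new() above and ftrace_get_addr_curr() below:
 *
 *   1. FTRACE_FL_DIRECT with a single user -> the registered direct caller
 *   2. FTRACE_FL_TRAMP -> the single attached ops' private trampoline
 *   3. FTRACE_FL_REGS  -> FTRACE_REGS_ADDR (the regs-saving caller)
 *   4. otherwise       -> FTRACE_ADDR (the normal ftrace caller)
 */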

/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	unsigned long addr;

	/* Direct calls take precedence over trampolines */
	if (rec->flags & FTRACE_FL_DIRECT_EN) {
		addr = ftrace_find_rec_direct(rec->ip);
		if (addr)
			return addr;
		WARN_ON_ONCE(1);
	}

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
				(void *)rec->ip, (void *)rec->ip);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		ftrace_bug_type = FTRACE_BUG_CALL;
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		ftrace_bug_type = FTRACE_BUG_NOP;
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}
2623
2624 void __weak ftrace_replace_code(int mod_flags)
2625 {
2626 struct dyn_ftrace *rec;
2627 struct ftrace_page *pg;
2628 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2629 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2630 int failed;
2631
2632 if (unlikely(ftrace_disabled))
2633 return;
2634
2635 do_for_each_ftrace_rec(pg, rec) {
2636
2637 if (rec->flags & FTRACE_FL_DISABLED)
2638 continue;
2639
2640 failed = __ftrace_replace_code(rec, enable);
2641 if (failed) {
2642 ftrace_bug(failed, rec);
2643
2644 return;
2645 }
2646 if (schedulable)
2647 cond_resched();
2648 } while_for_each_ftrace_rec();
2649 }
2650
2651 struct ftrace_rec_iter {
2652 struct ftrace_page *pg;
2653 int index;
2654 };
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2666 {
2667
2668
2669
2670
2671 static struct ftrace_rec_iter ftrace_rec_iter;
2672 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2673
2674 iter->pg = ftrace_pages_start;
2675 iter->index = 0;
2676
2677
2678 while (iter->pg && !iter->pg->index)
2679 iter->pg = iter->pg->next;
2680
2681 if (!iter->pg)
2682 return NULL;
2683
2684 return iter;
2685 }
2686
2687
2688
2689
2690
2691
2692
2693 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2694 {
2695 iter->index++;
2696
2697 if (iter->index >= iter->pg->index) {
2698 iter->pg = iter->pg->next;
2699 iter->index = 0;
2700
2701
2702 while (iter->pg && !iter->pg->index)
2703 iter->pg = iter->pg->next;
2704 }
2705
2706 if (!iter->pg)
2707 return NULL;
2708
2709 return iter;
2710 }
2711
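/**
 * ftrace_rec_iter_record - get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */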
2718 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2719 {
2720 return &iter->pg->records[iter->index];
2721 }
2722
2723 static int
2724 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2725 {
2726 int ret;
2727
2728 if (unlikely(ftrace_disabled))
2729 return 0;
2730
2731 ret = ftrace_init_nop(mod, rec);
2732 if (ret) {
2733 ftrace_bug_type = FTRACE_BUG_INIT;
2734 ftrace_bug(ret, rec);
2735 return 0;
2736 }
2737 return 1;
2738 }
2739
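/*
 * Archs can override this function if they must do something
 * before the modifying code is performed.
 */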
2744 void __weak ftrace_arch_code_modify_prepare(void)
2745 {
2746 }
2747
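/*
 * Archs can override this function if they must do something
 * after the modifying code is performed.
 */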
2752 void __weak ftrace_arch_code_modify_post_process(void)
2753 {
2754 }
2755
2756 void ftrace_modify_all_code(int command)
2757 {
2758 int update = command & FTRACE_UPDATE_TRACE_FUNC;
2759 int mod_flags = 0;
2760 int err = 0;
2761
2762 if (command & FTRACE_MAY_SLEEP)
2763 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2764
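/*
 * If the ftrace_caller calls a ftrace_ops func directly, we need to make
 * sure that it only traces functions it expects to trace. When doing the
 * switch of functions, we need to update to the ftrace_ops_list_func
 * first before the transition between old and new calls are set, as the
 * ftrace_ops_list_func will check the ops hashes to make sure the ops
 * are having the right functions traced.
 */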
2775 if (update) {
2776 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2777 if (FTRACE_WARN_ON(err))
2778 return;
2779 }
2780
2781 if (command & FTRACE_UPDATE_CALLS)
2782 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2783 else if (command & FTRACE_DISABLE_CALLS)
2784 ftrace_replace_code(mod_flags);
2785
2786 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2787 function_trace_op = set_function_trace_op;
2788 smp_wmb();
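/* If irqs are disabled, we are in stop machine */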
2790 if (!irqs_disabled())
2791 smp_call_function(ftrace_sync_ipi, NULL, 1);
2792 err = ftrace_update_ftrace_func(ftrace_trace_function);
2793 if (FTRACE_WARN_ON(err))
2794 return;
2795 }
2796
2797 if (command & FTRACE_START_FUNC_RET)
2798 err = ftrace_enable_ftrace_graph_caller();
2799 else if (command & FTRACE_STOP_FUNC_RET)
2800 err = ftrace_disable_ftrace_graph_caller();
2801 FTRACE_WARN_ON(err);
2802 }
2803
2804 static int __ftrace_modify_code(void *data)
2805 {
2806 int *command = data;
2807
2808 ftrace_modify_all_code(*command);
2809
2810 return 0;
2811 }
2812
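/**
 * ftrace_run_stop_machine - go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, it
 * can call this function.
 */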
2820 void ftrace_run_stop_machine(int command)
2821 {
2822 stop_machine(__ftrace_modify_code, &command, NULL);
2823 }
2824
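/**
 * arch_ftrace_update_code - modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */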
2832 void __weak arch_ftrace_update_code(int command)
2833 {
2834 ftrace_run_stop_machine(command);
2835 }
2836
2837 static void ftrace_run_update_code(int command)
2838 {
2839 ftrace_arch_code_modify_prepare();
2840
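/*
 * By default we use stop_machine() to modify the code.
 * But archs can do what ever they want as long as it
 * is safe. The stop_machine() is the safest, but also
 * produces the most overhead.
 */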
2847 arch_ftrace_update_code(command);
2848
2849 ftrace_arch_code_modify_post_process();
2850 }
2851
2852 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2853 struct ftrace_ops_hash *old_hash)
2854 {
2855 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2856 ops->old_hash.filter_hash = old_hash->filter_hash;
2857 ops->old_hash.notrace_hash = old_hash->notrace_hash;
2858 ftrace_run_update_code(command);
2859 ops->old_hash.filter_hash = NULL;
2860 ops->old_hash.notrace_hash = NULL;
2861 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2862 }
2863
2864 static ftrace_func_t saved_ftrace_func;
2865 static int ftrace_start_up;
2866
2867 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2868 {
2869 }
2870
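/* List of trace_ops that have allocated trampolines */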
2872 static LIST_HEAD(ftrace_ops_trampoline_list);
2873
2874 static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
2875 {
2876 lockdep_assert_held(&ftrace_lock);
2877 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
2878 }
2879
2880 static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
2881 {
2882 lockdep_assert_held(&ftrace_lock);
2883 list_del_rcu(&ops->list);
2884 synchronize_rcu();
2885 }
2886
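/*
 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for
 * symbols that ftrace creates itself (such as allocated trampolines),
 * even though it is not a real module.
 */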
2892 #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
2893 #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
2894
2895 static void ftrace_trampoline_free(struct ftrace_ops *ops)
2896 {
2897 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
2898 ops->trampoline) {
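/*
 * Record the text poke event before the ksymbol unregister
 * event.
 */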
2903 perf_event_text_poke((void *)ops->trampoline,
2904 (void *)ops->trampoline,
2905 ops->trampoline_size, NULL, 0);
2906 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
2907 ops->trampoline, ops->trampoline_size,
2908 true, FTRACE_TRAMPOLINE_SYM);
2909
2910 ftrace_remove_trampoline_from_kallsyms(ops);
2911 }
2912
2913 arch_ftrace_trampoline_free(ops);
2914 }
2915
2916 static void ftrace_startup_enable(int command)
2917 {
2918 if (saved_ftrace_func != ftrace_trace_function) {
2919 saved_ftrace_func = ftrace_trace_function;
2920 command |= FTRACE_UPDATE_TRACE_FUNC;
2921 }
2922
2923 if (!command || !ftrace_enabled)
2924 return;
2925
2926 ftrace_run_update_code(command);
2927 }
2928
2929 static void ftrace_startup_all(int command)
2930 {
2931 update_all_ops = true;
2932 ftrace_startup_enable(command);
2933 update_all_ops = false;
2934 }
2935
2936 int ftrace_startup(struct ftrace_ops *ops, int command)
2937 {
2938 int ret;
2939
2940 if (unlikely(ftrace_disabled))
2941 return -ENODEV;
2942
2943 ret = __register_ftrace_function(ops);
2944 if (ret)
2945 return ret;
2946
2947 ftrace_start_up++;
2948
2949
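/*
 * Note that ftrace probes uses this to start up
 * and modify functions it will probe. But we still
 * set the ADDING flag for modification, as probes
 * do not have trampolines. If they add them in the
 * future, then the probes will need to distinguish
 * between adding and updating probes.
 */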
2957 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2958
2959 ret = ftrace_hash_ipmodify_enable(ops);
2960 if (ret < 0) {
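/* Rollback registration process */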
2962 __unregister_ftrace_function(ops);
2963 ftrace_start_up--;
2964 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2965 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2966 ftrace_trampoline_free(ops);
2967 return ret;
2968 }
2969
2970 if (ftrace_hash_rec_enable(ops, 1))
2971 command |= FTRACE_UPDATE_CALLS;
2972
2973 ftrace_startup_enable(command);
2974
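/*
 * If ftrace is in an undefined state, we just remove ops from list
 * to prevent the NULL pointer, instead of totally rolling it back and
 * freeing the trampoline, because those actions could cause further
 * damage.
 */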
2980 if (unlikely(ftrace_disabled)) {
2981 __unregister_ftrace_function(ops);
2982 return -ENODEV;
2983 }
2984
2985 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2986
2987 return 0;
2988 }
2989
2990 int ftrace_shutdown(struct ftrace_ops *ops, int command)
2991 {
2992 int ret;
2993
2994 if (unlikely(ftrace_disabled))
2995 return -ENODEV;
2996
2997 ret = __unregister_ftrace_function(ops);
2998 if (ret)
2999 return ret;
3000
3001 ftrace_start_up--;
3002
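/*
 * Just warn in case of unbalance, no need to kill ftrace, it's not
 * critical but the ftrace_call callers may be never nopped again after
 * further ftrace uses.
 */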
3007 WARN_ON_ONCE(ftrace_start_up < 0);
3008
3009
3010 ftrace_hash_ipmodify_disable(ops);
3011
3012 if (ftrace_hash_rec_disable(ops, 1))
3013 command |= FTRACE_UPDATE_CALLS;
3014
3015 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3016
3017 if (saved_ftrace_func != ftrace_trace_function) {
3018 saved_ftrace_func = ftrace_trace_function;
3019 command |= FTRACE_UPDATE_TRACE_FUNC;
3020 }
3021
3022 if (!command || !ftrace_enabled) {
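/*
 * If these are dynamic ops, they still need their data freed.
 * Since function tracing is not currently active, we can just
 * free them without synchronizing all CPUs.
 */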
3029 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
3030 goto free_ops;
3031
3032 return 0;
3033 }
3034
3035
3036
3037
3038
3039 ops->flags |= FTRACE_OPS_FL_REMOVING;
3040 removed_ops = ops;
3041
3042
3043 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
3044 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
3045
3046 ftrace_run_update_code(command);
3047
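/*
 * If there's no more ops registered with ftrace, run a
 * sanity check to make sure all rec flags are cleared.
 */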
3052 if (rcu_dereference_protected(ftrace_ops_list,
3053 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
3054 struct ftrace_page *pg;
3055 struct dyn_ftrace *rec;
3056
3057 do_for_each_ftrace_rec(pg, rec) {
3058 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
3059 pr_warn(" %pS flags:%lx\n",
3060 (void *)rec->ip, rec->flags);
3061 } while_for_each_ftrace_rec();
3062 }
3063
3064 ops->old_hash.filter_hash = NULL;
3065 ops->old_hash.notrace_hash = NULL;
3066
3067 removed_ops = NULL;
3068 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
3069
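/*
 * Dynamic ops may be freed, we must make sure that all
 * callers are done before leaving this function.
 */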
3076 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
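/*
 * We need to do a hard force of sched synchronization.
 * This is because we use preempt_disable() to do RCU, but
 * the function tracers can be called where RCU is not watching
 * (like before user_exit()). We can not rely on the RCU
 * infrastructure to do the synchronization, thus we must do it
 * ourselves.
 */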
3085 synchronize_rcu_tasks_rude();
3086
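/*
 * When the kernel is preemptive, tasks can be preempted
 * while on a ftrace trampoline. Just scheduling a task on
 * a CPU is not good enough to flush them. Calling
 * synchronize_rcu_tasks() will wait for those tasks to
 * execute and either schedule voluntarily or enter user space.
 */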
3094 if (IS_ENABLED(CONFIG_PREEMPTION))
3095 synchronize_rcu_tasks();
3096
3097 free_ops:
3098 ftrace_trampoline_free(ops);
3099 }
3100
3101 return 0;
3102 }
3103
3104 static u64 ftrace_update_time;
3105 unsigned long ftrace_update_tot_cnt;
3106 unsigned long ftrace_number_of_pages;
3107 unsigned long ftrace_number_of_groups;
3108
3109 static inline int ops_traces_mod(struct ftrace_ops *ops)
3110 {
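/*
 * An empty filter_hash defaults to tracing the module, but the
 * notrace hash requires a test of the individual module functions.
 */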
3115 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3116 ftrace_hash_empty(ops->func_hash->notrace_hash);
3117 }
3118
3119 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3120 {
3121 bool init_nop = ftrace_need_init_nop();
3122 struct ftrace_page *pg;
3123 struct dyn_ftrace *p;
3124 u64 start, stop;
3125 unsigned long update_cnt = 0;
3126 unsigned long rec_flags = 0;
3127 int i;
3128
3129 start = ftrace_now(raw_smp_processor_id());
3130
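/*
 * When a module is loaded, this function is called to convert
 * the calls to mcount in its text to nops, and also to create
 * an entry in the ftrace data. Now, if ftrace is activated
 * after this call, but before the module sets its text to
 * read-only, the modification of enabling ftrace can fail if
 * the read-only is done while ftrace is converting the calls.
 * To prevent this, the module's records are set as disabled
 * and will be enabled after the call to set the module's text
 * to read-only.
 */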
3142 if (mod)
3143 rec_flags |= FTRACE_FL_DISABLED;
3144
3145 for (pg = new_pgs; pg; pg = pg->next) {
3146
3147 for (i = 0; i < pg->index; i++) {
3148
3149
3150 if (unlikely(ftrace_disabled))
3151 return -1;
3152
3153 p = &pg->records[i];
3154 p->flags = rec_flags;
3155
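/*
 * Do the initial record conversion from the mcount jump
 * to the NOP instruction.
 */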
3160 if (init_nop && !ftrace_nop_initialize(mod, p))
3161 break;
3162
3163 update_cnt++;
3164 }
3165 }
3166
3167 stop = ftrace_now(raw_smp_processor_id());
3168 ftrace_update_time = stop - start;
3169 ftrace_update_tot_cnt += update_cnt;
3170
3171 return 0;
3172 }
3173
3174 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3175 {
3176 int order;
3177 int pages;
3178 int cnt;
3179
3180 if (WARN_ON(!count))
3181 return -EINVAL;
3182
3183
3184 pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3185 order = fls(pages) - 1;
3186
3187 again:
3188 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3189
3190 if (!pg->records) {
3191
3192 if (!order)
3193 return -ENOMEM;
3194 order >>= 1;
3195 goto again;
3196 }
3197
3198 ftrace_number_of_pages += 1 << order;
3199 ftrace_number_of_groups++;
3200
3201 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3202 pg->order = order;
3203
3204 if (cnt > count)
3205 cnt = count;
3206
3207 return cnt;
3208 }
3209
3210 static struct ftrace_page *
3211 ftrace_allocate_pages(unsigned long num_to_init)
3212 {
3213 struct ftrace_page *start_pg;
3214 struct ftrace_page *pg;
3215 int cnt;
3216
3217 if (!num_to_init)
3218 return NULL;
3219
3220 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3221 if (!pg)
3222 return NULL;
3223
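/*
 * Try to allocate as much as possible in one contiguous
 * location that fills in all of the space. We want to
 * waste as little space as possible.
 */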
3229 for (;;) {
3230 cnt = ftrace_allocate_records(pg, num_to_init);
3231 if (cnt < 0)
3232 goto free_pages;
3233
3234 num_to_init -= cnt;
3235 if (!num_to_init)
3236 break;
3237
3238 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3239 if (!pg->next)
3240 goto free_pages;
3241
3242 pg = pg->next;
3243 }
3244
3245 return start_pg;
3246
3247 free_pages:
3248 pg = start_pg;
3249 while (pg) {
3250 if (pg->records) {
3251 free_pages((unsigned long)pg->records, pg->order);
3252 ftrace_number_of_pages -= 1 << pg->order;
3253 }
3254 start_pg = pg->next;
3255 kfree(pg);
3256 pg = start_pg;
3257 ftrace_number_of_groups--;
3258 }
3259 pr_info("ftrace: FAILED to allocate memory for functions\n");
3260 return NULL;
3261 }
3262
3263 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4)
3264
3265 struct ftrace_iterator {
3266 loff_t pos;
3267 loff_t func_pos;
3268 loff_t mod_pos;
3269 struct ftrace_page *pg;
3270 struct dyn_ftrace *func;
3271 struct ftrace_func_probe *probe;
3272 struct ftrace_func_entry *probe_entry;
3273 struct trace_parser parser;
3274 struct ftrace_hash *hash;
3275 struct ftrace_ops *ops;
3276 struct trace_array *tr;
3277 struct list_head *mod_list;
3278 int pidx;
3279 int idx;
3280 unsigned flags;
3281 };
3282
3283 static void *
3284 t_probe_next(struct seq_file *m, loff_t *pos)
3285 {
3286 struct ftrace_iterator *iter = m->private;
3287 struct trace_array *tr = iter->ops->private;
3288 struct list_head *func_probes;
3289 struct ftrace_hash *hash;
3290 struct list_head *next;
3291 struct hlist_node *hnd = NULL;
3292 struct hlist_head *hhd;
3293 int size;
3294
3295 (*pos)++;
3296 iter->pos = *pos;
3297
3298 if (!tr)
3299 return NULL;
3300
3301 func_probes = &tr->func_probes;
3302 if (list_empty(func_probes))
3303 return NULL;
3304
3305 if (!iter->probe) {
3306 next = func_probes->next;
3307 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3308 }
3309
3310 if (iter->probe_entry)
3311 hnd = &iter->probe_entry->hlist;
3312
3313 hash = iter->probe->ops.func_hash->filter_hash;
3314
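/*
 * A probe being registered may temporarily have an empty hash
 * and it's at the end of the func_probes list.
 */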
3319 if (!hash || hash == EMPTY_HASH)
3320 return NULL;
3321
3322 size = 1 << hash->size_bits;
3323
3324 retry:
3325 if (iter->pidx >= size) {
3326 if (iter->probe->list.next == func_probes)
3327 return NULL;
3328 next = iter->probe->list.next;
3329 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3330 hash = iter->probe->ops.func_hash->filter_hash;
3331 size = 1 << hash->size_bits;
3332 iter->pidx = 0;
3333 }
3334
3335 hhd = &hash->buckets[iter->pidx];
3336
3337 if (hlist_empty(hhd)) {
3338 iter->pidx++;
3339 hnd = NULL;
3340 goto retry;
3341 }
3342
3343 if (!hnd)
3344 hnd = hhd->first;
3345 else {
3346 hnd = hnd->next;
3347 if (!hnd) {
3348 iter->pidx++;
3349 goto retry;
3350 }
3351 }
3352
3353 if (WARN_ON_ONCE(!hnd))
3354 return NULL;
3355
3356 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3357
3358 return iter;
3359 }
3360
3361 static void *t_probe_start(struct seq_file *m, loff_t *pos)
3362 {
3363 struct ftrace_iterator *iter = m->private;
3364 void *p = NULL;
3365 loff_t l;
3366
3367 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3368 return NULL;
3369
3370 if (iter->mod_pos > *pos)
3371 return NULL;
3372
3373 iter->probe = NULL;
3374 iter->probe_entry = NULL;
3375 iter->pidx = 0;
3376 for (l = 0; l <= (*pos - iter->mod_pos); ) {
3377 p = t_probe_next(m, &l);
3378 if (!p)
3379 break;
3380 }
3381 if (!p)
3382 return NULL;
3383
3384
3385 iter->flags |= FTRACE_ITER_PROBE;
3386
3387 return iter;
3388 }
3389
3390 static int
3391 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3392 {
3393 struct ftrace_func_entry *probe_entry;
3394 struct ftrace_probe_ops *probe_ops;
3395 struct ftrace_func_probe *probe;
3396
3397 probe = iter->probe;
3398 probe_entry = iter->probe_entry;
3399
3400 if (WARN_ON_ONCE(!probe || !probe_entry))
3401 return -EIO;
3402
3403 probe_ops = probe->probe_ops;
3404
3405 if (probe_ops->print)
3406 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3407
3408 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3409 (void *)probe_ops->func);
3410
3411 return 0;
3412 }
3413
3414 static void *
3415 t_mod_next(struct seq_file *m, loff_t *pos)
3416 {
3417 struct ftrace_iterator *iter = m->private;
3418 struct trace_array *tr = iter->tr;
3419
3420 (*pos)++;
3421 iter->pos = *pos;
3422
3423 iter->mod_list = iter->mod_list->next;
3424
3425 if (iter->mod_list == &tr->mod_trace ||
3426 iter->mod_list == &tr->mod_notrace) {
3427 iter->flags &= ~FTRACE_ITER_MOD;
3428 return NULL;
3429 }
3430
3431 iter->mod_pos = *pos;
3432
3433 return iter;
3434 }
3435
3436 static void *t_mod_start(struct seq_file *m, loff_t *pos)
3437 {
3438 struct ftrace_iterator *iter = m->private;
3439 void *p = NULL;
3440 loff_t l;
3441
3442 if (iter->func_pos > *pos)
3443 return NULL;
3444
3445 iter->mod_pos = iter->func_pos;
3446
3447
3448 if (!iter->tr)
3449 return NULL;
3450
3451 for (l = 0; l <= (*pos - iter->func_pos); ) {
3452 p = t_mod_next(m, &l);
3453 if (!p)
3454 break;
3455 }
3456 if (!p) {
3457 iter->flags &= ~FTRACE_ITER_MOD;
3458 return t_probe_start(m, pos);
3459 }
3460
3461
3462 iter->flags |= FTRACE_ITER_MOD;
3463
3464 return iter;
3465 }
3466
3467 static int
3468 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3469 {
3470 struct ftrace_mod_load *ftrace_mod;
3471 struct trace_array *tr = iter->tr;
3472
3473 if (WARN_ON_ONCE(!iter->mod_list) ||
3474 iter->mod_list == &tr->mod_trace ||
3475 iter->mod_list == &tr->mod_notrace)
3476 return -EIO;
3477
3478 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3479
3480 if (ftrace_mod->func)
3481 seq_printf(m, "%s", ftrace_mod->func);
3482 else
3483 seq_putc(m, '*');
3484
3485 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3486
3487 return 0;
3488 }
3489
3490 static void *
3491 t_func_next(struct seq_file *m, loff_t *pos)
3492 {
3493 struct ftrace_iterator *iter = m->private;
3494 struct dyn_ftrace *rec = NULL;
3495
3496 (*pos)++;
3497
3498 retry:
3499 if (iter->idx >= iter->pg->index) {
3500 if (iter->pg->next) {
3501 iter->pg = iter->pg->next;
3502 iter->idx = 0;
3503 goto retry;
3504 }
3505 } else {
3506 rec = &iter->pg->records[iter->idx++];
3507 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3508 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3509
3510 ((iter->flags & FTRACE_ITER_ENABLED) &&
3511 !(rec->flags & FTRACE_FL_ENABLED))) {
3512
3513 rec = NULL;
3514 goto retry;
3515 }
3516 }
3517
3518 if (!rec)
3519 return NULL;
3520
3521 iter->pos = iter->func_pos = *pos;
3522 iter->func = rec;
3523
3524 return iter;
3525 }
3526
3527 static void *
3528 t_next(struct seq_file *m, void *v, loff_t *pos)
3529 {
3530 struct ftrace_iterator *iter = m->private;
3531 loff_t l = *pos;
3532 void *ret;
3533
3534 if (unlikely(ftrace_disabled))
3535 return NULL;
3536
3537 if (iter->flags & FTRACE_ITER_PROBE)
3538 return t_probe_next(m, pos);
3539
3540 if (iter->flags & FTRACE_ITER_MOD)
3541 return t_mod_next(m, pos);
3542
3543 if (iter->flags & FTRACE_ITER_PRINTALL) {
3544
3545 (*pos)++;
3546 return t_mod_start(m, &l);
3547 }
3548
3549 ret = t_func_next(m, pos);
3550
3551 if (!ret)
3552 return t_mod_start(m, &l);
3553
3554 return ret;
3555 }
3556
3557 static void reset_iter_read(struct ftrace_iterator *iter)
3558 {
3559 iter->pos = 0;
3560 iter->func_pos = 0;
3561 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3562 }
3563
3564 static void *t_start(struct seq_file *m, loff_t *pos)
3565 {
3566 struct ftrace_iterator *iter = m->private;
3567 void *p = NULL;
3568 loff_t l;
3569
3570 mutex_lock(&ftrace_lock);
3571
3572 if (unlikely(ftrace_disabled))
3573 return NULL;
3574
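/*
 * If an lseek was done, then reset and start from beginning.
 */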
3578 if (*pos < iter->pos)
3579 reset_iter_read(iter);
3580
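/*
 * For set_ftrace_filter reading, if we have the filter
 * off, we can short cut and just print out that all
 * functions are enabled.
 */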
3586 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3587 ftrace_hash_empty(iter->hash)) {
3588 iter->func_pos = 1;
3589 if (*pos > 0)
3590 return t_mod_start(m, pos);
3591 iter->flags |= FTRACE_ITER_PRINTALL;
3592
3593 iter->flags &= ~FTRACE_ITER_PROBE;
3594 return iter;
3595 }
3596
3597 if (iter->flags & FTRACE_ITER_MOD)
3598 return t_mod_start(m, pos);
3599
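/*
 * Unfortunately, we need to restart at ftrace_pages_start
 * every time we let go of the ftrace_lock. This is because
 * those pointers can change without the lock.
 */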
3605 iter->pg = ftrace_pages_start;
3606 iter->idx = 0;
3607 for (l = 0; l <= *pos; ) {
3608 p = t_func_next(m, &l);
3609 if (!p)
3610 break;
3611 }
3612
3613 if (!p)
3614 return t_mod_start(m, pos);
3615
3616 return iter;
3617 }
3618
3619 static void t_stop(struct seq_file *m, void *p)
3620 {
3621 mutex_unlock(&ftrace_lock);
3622 }
3623
3624 void * __weak
3625 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3626 {
3627 return NULL;
3628 }
3629
3630 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3631 struct dyn_ftrace *rec)
3632 {
3633 void *ptr;
3634
3635 ptr = arch_ftrace_trampoline_func(ops, rec);
3636 if (ptr)
3637 seq_printf(m, " ->%pS", ptr);
3638 }
3639
3640 #ifdef FTRACE_MCOUNT_MAX_OFFSET
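/*
 * Weak functions can still have an mcount/fentry that is saved in
 * the __mcount_loc section. These can be detected by having a
 * symbol offset of greater than FTRACE_MCOUNT_MAX_OFFSET, as the
 * symbol found by kallsyms is not the function that the mcount/fentry
 * is part of. The offset is much greater in these cases.
 *
 * Test the record to see if its ip maps back to a valid symbol,
 * and if not, mark it disabled.
 */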
3651 static int test_for_valid_rec(struct dyn_ftrace *rec)
3652 {
3653 char str[KSYM_SYMBOL_LEN];
3654 unsigned long offset;
3655 const char *ret;
3656
3657 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
3658
3659
3660 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3661 rec->flags |= FTRACE_FL_DISABLED;
3662 return 0;
3663 }
3664 return 1;
3665 }
3666
3667 static struct workqueue_struct *ftrace_check_wq __initdata;
3668 static struct work_struct ftrace_check_work __initdata;
3669
3670
3671
3672
3673 static __init void ftrace_check_work_func(struct work_struct *work)
3674 {
3675 struct ftrace_page *pg;
3676 struct dyn_ftrace *rec;
3677
3678 mutex_lock(&ftrace_lock);
3679 do_for_each_ftrace_rec(pg, rec) {
3680 test_for_valid_rec(rec);
3681 } while_for_each_ftrace_rec();
3682 mutex_unlock(&ftrace_lock);
3683 }
3684
3685 static int __init ftrace_check_for_weak_functions(void)
3686 {
3687 INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
3688
3689 ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
3690
3691 queue_work(ftrace_check_wq, &ftrace_check_work);
3692 return 0;
3693 }
3694
3695 static int __init ftrace_check_sync(void)
3696 {
3697
3698 if (ftrace_check_wq)
3699 destroy_workqueue(ftrace_check_wq);
3700 return 0;
3701 }
3702
3703 late_initcall_sync(ftrace_check_sync);
3704 subsys_initcall(ftrace_check_for_weak_functions);
3705
3706 static int print_rec(struct seq_file *m, unsigned long ip)
3707 {
3708 unsigned long offset;
3709 char str[KSYM_SYMBOL_LEN];
3710 char *modname;
3711 const char *ret;
3712
3713 ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
3714
3715 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3716 snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
3717 FTRACE_INVALID_FUNCTION, offset);
3718 ret = NULL;
3719 }
3720
3721 seq_puts(m, str);
3722 if (modname)
3723 seq_printf(m, " [%s]", modname);
3724 return ret == NULL ? -1 : 0;
3725 }
3726 #else
3727 static inline int test_for_valid_rec(struct dyn_ftrace *rec)
3728 {
3729 return 1;
3730 }
3731
3732 static inline int print_rec(struct seq_file *m, unsigned long ip)
3733 {
3734 seq_printf(m, "%ps", (void *)ip);
3735 return 0;
3736 }
3737 #endif
3738
3739 static int t_show(struct seq_file *m, void *v)
3740 {
3741 struct ftrace_iterator *iter = m->private;
3742 struct dyn_ftrace *rec;
3743
3744 if (iter->flags & FTRACE_ITER_PROBE)
3745 return t_probe_show(m, iter);
3746
3747 if (iter->flags & FTRACE_ITER_MOD)
3748 return t_mod_show(m, iter);
3749
3750 if (iter->flags & FTRACE_ITER_PRINTALL) {
3751 if (iter->flags & FTRACE_ITER_NOTRACE)
3752 seq_puts(m, "#### no functions disabled ####\n");
3753 else
3754 seq_puts(m, "#### all functions enabled ####\n");
3755 return 0;
3756 }
3757
3758 rec = iter->func;
3759
3760 if (!rec)
3761 return 0;
3762
3763 if (print_rec(m, rec->ip)) {
3764
3765 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
3766 seq_putc(m, '\n');
3767 return 0;
3768 }
3769
3770 if (iter->flags & FTRACE_ITER_ENABLED) {
3771 struct ftrace_ops *ops;
3772
3773 seq_printf(m, " (%ld)%s%s%s",
3774 ftrace_rec_count(rec),
3775 rec->flags & FTRACE_FL_REGS ? " R" : " ",
3776 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ",
3777 rec->flags & FTRACE_FL_DIRECT ? " D" : " ");
3778 if (rec->flags & FTRACE_FL_TRAMP_EN) {
3779 ops = ftrace_find_tramp_ops_any(rec);
3780 if (ops) {
3781 do {
3782 seq_printf(m, "\ttramp: %pS (%pS)",
3783 (void *)ops->trampoline,
3784 (void *)ops->func);
3785 add_trampoline_func(m, ops, rec);
3786 ops = ftrace_find_tramp_ops_next(rec, ops);
3787 } while (ops);
3788 } else
3789 seq_puts(m, "\ttramp: ERROR!");
3790 } else {
3791 add_trampoline_func(m, NULL, rec);
3792 }
3793 if (rec->flags & FTRACE_FL_DIRECT) {
3794 unsigned long direct;
3795
3796 direct = ftrace_find_rec_direct(rec->ip);
3797 if (direct)
3798 seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3799 }
3800 }
3801
3802 seq_putc(m, '\n');
3803
3804 return 0;
3805 }
3806
3807 static const struct seq_operations show_ftrace_seq_ops = {
3808 .start = t_start,
3809 .next = t_next,
3810 .stop = t_stop,
3811 .show = t_show,
3812 };
3813
3814 static int
3815 ftrace_avail_open(struct inode *inode, struct file *file)
3816 {
3817 struct ftrace_iterator *iter;
3818 int ret;
3819
3820 ret = security_locked_down(LOCKDOWN_TRACEFS);
3821 if (ret)
3822 return ret;
3823
3824 if (unlikely(ftrace_disabled))
3825 return -ENODEV;
3826
3827 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3828 if (!iter)
3829 return -ENOMEM;
3830
3831 iter->pg = ftrace_pages_start;
3832 iter->ops = &global_ops;
3833
3834 return 0;
3835 }
3836
3837 static int
3838 ftrace_enabled_open(struct inode *inode, struct file *file)
3839 {
3840 struct ftrace_iterator *iter;
3841
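/*
 * This shows us what functions are currently being traced and by
 * what. Not sure if we want lockdown to hide such critical
 * information for an admin. Although, perhaps it can show
 * information we don't want people to see, but if something is
 * tracing something, we probably want to know about it.
 */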
3851 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3852 if (!iter)
3853 return -ENOMEM;
3854
3855 iter->pg = ftrace_pages_start;
3856 iter->flags = FTRACE_ITER_ENABLED;
3857 iter->ops = &global_ops;
3858
3859 return 0;
3860 }
3861
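/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() and
 * ftrace_notrace_write() as well as tracing_lseek() for the
 * file's lseek routine.
 */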
3878 int
3879 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3880 struct inode *inode, struct file *file)
3881 {
3882 struct ftrace_iterator *iter;
3883 struct ftrace_hash *hash;
3884 struct list_head *mod_head;
3885 struct trace_array *tr = ops->private;
3886 int ret = -ENOMEM;
3887
3888 ftrace_ops_init(ops);
3889
3890 if (unlikely(ftrace_disabled))
3891 return -ENODEV;
3892
3893 if (tracing_check_open_get_tr(tr))
3894 return -ENODEV;
3895
3896 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3897 if (!iter)
3898 goto out;
3899
3900 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3901 goto out;
3902
3903 iter->ops = ops;
3904 iter->flags = flag;
3905 iter->tr = tr;
3906
3907 mutex_lock(&ops->func_hash->regex_lock);
3908
3909 if (flag & FTRACE_ITER_NOTRACE) {
3910 hash = ops->func_hash->notrace_hash;
3911 mod_head = tr ? &tr->mod_notrace : NULL;
3912 } else {
3913 hash = ops->func_hash->filter_hash;
3914 mod_head = tr ? &tr->mod_trace : NULL;
3915 }
3916
3917 iter->mod_list = mod_head;
3918
3919 if (file->f_mode & FMODE_WRITE) {
3920 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3921
3922 if (file->f_flags & O_TRUNC) {
3923 iter->hash = alloc_ftrace_hash(size_bits);
3924 clear_ftrace_mod_list(mod_head);
3925 } else {
3926 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3927 }
3928
3929 if (!iter->hash) {
3930 trace_parser_put(&iter->parser);
3931 goto out_unlock;
3932 }
3933 } else
3934 iter->hash = hash;
3935
3936 ret = 0;
3937
3938 if (file->f_mode & FMODE_READ) {
3939 iter->pg = ftrace_pages_start;
3940
3941 ret = seq_open(file, &show_ftrace_seq_ops);
3942 if (!ret) {
3943 struct seq_file *m = file->private_data;
3944 m->private = iter;
3945 } else {
3946
3947 free_ftrace_hash(iter->hash);
3948 trace_parser_put(&iter->parser);
3949 }
3950 } else
3951 file->private_data = iter;
3952
3953 out_unlock:
3954 mutex_unlock(&ops->func_hash->regex_lock);
3955
3956 out:
3957 if (ret) {
3958 kfree(iter);
3959 if (tr)
3960 trace_array_put(tr);
3961 }
3962
3963 return ret;
3964 }
3965
3966 static int
3967 ftrace_filter_open(struct inode *inode, struct file *file)
3968 {
3969 struct ftrace_ops *ops = inode->i_private;
3970
3971
3972 return ftrace_regex_open(ops,
3973 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3974 inode, file);
3975 }
3976
3977 static int
3978 ftrace_notrace_open(struct inode *inode, struct file *file)
3979 {
3980 struct ftrace_ops *ops = inode->i_private;
3981
3982
3983 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3984 inode, file);
3985 }
3986
3987
3988 struct ftrace_glob {
3989 char *search;
3990 unsigned len;
3991 int type;
3992 };
3993
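/*
 * If symbols in an architecture don't correspond exactly to the user-visible
 * name of what they represent, it is possible to define this function to
 * perform the necessary adjustments.
 */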
3999 char * __weak arch_ftrace_match_adjust(char *str, const char *search)
4000 {
4001 return str;
4002 }
4003
4004 static int ftrace_match(char *str, struct ftrace_glob *g)
4005 {
4006 int matched = 0;
4007 int slen;
4008
4009 str = arch_ftrace_match_adjust(str, g->search);
4010
4011 switch (g->type) {
4012 case MATCH_FULL:
4013 if (strcmp(str, g->search) == 0)
4014 matched = 1;
4015 break;
4016 case MATCH_FRONT_ONLY:
4017 if (strncmp(str, g->search, g->len) == 0)
4018 matched = 1;
4019 break;
4020 case MATCH_MIDDLE_ONLY:
4021 if (strstr(str, g->search))
4022 matched = 1;
4023 break;
4024 case MATCH_END_ONLY:
4025 slen = strlen(str);
4026 if (slen >= g->len &&
4027 memcmp(str + slen - g->len, g->search, g->len) == 0)
4028 matched = 1;
4029 break;
4030 case MATCH_GLOB:
4031 if (glob_match(g->search, str))
4032 matched = 1;
4033 break;
4034 }
4035
4036 return matched;
4037 }
4038
4039 static int
4040 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
4041 {
4042 struct ftrace_func_entry *entry;
4043 int ret = 0;
4044
4045 entry = ftrace_lookup_ip(hash, rec->ip);
4046 if (clear_filter) {
4047
4048 if (!entry)
4049 return 0;
4050
4051 free_hash_entry(hash, entry);
4052 } else {
4053
4054 if (entry)
4055 return 0;
4056
4057 ret = add_hash_entry(hash, rec->ip);
4058 }
4059 return ret;
4060 }
4061
4062 static int
4063 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4064 int clear_filter)
4065 {
4066 long index = simple_strtoul(func_g->search, NULL, 0);
4067 struct ftrace_page *pg;
4068 struct dyn_ftrace *rec;
4069
4070
4071 if (--index < 0)
4072 return 0;
4073
4074 do_for_each_ftrace_rec(pg, rec) {
4075 if (pg->index <= index) {
4076 index -= pg->index;
4077
4078 break;
4079 }
4080 rec = &pg->records[index];
4081 enter_record(hash, rec, clear_filter);
4082 return 1;
4083 } while_for_each_ftrace_rec();
4084 return 0;
4085 }
4086
4087 #ifdef FTRACE_MCOUNT_MAX_OFFSET
4088 static int lookup_ip(unsigned long ip, char **modname, char *str)
4089 {
4090 unsigned long offset;
4091
4092 kallsyms_lookup(ip, NULL, &offset, modname, str);
4093 if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4094 return -1;
4095 return 0;
4096 }
4097 #else
4098 static int lookup_ip(unsigned long ip, char **modname, char *str)
4099 {
4100 kallsyms_lookup(ip, NULL, NULL, modname, str);
4101 return 0;
4102 }
4103 #endif
4104
4105 static int
4106 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4107 struct ftrace_glob *mod_g, int exclude_mod)
4108 {
4109 char str[KSYM_SYMBOL_LEN];
4110 char *modname;
4111
4112 if (lookup_ip(rec->ip, &modname, str)) {
4113
4114 WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4115 !(rec->flags & FTRACE_FL_DISABLED));
4116 return 0;
4117 }
4118
4119 if (mod_g) {
4120 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4121
4122
4123 if (!mod_g->len) {
4124
4125 if (!exclude_mod != !modname)
4126 goto func_match;
4127 return 0;
4128 }
4129
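/*
 * exclude_mod is set to trace everything but the given
 * module. If it is set and the module matches, then
 * return 0. If it is not set, and the module doesn't match
 * also return 0. Otherwise do the matching.
 */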
4137 if (!mod_matches == !exclude_mod)
4138 return 0;
4139 func_match:
4140
4141 if (!func_g->len)
4142 return 1;
4143 }
4144
4145 return ftrace_match(str, func_g);
4146 }
4147
4148 static int
4149 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4150 {
4151 struct ftrace_page *pg;
4152 struct dyn_ftrace *rec;
4153 struct ftrace_glob func_g = { .type = MATCH_FULL };
4154 struct ftrace_glob mod_g = { .type = MATCH_FULL };
4155 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4156 int exclude_mod = 0;
4157 int found = 0;
4158 int ret;
4159 int clear_filter = 0;
4160
4161 if (func) {
4162 func_g.type = filter_parse_regex(func, len, &func_g.search,
4163 &clear_filter);
4164 func_g.len = strlen(func_g.search);
4165 }
4166
4167 if (mod) {
4168 mod_g.type = filter_parse_regex(mod, strlen(mod),
4169 &mod_g.search, &exclude_mod);
4170 mod_g.len = strlen(mod_g.search);
4171 }
4172
4173 mutex_lock(&ftrace_lock);
4174
4175 if (unlikely(ftrace_disabled))
4176 goto out_unlock;
4177
4178 if (func_g.type == MATCH_INDEX) {
4179 found = add_rec_by_index(hash, &func_g, clear_filter);
4180 goto out_unlock;
4181 }
4182
4183 do_for_each_ftrace_rec(pg, rec) {
4184
4185 if (rec->flags & FTRACE_FL_DISABLED)
4186 continue;
4187
4188 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4189 ret = enter_record(hash, rec, clear_filter);
4190 if (ret < 0) {
4191 found = ret;
4192 goto out_unlock;
4193 }
4194 found = 1;
4195 }
4196 } while_for_each_ftrace_rec();
4197 out_unlock:
4198 mutex_unlock(&ftrace_lock);
4199
4200 return found;
4201 }
4202
4203 static int
4204 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4205 {
4206 return match_records(hash, buff, len, NULL);
4207 }
4208
4209 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4210 struct ftrace_ops_hash *old_hash)
4211 {
4212 struct ftrace_ops *op;
4213
4214 if (!ftrace_enabled)
4215 return;
4216
4217 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4218 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4219 return;
4220 }
4221
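/*
 * If this ops is not enabled, it could be sharing its filter
 * with a global_ops that is. Check if another enabled ops uses
 * the same hash; if so, the modify code still needs to run.
 */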
4227 if (ops->func_hash != &global_ops.local_hash)
4228 return;
4229
4230 do_for_each_ftrace_op(op, ftrace_ops_list) {
4231 if (op->func_hash == &global_ops.local_hash &&
4232 op->flags & FTRACE_OPS_FL_ENABLED) {
4233 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4234
4235 return;
4236 }
4237 } while_for_each_ftrace_op(op);
4238 }
4239
4240 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4241 struct ftrace_hash **orig_hash,
4242 struct ftrace_hash *hash,
4243 int enable)
4244 {
4245 struct ftrace_ops_hash old_hash_ops;
4246 struct ftrace_hash *old_hash;
4247 int ret;
4248
4249 old_hash = *orig_hash;
4250 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4251 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4252 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4253 if (!ret) {
4254 ftrace_ops_update_code(ops, &old_hash_ops);
4255 free_ftrace_hash_rcu(old_hash);
4256 }
4257 return ret;
4258 }
4259
4260 static bool module_exists(const char *module)
4261 {
4262
4263 static const char this_mod[] = "__this_module";
4264 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4265 unsigned long val;
4266 int n;
4267
4268 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4269
4270 if (n > sizeof(modname) - 1)
4271 return false;
4272
4273 val = module_kallsyms_lookup_name(modname);
4274 return val != 0;
4275 }
4276
4277 static int cache_mod(struct trace_array *tr,
4278 const char *func, char *module, int enable)
4279 {
4280 struct ftrace_mod_load *ftrace_mod, *n;
4281 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4282 int ret;
4283
4284 mutex_lock(&ftrace_lock);
4285
4286
4287 if (func[0] == '!') {
4288 func++;
4289 ret = -EINVAL;
4290
4291
4292 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4293 if (strcmp(ftrace_mod->module, module) != 0)
4294 continue;
4295
4296
4297 if (strcmp(func, "*") == 0 ||
4298 (ftrace_mod->func &&
4299 strcmp(ftrace_mod->func, func) == 0)) {
4300 ret = 0;
4301 free_ftrace_mod(ftrace_mod);
4302 continue;
4303 }
4304 }
4305 goto out;
4306 }
4307
4308 ret = -EINVAL;
4309
4310 if (module_exists(module))
4311 goto out;
4312
4313
4314 ret = ftrace_add_mod(tr, func, module, enable);
4315 out:
4316 mutex_unlock(&ftrace_lock);
4317
4318 return ret;
4319 }
4320
4321 static int
4322 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4323 int reset, int enable);
4324
4325 #ifdef CONFIG_MODULES
4326 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4327 char *mod, bool enable)
4328 {
4329 struct ftrace_mod_load *ftrace_mod, *n;
4330 struct ftrace_hash **orig_hash, *new_hash;
4331 LIST_HEAD(process_mods);
4332 char *func;
4333
4334 mutex_lock(&ops->func_hash->regex_lock);
4335
4336 if (enable)
4337 orig_hash = &ops->func_hash->filter_hash;
4338 else
4339 orig_hash = &ops->func_hash->notrace_hash;
4340
4341 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4342 *orig_hash);
4343 if (!new_hash)
4344 goto out;
4345
4346 mutex_lock(&ftrace_lock);
4347
4348 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4349
4350 if (strcmp(ftrace_mod->module, mod) != 0)
4351 continue;
4352
4353 if (ftrace_mod->func)
4354 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4355 else
4356 func = kstrdup("*", GFP_KERNEL);
4357
4358 if (!func)
4359 continue;
4360
4361 list_move(&ftrace_mod->list, &process_mods);
4362
4363
4364 kfree(ftrace_mod->func);
4365 ftrace_mod->func = func;
4366 }
4367
4368 mutex_unlock(&ftrace_lock);
4369
4370 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4371
4372 func = ftrace_mod->func;
4373
4374
4375 match_records(new_hash, func, strlen(func), mod);
4376 free_ftrace_mod(ftrace_mod);
4377 }
4378
4379 if (enable && list_empty(head))
4380 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4381
4382 mutex_lock(&ftrace_lock);
4383
4384 ftrace_hash_move_and_update_ops(ops, orig_hash,
4385 new_hash, enable);
4386 mutex_unlock(&ftrace_lock);
4387
4388 out:
4389 mutex_unlock(&ops->func_hash->regex_lock);
4390
4391 free_ftrace_hash(new_hash);
4392 }
4393
4394 static void process_cached_mods(const char *mod_name)
4395 {
4396 struct trace_array *tr;
4397 char *mod;
4398
4399 mod = kstrdup(mod_name, GFP_KERNEL);
4400 if (!mod)
4401 return;
4402
4403 mutex_lock(&trace_types_lock);
4404 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4405 if (!list_empty(&tr->mod_trace))
4406 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4407 if (!list_empty(&tr->mod_notrace))
4408 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4409 }
4410 mutex_unlock(&trace_types_lock);
4411
4412 kfree(mod);
4413 }
4414 #endif
4415
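/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */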
4421 static int
4422 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4423 char *func_orig, char *cmd, char *module, int enable)
4424 {
4425 char *func;
4426 int ret;
4427
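/* match_records() modifies func, and we need the original */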
4429 func = kstrdup(func_orig, GFP_KERNEL);
4430 if (!func)
4431 return -ENOMEM;
4432
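/*
 * cmd == 'mod' because we only registered this func
 * for the 'mod' ftrace_func_command.
 * But if you register one func with multiple commands,
 * you can tell which command was used by the cmd
 * parameter.
 */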
4440 ret = match_records(hash, func, strlen(func), module);
4441 kfree(func);
4442
4443 if (!ret)
4444 return cache_mod(tr, func_orig, module, enable);
4445 if (ret < 0)
4446 return ret;
4447 return 0;
4448 }
4449
4450 static struct ftrace_func_command ftrace_mod_cmd = {
4451 .name = "mod",
4452 .func = ftrace_mod_callback,
4453 };
4454
4455 static int __init ftrace_mod_cmd_init(void)
4456 {
4457 return register_ftrace_command(&ftrace_mod_cmd);
4458 }
4459 core_initcall(ftrace_mod_cmd_init);
4460
4461 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4462 struct ftrace_ops *op, struct ftrace_regs *fregs)
4463 {
4464 struct ftrace_probe_ops *probe_ops;
4465 struct ftrace_func_probe *probe;
4466
4467 probe = container_of(op, struct ftrace_func_probe, ops);
4468 probe_ops = probe->probe_ops;
4469
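/*
 * Disable preemption for these calls to prevent a RCU grace
 * period. This syncs the hash iteration and freeing of items
 * on the hash. rcu_read_lock is too dangerous here.
 */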
4475 preempt_disable_notrace();
4476 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4477 preempt_enable_notrace();
4478 }
4479
4480 struct ftrace_func_map {
4481 struct ftrace_func_entry entry;
4482 void *data;
4483 };
4484
4485 struct ftrace_func_mapper {
4486 struct ftrace_hash hash;
4487 };
4488
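/**
 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
 *
 * Returns a ftrace_func_mapper descriptor that can be used to map
 * ips to data.
 */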
4494 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4495 {
4496 struct ftrace_hash *hash;
4497
4498
4499
4500
4501
4502
4503 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4504 return (struct ftrace_func_mapper *)hash;
4505 }
4506
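/**
 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
 * @mapper: The mapper that has the ip maps
 * @ip: the instruction pointer to find the data for
 *
 * Returns the data mapped to @ip if found otherwise NULL. The return
 * is actually the address of the mapper data pointer. The address is
 * returned for use cases where the data is no bigger than a long, and
 * the user can use the data pointer as its data instead of having to
 * allocate more memory for the reference.
 */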
4518 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4519 unsigned long ip)
4520 {
4521 struct ftrace_func_entry *entry;
4522 struct ftrace_func_map *map;
4523
4524 entry = ftrace_lookup_ip(&mapper->hash, ip);
4525 if (!entry)
4526 return NULL;
4527
4528 map = (struct ftrace_func_map *)entry;
4529 return &map->data;
4530 }
4531
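/**
 * ftrace_func_mapper_add_ip - Map some data to an ip
 * @mapper: The mapper that has the ip maps
 * @ip: The instruction pointer address to map @data to
 * @data: The data to map to @ip
 *
 * Returns 0 on success otherwise an error.
 */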
4540 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4541 unsigned long ip, void *data)
4542 {
4543 struct ftrace_func_entry *entry;
4544 struct ftrace_func_map *map;
4545
4546 entry = ftrace_lookup_ip(&mapper->hash, ip);
4547 if (entry)
4548 return -EBUSY;
4549
4550 map = kmalloc(sizeof(*map), GFP_KERNEL);
4551 if (!map)
4552 return -ENOMEM;
4553
4554 map->entry.ip = ip;
4555 map->data = data;
4556
4557 __add_hash_entry(&mapper->hash, &map->entry);
4558
4559 return 0;
4560 }
4561
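/**
 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
 * @mapper: The mapper that has the ip maps
 * @ip: The instruction pointer address to remove the data from
 *
 * Returns the data if it is found, otherwise NULL.
 * Note, if the data pointer is used as the data itself (see
 * ftrace_func_mapper_find_ip()), then the return value may be
 * meaningless if the data pointer was set to zero.
 */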
4572 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4573 unsigned long ip)
4574 {
4575 struct ftrace_func_entry *entry;
4576 struct ftrace_func_map *map;
4577 void *data;
4578
4579 entry = ftrace_lookup_ip(&mapper->hash, ip);
4580 if (!entry)
4581 return NULL;
4582
4583 map = (struct ftrace_func_map *)entry;
4584 data = map->data;
4585
4586 remove_hash_entry(&mapper->hash, entry);
4587 kfree(entry);
4588
4589 return data;
4590 }
4591
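/**
 * free_ftrace_func_mapper - free a mapping of ips and data
 * @mapper: The mapper that has the ip maps
 * @free_func: A function to be called on each data item.
 *
 * This is used to free the function mapper. The @free_func is optional
 * and can be used if the data needs to be freed as well.
 *
 * A minimal usage sketch of the mapper API (illustrative only; the
 * names my_show() and my_data are hypothetical):
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **pdata;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	if (!mapper)
 *		return -ENOMEM;
 *	if (ftrace_func_mapper_add_ip(mapper, ip, my_data) < 0)
 *		goto fail;
 *	pdata = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (pdata)
 *		my_show(*pdata);
 *	free_ftrace_func_mapper(mapper, NULL);
 */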
4600 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4601 ftrace_mapper_func free_func)
4602 {
4603 struct ftrace_func_entry *entry;
4604 struct ftrace_func_map *map;
4605 struct hlist_head *hhd;
4606 int size, i;
4607
4608 if (!mapper)
4609 return;
4610
4611 if (free_func && mapper->hash.count) {
4612 size = 1 << mapper->hash.size_bits;
4613 for (i = 0; i < size; i++) {
4614 hhd = &mapper->hash.buckets[i];
4615 hlist_for_each_entry(entry, hhd, hlist) {
4616 map = (struct ftrace_func_map *)entry;
4617 free_func(map);
4618 }
4619 }
4620 }
4621 free_ftrace_hash(&mapper->hash);
4622 }
4623
4624 static void release_probe(struct ftrace_func_probe *probe)
4625 {
4626 struct ftrace_probe_ops *probe_ops;
4627
4628 mutex_lock(&ftrace_lock);
4629
4630 WARN_ON(probe->ref <= 0);
4631
4632
4633 probe->ref--;
4634
4635 if (!probe->ref) {
4636 probe_ops = probe->probe_ops;
4637
4638
4639
4640
4641 if (probe_ops->free)
4642 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4643 list_del(&probe->list);
4644 kfree(probe);
4645 }
4646 mutex_unlock(&ftrace_lock);
4647 }
4648
4649 static void acquire_probe_locked(struct ftrace_func_probe *probe)
4650 {
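/*
 * Add one ref to keep it from being freed when releasing the
 * ftrace_lock mutex.
 */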
4655 probe->ref++;
4656 }
4657
4658 int
4659 register_ftrace_function_probe(char *glob, struct trace_array *tr,
4660 struct ftrace_probe_ops *probe_ops,
4661 void *data)
4662 {
4663 struct ftrace_func_probe *probe = NULL, *iter;
4664 struct ftrace_func_entry *entry;
4665 struct ftrace_hash **orig_hash;
4666 struct ftrace_hash *old_hash;
4667 struct ftrace_hash *hash;
4668 int count = 0;
4669 int size;
4670 int ret;
4671 int i;
4672
4673 if (WARN_ON(!tr))
4674 return -EINVAL;
4675
4676
4677 if (WARN_ON(glob[0] == '!'))
4678 return -EINVAL;
4679
4680
4681 mutex_lock(&ftrace_lock);
4682
4683 list_for_each_entry(iter, &tr->func_probes, list) {
4684 if (iter->probe_ops == probe_ops) {
4685 probe = iter;
4686 break;
4687 }
4688 }
4689 if (!probe) {
4690 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4691 if (!probe) {
4692 mutex_unlock(&ftrace_lock);
4693 return -ENOMEM;
4694 }
4695 probe->probe_ops = probe_ops;
4696 probe->ops.func = function_trace_probe_call;
4697 probe->tr = tr;
4698 ftrace_ops_init(&probe->ops);
4699 list_add(&probe->list, &tr->func_probes);
4700 }
4701
4702 acquire_probe_locked(probe);
4703
4704 mutex_unlock(&ftrace_lock);
4705
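/*
 * Note, there's a small window here that the func_hash->filter_hash
 * may be NULL or empty. Need to be careful when reading the loop.
 */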
4710 mutex_lock(&probe->ops.func_hash->regex_lock);
4711
4712 orig_hash = &probe->ops.func_hash->filter_hash;
4713 old_hash = *orig_hash;
4714 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4715
4716 if (!hash) {
4717 ret = -ENOMEM;
4718 goto out;
4719 }
4720
4721 ret = ftrace_match_records(hash, glob, strlen(glob));
4722
4723
4724 if (!ret)
4725 ret = -EINVAL;
4726
4727 if (ret < 0)
4728 goto out;
4729
4730 size = 1 << hash->size_bits;
4731 for (i = 0; i < size; i++) {
4732 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4733 if (ftrace_lookup_ip(old_hash, entry->ip))
4734 continue;
4735
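/*
 * The caller might want to do something special
 * for each function we find. We call the callback
 * to give the caller an opportunity to do so.
 */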
4740 if (probe_ops->init) {
4741 ret = probe_ops->init(probe_ops, tr,
4742 entry->ip, data,
4743 &probe->data);
4744 if (ret < 0) {
4745 if (probe_ops->free && count)
4746 probe_ops->free(probe_ops, tr,
4747 0, probe->data);
4748 probe->data = NULL;
4749 goto out;
4750 }
4751 }
4752 count++;
4753 }
4754 }
4755
4756 mutex_lock(&ftrace_lock);
4757
4758 if (!count) {
4759
4760 ret = -EINVAL;
4761 goto out_unlock;
4762 }
4763
4764 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4765 hash, 1);
4766 if (ret < 0)
4767 goto err_unlock;
4768
4769
4770 probe->ref += count;
4771
4772 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4773 ret = ftrace_startup(&probe->ops, 0);
4774
4775 out_unlock:
4776 mutex_unlock(&ftrace_lock);
4777
4778 if (!ret)
4779 ret = count;
4780 out:
4781 mutex_unlock(&probe->ops.func_hash->regex_lock);
4782 free_ftrace_hash(hash);
4783
4784 release_probe(probe);
4785
4786 return ret;
4787
4788 err_unlock:
4789 if (!probe_ops->free || !count)
4790 goto out_unlock;
4791
4792
4793 for (i = 0; i < size; i++) {
4794 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4795 if (ftrace_lookup_ip(old_hash, entry->ip))
4796 continue;
4797 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4798 }
4799 }
4800 goto out_unlock;
4801 }
4802
4803 int
4804 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4805 struct ftrace_probe_ops *probe_ops)
4806 {
4807 struct ftrace_func_probe *probe = NULL, *iter;
4808 struct ftrace_ops_hash old_hash_ops;
4809 struct ftrace_func_entry *entry;
4810 struct ftrace_glob func_g;
4811 struct ftrace_hash **orig_hash;
4812 struct ftrace_hash *old_hash;
4813 struct ftrace_hash *hash = NULL;
4814 struct hlist_node *tmp;
4815 struct hlist_head hhd;
4816 char str[KSYM_SYMBOL_LEN];
4817 int count = 0;
4818 int i, ret = -ENODEV;
4819 int size;
4820
4821 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4822 func_g.search = NULL;
4823 else {
4824 int not;
4825
4826 func_g.type = filter_parse_regex(glob, strlen(glob),
4827 &func_g.search, &not);
4828 func_g.len = strlen(func_g.search);
4829
4830
4831 if (WARN_ON(not))
4832 return -EINVAL;
4833 }
4834
4835 mutex_lock(&ftrace_lock);
4836
4837 list_for_each_entry(iter, &tr->func_probes, list) {
4838 if (iter->probe_ops == probe_ops) {
4839 probe = iter;
4840 break;
4841 }
4842 }
4843 if (!probe)
4844 goto err_unlock_ftrace;
4845
4846 ret = -EINVAL;
4847 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4848 goto err_unlock_ftrace;
4849
4850 acquire_probe_locked(probe);
4851
4852 mutex_unlock(&ftrace_lock);
4853
4854 mutex_lock(&probe->ops.func_hash->regex_lock);
4855
4856 orig_hash = &probe->ops.func_hash->filter_hash;
4857 old_hash = *orig_hash;
4858
4859 if (ftrace_hash_empty(old_hash))
4860 goto out_unlock;
4861
4862 old_hash_ops.filter_hash = old_hash;
4863
4864 old_hash_ops.notrace_hash = NULL;
4865
4866 ret = -ENOMEM;
4867 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4868 if (!hash)
4869 goto out_unlock;
4870
4871 INIT_HLIST_HEAD(&hhd);
4872
4873 size = 1 << hash->size_bits;
4874 for (i = 0; i < size; i++) {
4875 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4876
4877 if (func_g.search) {
4878 kallsyms_lookup(entry->ip, NULL, NULL,
4879 NULL, str);
4880 if (!ftrace_match(str, &func_g))
4881 continue;
4882 }
4883 count++;
4884 remove_hash_entry(hash, entry);
4885 hlist_add_head(&entry->hlist, &hhd);
4886 }
4887 }
4888
4889
4890 if (!count) {
4891 ret = -EINVAL;
4892 goto out_unlock;
4893 }
4894
4895 mutex_lock(&ftrace_lock);
4896
4897 WARN_ON(probe->ref < count);
4898
4899 probe->ref -= count;
4900
4901 if (ftrace_hash_empty(hash))
4902 ftrace_shutdown(&probe->ops, 0);
4903
4904 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4905 hash, 1);
4906
4907
4908 if (ftrace_enabled && !ftrace_hash_empty(hash))
4909 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4910 &old_hash_ops);
4911 synchronize_rcu();
4912
4913 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4914 hlist_del(&entry->hlist);
4915 if (probe_ops->free)
4916 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4917 kfree(entry);
4918 }
4919 mutex_unlock(&ftrace_lock);
4920
4921 out_unlock:
4922 mutex_unlock(&probe->ops.func_hash->regex_lock);
4923 free_ftrace_hash(hash);
4924
4925 release_probe(probe);
4926
4927 return ret;
4928
4929 err_unlock_ftrace:
4930 mutex_unlock(&ftrace_lock);
4931 return ret;
4932 }
4933
4934 void clear_ftrace_function_probes(struct trace_array *tr)
4935 {
4936 struct ftrace_func_probe *probe, *n;
4937
4938 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4939 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4940 }
4941
4942 static LIST_HEAD(ftrace_commands);
4943 static DEFINE_MUTEX(ftrace_cmd_mutex);
4944
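/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */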
4949 __init int register_ftrace_command(struct ftrace_func_command *cmd)
4950 {
4951 struct ftrace_func_command *p;
4952 int ret = 0;
4953
4954 mutex_lock(&ftrace_cmd_mutex);
4955 list_for_each_entry(p, &ftrace_commands, list) {
4956 if (strcmp(cmd->name, p->name) == 0) {
4957 ret = -EBUSY;
4958 goto out_unlock;
4959 }
4960 }
4961 list_add(&cmd->list, &ftrace_commands);
4962 out_unlock:
4963 mutex_unlock(&ftrace_cmd_mutex);
4964
4965 return ret;
4966 }
4967
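/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */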
4972 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4973 {
4974 struct ftrace_func_command *p, *n;
4975 int ret = -ENODEV;
4976
4977 mutex_lock(&ftrace_cmd_mutex);
4978 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4979 if (strcmp(cmd->name, p->name) == 0) {
4980 ret = 0;
4981 list_del_init(&p->list);
4982 goto out_unlock;
4983 }
4984 }
4985 out_unlock:
4986 mutex_unlock(&ftrace_cmd_mutex);
4987
4988 return ret;
4989 }
4990
4991 static int ftrace_process_regex(struct ftrace_iterator *iter,
4992 char *buff, int len, int enable)
4993 {
4994 struct ftrace_hash *hash = iter->hash;
4995 struct trace_array *tr = iter->ops->private;
4996 char *func, *command, *next = buff;
4997 struct ftrace_func_command *p;
4998 int ret = -EINVAL;
4999
5000 func = strsep(&next, ":");
5001
5002 if (!next) {
5003 ret = ftrace_match_records(hash, func, len);
5004 if (!ret)
5005 ret = -EINVAL;
5006 if (ret < 0)
5007 return ret;
5008 return 0;
5009 }
5010
5011
5012
5013 command = strsep(&next, ":");
5014
5015 mutex_lock(&ftrace_cmd_mutex);
5016 list_for_each_entry(p, &ftrace_commands, list) {
5017 if (strcmp(p->name, command) == 0) {
5018 ret = p->func(tr, hash, func, command, next, enable);
5019 goto out_unlock;
5020 }
5021 }
5022 out_unlock:
5023 mutex_unlock(&ftrace_cmd_mutex);
5024
5025 return ret;
5026 }
5027
5028 static ssize_t
5029 ftrace_regex_write(struct file *file, const char __user *ubuf,
5030 size_t cnt, loff_t *ppos, int enable)
5031 {
5032 struct ftrace_iterator *iter;
5033 struct trace_parser *parser;
5034 ssize_t ret, read;
5035
5036 if (!cnt)
5037 return 0;
5038
5039 if (file->f_mode & FMODE_READ) {
5040 struct seq_file *m = file->private_data;
5041 iter = m->private;
5042 } else
5043 iter = file->private_data;
5044
5045 if (unlikely(ftrace_disabled))
5046 return -ENODEV;
5047
5048
5049
5050 parser = &iter->parser;
5051 read = trace_get_user(parser, ubuf, cnt, ppos);
5052
5053 if (read >= 0 && trace_parser_loaded(parser) &&
5054 !trace_parser_cont(parser)) {
5055 ret = ftrace_process_regex(iter, parser->buffer,
5056 parser->idx, enable);
5057 trace_parser_clear(parser);
5058 if (ret < 0)
5059 goto out;
5060 }
5061
5062 ret = read;
5063 out:
5064 return ret;
5065 }
5066
5067 ssize_t
5068 ftrace_filter_write(struct file *file, const char __user *ubuf,
5069 size_t cnt, loff_t *ppos)
5070 {
5071 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5072 }
5073
5074 ssize_t
5075 ftrace_notrace_write(struct file *file, const char __user *ubuf,
5076 size_t cnt, loff_t *ppos)
5077 {
5078 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5079 }
5080
5081 static int
5082 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5083 {
5084 struct ftrace_func_entry *entry;
5085
5086 ip = ftrace_location(ip);
5087 if (!ip)
5088 return -EINVAL;
5089
5090 if (remove) {
5091 entry = ftrace_lookup_ip(hash, ip);
5092 if (!entry)
5093 return -ENOENT;
5094 free_hash_entry(hash, entry);
5095 return 0;
5096 }
5097
5098 return add_hash_entry(hash, ip);
5099 }
5100
5101 static int
5102 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5103 unsigned int cnt, int remove)
5104 {
5105 unsigned int i;
5106 int err;
5107
5108 for (i = 0; i < cnt; i++) {
5109 err = __ftrace_match_addr(hash, ips[i], remove);
5110 if (err) {
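/*
 * On any failure, stop immediately. The entries added so
 * far are left in @hash, which the caller frees on error.
 */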
5115 return err;
5116 }
5117 }
5118 return 0;
5119 }
5120
5121 static int
5122 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
5123 unsigned long *ips, unsigned int cnt,
5124 int remove, int reset, int enable)
5125 {
5126 struct ftrace_hash **orig_hash;
5127 struct ftrace_hash *hash;
5128 int ret;
5129
5130 if (unlikely(ftrace_disabled))
5131 return -ENODEV;
5132
5133 mutex_lock(&ops->func_hash->regex_lock);
5134
5135 if (enable)
5136 orig_hash = &ops->func_hash->filter_hash;
5137 else
5138 orig_hash = &ops->func_hash->notrace_hash;
5139
5140 if (reset)
5141 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5142 else
5143 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5144
5145 if (!hash) {
5146 ret = -ENOMEM;
5147 goto out_regex_unlock;
5148 }
5149
5150 if (buf && !ftrace_match_records(hash, buf, len)) {
5151 ret = -EINVAL;
5152 goto out_regex_unlock;
5153 }
5154 if (ips) {
5155 ret = ftrace_match_addr(hash, ips, cnt, remove);
5156 if (ret < 0)
5157 goto out_regex_unlock;
5158 }
5159
5160 mutex_lock(&ftrace_lock);
5161 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5162 mutex_unlock(&ftrace_lock);
5163
5164 out_regex_unlock:
5165 mutex_unlock(&ops->func_hash->regex_lock);
5166
5167 free_ftrace_hash(hash);
5168 return ret;
5169 }
5170
5171 static int
5172 ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5173 int remove, int reset, int enable)
5174 {
5175 return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
5176 }
5177
5178 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5179
5180 struct ftrace_direct_func {
5181 struct list_head next;
5182 unsigned long addr;
5183 int count;
5184 };
5185
5186 static LIST_HEAD(ftrace_direct_funcs);
5187
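/**
 * ftrace_find_direct_func - test an address if it is a registered direct caller
 * @addr: The address of a registered direct caller
 *
 * This searches to see if a ftrace direct caller has been registered
 * at a specific address, and if so, it returns a descriptor for it.
 *
 * This can be used by architecture code to see if an address is
 * a direct caller (trampoline) attached to a fentry/mcount location.
 * This is useful for the function_graph tracer, as it may need to
 * do adjustments if it traced a location that also has a direct
 * trampoline attached to it.
 */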
5201 struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
5202 {
5203 struct ftrace_direct_func *entry;
5204 bool found = false;
5205
5206
5207 list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
5208 if (entry->addr == addr) {
5209 found = true;
5210 break;
5211 }
5212 }
5213 if (found)
5214 return entry;
5215
5216 return NULL;
5217 }
5218
5219 static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
5220 {
5221 struct ftrace_direct_func *direct;
5222
5223 direct = kmalloc(sizeof(*direct), GFP_KERNEL);
5224 if (!direct)
5225 return NULL;
5226 direct->addr = addr;
5227 direct->count = 0;
5228 list_add_rcu(&direct->next, &ftrace_direct_funcs);
5229 ftrace_direct_func_count++;
5230 return direct;
5231 }
5232
5233 static int register_ftrace_function_nolock(struct ftrace_ops *ops);
5234
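/**
 * register_ftrace_direct - Call a custom trampoline directly
 * @ip: The address of the nop at the beginning of a function
 * @addr: The address of the trampoline to call at @ip
 *
 * This is used to connect a direct call from the nop location (@ip)
 * at the start of ftrace traced functions. The location that it calls
 * (@addr) must be able to handle a direct call, and save the parameters
 * of the function being traced, and restore them (or inject new ones
 * if needed), before returning.
 *
 * Returns:
 *  0 on success
 *  -EBUSY - Another direct function is already attached (there can be only one)
 *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
 *  -ENOMEM - There was an allocation failure.
 *
 * A rough usage sketch, loosely modeled on samples/ftrace/ftrace-direct.c
 * (my_tramp would be an arch-specific assembly trampoline and is only
 * assumed here):
 *
 *	extern void my_tramp(void);
 *
 *	static int __init my_direct_init(void)
 *	{
 *		return register_ftrace_direct((unsigned long)wake_up_process,
 *					      (unsigned long)my_tramp);
 *	}
 */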
5252 int register_ftrace_direct(unsigned long ip, unsigned long addr)
5253 {
5254 struct ftrace_direct_func *direct;
5255 struct ftrace_func_entry *entry;
5256 struct ftrace_hash *free_hash = NULL;
5257 struct dyn_ftrace *rec;
5258 int ret = -ENODEV;
5259
5260 mutex_lock(&direct_mutex);
5261
5262 ip = ftrace_location(ip);
5263 if (!ip)
5264 goto out_unlock;
5265
5266
5267 ret = -EBUSY;
5268 if (ftrace_find_rec_direct(ip))
5269 goto out_unlock;
5270
5271 ret = -ENODEV;
5272 rec = lookup_rec(ip, ip);
5273 if (!rec)
5274 goto out_unlock;
5275
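/*
 * Check if the rec says it has a direct call but we didn't
 * find one earlier?
 */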
5280 if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
5281 goto out_unlock;
5282
5283
5284 if (ip != rec->ip) {
5285 ip = rec->ip;
5286
5287 if (ftrace_find_rec_direct(ip))
5288 goto out_unlock;
5289 }
5290
5291 ret = -ENOMEM;
5292 direct = ftrace_find_direct_func(addr);
5293 if (!direct) {
5294 direct = ftrace_alloc_direct_func(addr);
5295 if (!direct)
5296 goto out_unlock;
5297 }
5298
5299 entry = ftrace_add_rec_direct(ip, addr, &free_hash);
5300 if (!entry)
5301 goto out_unlock;
5302
5303 ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
5304
5305 if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
5306 ret = register_ftrace_function_nolock(&direct_ops);
5307 if (ret)
5308 ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5309 }
5310
5311 if (ret) {
5312 remove_hash_entry(direct_functions, entry);
5313 kfree(entry);
5314 if (!direct->count) {
5315 list_del_rcu(&direct->next);
5316 synchronize_rcu_tasks();
5317 kfree(direct);
5318 if (free_hash)
5319 free_ftrace_hash(free_hash);
5320 free_hash = NULL;
5321 ftrace_direct_func_count--;
5322 }
5323 } else {
5324 direct->count++;
5325 }
5326 out_unlock:
5327 mutex_unlock(&direct_mutex);
5328
5329 if (free_hash) {
5330 synchronize_rcu_tasks();
5331 free_ftrace_hash(free_hash);
5332 }
5333
5334 return ret;
5335 }
5336 EXPORT_SYMBOL_GPL(register_ftrace_direct);
5337
5338 static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5339 struct dyn_ftrace **recp)
5340 {
5341 struct ftrace_func_entry *entry;
5342 struct dyn_ftrace *rec;
5343
5344 rec = lookup_rec(*ip, *ip);
5345 if (!rec)
5346 return NULL;
5347
5348 entry = __ftrace_lookup_ip(direct_functions, rec->ip);
5349 if (!entry) {
5350 WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5351 return NULL;
5352 }
5353
5354 WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
5355
5356 /* Passed in ip just needs to be on the @rec. Return the exact ip */
5357 *ip = rec->ip;
5358
5359 if (recp)
5360 *recp = rec;
5361
5362 return entry;
5363 }
5364
5365 int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5366 {
5367 struct ftrace_direct_func *direct;
5368 struct ftrace_func_entry *entry;
5369 struct ftrace_hash *hash;
5370 int ret = -ENODEV;
5371
5372 mutex_lock(&direct_mutex);
5373
5374 ip = ftrace_location(ip);
5375 if (!ip)
5376 goto out_unlock;
5377
5378 entry = find_direct_entry(&ip, NULL);
5379 if (!entry)
5380 goto out_unlock;
5381
5382 hash = direct_ops.func_hash->filter_hash;
5383 if (hash->count == 1)
5384 unregister_ftrace_function(&direct_ops);
5385
5386 ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5387
5388 WARN_ON(ret);
5389
5390 remove_hash_entry(direct_functions, entry);
5391
5392 direct = ftrace_find_direct_func(addr);
5393 if (!WARN_ON(!direct)) {
5394 /* This is the good path (see the ! before WARN) */
5395 direct->count--;
5396 WARN_ON(direct->count < 0);
5397 if (!direct->count) {
5398 list_del_rcu(&direct->next);
5399 synchronize_rcu_tasks();
5400 kfree(direct);
5401 kfree(entry);
5402 ftrace_direct_func_count--;
5403 }
5404 }
5405 out_unlock:
5406 mutex_unlock(&direct_mutex);
5407
5408 return ret;
5409 }
5410 EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5411
5412 static struct ftrace_ops stub_ops = {
5413 .func = ftrace_stub,
5414 };
5415
5416 /**
5417 * ftrace_modify_direct_caller - modify ftrace nop directly
5418 * @entry: The ftrace hash entry of the direct helper for @rec
5419 * @rec: The record representing the function site to be updated
5420 * @old_addr: The location that the site at @rec->ip currently calls
5421 * @new_addr: The location that the site at @rec->ip should call
5422 *
5423 * An architecture may overwrite this function to optimize the
5424 * changing of the direct callback on an ftrace nop location.
5425 * This is the default. If the architecture does not overwrite
5426 * this function, the update is done in two steps: a stub ops is
5427 * registered on @rec->ip so that the site is routed through the
5428 * ftrace iterator while the direct address is swapped from
5429 * @old_addr to @new_addr, and the stub is then removed, restoring
5430 * the direct call, now aimed at @new_addr.
5431 *
5432 * Returns: zero on success, negative error code on failure.
5433 */
5434 int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5435 struct dyn_ftrace *rec,
5436 unsigned long old_addr,
5437 unsigned long new_addr)
5438 {
5439 unsigned long ip = rec->ip;
5440 int ret;
5441
5442 /*
5443 * The ftrace_lock was taken by the caller to keep @rec and
5444 * @entry stable while deciding how to perform this update.
5445 * However, the register_ftrace_function() and
5446 * unregister_ftrace_function() calls used below take
5447 * ftrace_lock themselves, so the lock must be dropped here
5448 * and re-taken before returning. The direct_mutex held by
5449 * the caller keeps the direct entries from changing in the
5450 * meantime.
5451 */
5452 mutex_unlock(&ftrace_lock);
5453
5454 /*
5455 * By setting a stub function at the same address, we force
5456 * the code to call the iterator and the direct_ops helper.
5457 * This means that @ip does not call the direct call, and
5458 * we can simply modify it.
5459 */
5460 ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5461 if (ret)
5462 goto out_lock;
5463
5464 ret = register_ftrace_function(&stub_ops);
5465 if (ret) {
5466 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5467 goto out_lock;
5468 }
5469
5470 entry->direct = new_addr;
5471
5472 /*
5473 * By removing the stub, we put back the direct call, calling
5474 * the @new_addr.
5475 */
5476 unregister_ftrace_function(&stub_ops);
5477 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5478
5479 out_lock:
5480 mutex_lock(&ftrace_lock);
5481
5482 return ret;
5483 }
5484
5485 /**
5486 * modify_ftrace_direct - Modify an existing direct call to call something else
5487 * @ip: The instruction pointer to modify
5488 * @old_addr: The address that the current @ip calls directly
5489 * @new_addr: The address that the @ip should call
5490 *
5491 * This modifies a ftrace direct caller at an instruction pointer without
5492 * having to disable it first. The direct call will switch over to the
5493 * @new_addr without missing anything.
5494 *
5495 * Returns: zero on success. Non zero on error, which includes:
5496 *  -ENODEV : the @ip given has no direct caller attached
5497 *  -EINVAL : the @old_addr does not match the current direct caller
5498 */
5499 int modify_ftrace_direct(unsigned long ip,
5500 unsigned long old_addr, unsigned long new_addr)
5501 {
5502 struct ftrace_direct_func *direct, *new_direct = NULL;
5503 struct ftrace_func_entry *entry;
5504 struct dyn_ftrace *rec;
5505 int ret = -ENODEV;
5506
5507 mutex_lock(&direct_mutex);
5508
5509 mutex_lock(&ftrace_lock);
5510
5511 ip = ftrace_location(ip);
5512 if (!ip)
5513 goto out_unlock;
5514
5515 entry = find_direct_entry(&ip, &rec);
5516 if (!entry)
5517 goto out_unlock;
5518
5519 ret = -EINVAL;
5520 if (entry->direct != old_addr)
5521 goto out_unlock;
5522
5523 direct = ftrace_find_direct_func(old_addr);
5524 if (WARN_ON(!direct))
5525 goto out_unlock;
5526 if (direct->count > 1) {
5527 ret = -ENOMEM;
5528 new_direct = ftrace_alloc_direct_func(new_addr);
5529 if (!new_direct)
5530 goto out_unlock;
5531 direct->count--;
5532 new_direct->count++;
5533 } else {
5534 direct->addr = new_addr;
5535 }
5536
5537 /*
5538 * If there's no other ftrace callback on the rec->ip location,
5539 * then it can be changed directly by the architecture.
5540 * If there is another caller, then we just need to change the
5541 * direct caller helper to point to @new_addr.
5542 */
5543 if (ftrace_rec_count(rec) == 1) {
5544 ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5545 } else {
5546 entry->direct = new_addr;
5547 ret = 0;
5548 }
5549
5550 if (unlikely(ret && new_direct)) {
5551 direct->count++;
5552 list_del_rcu(&new_direct->next);
5553 synchronize_rcu_tasks();
5554 kfree(new_direct);
5555 ftrace_direct_func_count--;
5556 }
5557
5558 out_unlock:
5559 mutex_unlock(&ftrace_lock);
5560 mutex_unlock(&direct_mutex);
5561 return ret;
5562 }
5563 EXPORT_SYMBOL_GPL(modify_ftrace_direct);
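/*
 * Usage sketch (editorial): live-switching a direct caller. Assumes
 * my_tramp1 was previously attached to my_func with
 * register_ftrace_direct(); all names are hypothetical
 * (samples/ftrace/ftrace-direct-modify.c has a complete example).
 *
 *	// Retarget the call at my_func from my_tramp1 to my_tramp2
 *	// without a window where neither trampoline is called.
 *	ret = modify_ftrace_direct((unsigned long)my_func,
 *				   (unsigned long)my_tramp1,
 *				   (unsigned long)my_tramp2);
 */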
5564
5565 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5566
5567 static int check_direct_multi(struct ftrace_ops *ops)
5568 {
5569 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5570 return -EINVAL;
5571 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5572 return -EINVAL;
5573 return 0;
5574 }
5575
5576 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5577 {
5578 struct ftrace_func_entry *entry, *del;
5579 int size, i;
5580
5581 size = 1 << hash->size_bits;
5582 for (i = 0; i < size; i++) {
5583 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5584 del = __ftrace_lookup_ip(direct_functions, entry->ip);
5585 if (del && del->direct == addr) {
5586 remove_hash_entry(direct_functions, del);
5587 kfree(del);
5588 }
5589 }
5590 }
5591 }
5592
5593 /**
5594 * register_ftrace_direct_multi - Call a custom trampoline directly
5595 * for multiple functions registered in @ops
5596 * @ops: The address of the struct ftrace_ops object
5597 * @addr: The address of the trampoline to call at @ops functions
5598 *
5599 * This is used to connect a direct call to @addr from the nop locations
5600 * of the functions registered in @ops (via the ftrace_set_filter_ip()
5601 * function).
5602 *
5603 * The location that it calls (@addr) must be able to handle a direct call,
5604 * and save the parameters of the function being traced, and restore them
5605 * (or inject new ones if needed), before returning.
5606 *
5607 * Returns:
5608 *  0 on success
5609 *  -EINVAL  - The @ops object was already registered with this call or
5610 *             when there are no functions in @ops object.
5611 *  -EBUSY   - Another direct function is already attached (there can be only one)
5612 *  -ENODEV  - @ip does not point to a ftrace nop location (or not supported)
5613 *  -ENOMEM  - There was an allocation failure.
5614 */
5615 int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5616 {
5617 struct ftrace_hash *hash, *free_hash = NULL;
5618 struct ftrace_func_entry *entry, *new;
5619 int err = -EBUSY, size, i;
5620
5621 if (ops->func || ops->trampoline)
5622 return -EINVAL;
5623 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5624 return -EINVAL;
5625 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5626 return -EINVAL;
5627
5628 hash = ops->func_hash->filter_hash;
5629 if (ftrace_hash_empty(hash))
5630 return -EINVAL;
5631
5632 mutex_lock(&direct_mutex);
5633
5634 /* Make sure requested entries are not already registered. */
5635 size = 1 << hash->size_bits;
5636 for (i = 0; i < size; i++) {
5637 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5638 if (ftrace_find_rec_direct(entry->ip))
5639 goto out_unlock;
5640 }
5641 }
5642
5643 /* ... and insert them to direct_functions hash. */
5644 err = -ENOMEM;
5645 for (i = 0; i < size; i++) {
5646 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5647 new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
5648 if (!new)
5649 goto out_remove;
5650 entry->direct = addr;
5651 }
5652 }
5653
5654 ops->func = call_direct_funcs;
5655 ops->flags = MULTI_FLAGS;
5656 ops->trampoline = FTRACE_REGS_ADDR;
5657
5658 err = register_ftrace_function_nolock(ops);
5659
5660 out_remove:
5661 if (err)
5662 remove_direct_functions_hash(hash, addr);
5663
5664 out_unlock:
5665 mutex_unlock(&direct_mutex);
5666
5667 if (free_hash) {
5668 synchronize_rcu_tasks();
5669 free_ftrace_hash(free_hash);
5670 }
5671 return err;
5672 }
5673 EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
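/*
 * Usage sketch (editorial): attaching one trampoline to several functions
 * at once. The ops must carry a filter before registration; func1, func2
 * and my_tramp are hypothetical (samples/ftrace/ftrace-direct-multi.c has
 * a complete example).
 *
 *	static struct ftrace_ops direct;	// no ->func, no ->trampoline
 *
 *	ftrace_set_filter_ip(&direct, (unsigned long)func1, 0, 0);
 *	ftrace_set_filter_ip(&direct, (unsigned long)func2, 0, 0);
 *	err = register_ftrace_direct_multi(&direct, (unsigned long)my_tramp);
 */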
5674
5675 /**
5676 * unregister_ftrace_direct_multi - Remove calls to custom trampoline
5677 * previously registered by register_ftrace_direct_multi for @ops object.
5678 * @ops: The address of the struct ftrace_ops object
5679 * @addr: The address of the direct function that is called by the @ops functions
5680 *
5681 * This is used to remove direct calls to @addr from the nop locations
5682 * of the functions registered in @ops (via the ftrace_set_filter_ip() function).
5683 *
5684 * Returns:
5685 *  0 on success
5686 *  -EINVAL - The @ops object was not properly registered.
5687 */
5688 int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5689 {
5690 struct ftrace_hash *hash = ops->func_hash->filter_hash;
5691 int err;
5692
5693 if (check_direct_multi(ops))
5694 return -EINVAL;
5695 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5696 return -EINVAL;
5697
5698 mutex_lock(&direct_mutex);
5699 err = unregister_ftrace_function(ops);
5700 remove_direct_functions_hash(hash, addr);
5701 mutex_unlock(&direct_mutex);
5702
5703 /* cleanup for possible another register call */
5704 ops->func = NULL;
5705 ops->trampoline = 0;
5706 return err;
5707 }
5708 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
5709
5710 static int
5711 __modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5712 {
5713 struct ftrace_hash *hash;
5714 struct ftrace_func_entry *entry, *iter;
5715 static struct ftrace_ops tmp_ops = {
5716 .func = ftrace_stub,
5717 .flags = FTRACE_OPS_FL_STUB,
5718 };
5719 int i, size;
5720 int err;
5721
5722 lockdep_assert_held_once(&direct_mutex);
5723
5724 /* Enable the tmp_ops to have the same functions as the direct ops */
5725 ftrace_ops_init(&tmp_ops);
5726 tmp_ops.func_hash = ops->func_hash;
5727
5728 err = register_ftrace_function_nolock(&tmp_ops);
5729 if (err)
5730 return err;
5731
5732 /*
5733 * Now the ftrace_ops_list_func() is called to do the direct callers.
5734 * We can safely change the direct functions attached to each entry.
5735 */
5736 mutex_lock(&ftrace_lock);
5737
5738 hash = ops->func_hash->filter_hash;
5739 size = 1 << hash->size_bits;
5740 for (i = 0; i < size; i++) {
5741 hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
5742 entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5743 if (!entry)
5744 continue;
5745 entry->direct = addr;
5746 }
5747 }
5748
5749 mutex_unlock(&ftrace_lock);
5750
5751 /* Removing the tmp_ops will add the updated direct callers to the functions */
5752 unregister_ftrace_function(&tmp_ops);
5753
5754 return err;
5755 }
5756
5757 /**
5758 * modify_ftrace_direct_multi_nolock - Modify an existing direct call to call
5759 * something else
5760 * @ops: The address of the struct ftrace_ops object
5761 * @addr: The address of the new trampoline to call at @ops functions
5762 *
5763 * This is used to unregister the currently registered direct caller and
5764 * register a new one (@addr) on the functions registered in the @ops object.
5765 *
5766 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5767 * where there will be no callbacks called.
5768 *
5769 * The caller should already have direct_mutex locked, so we don't take
5770 * direct_mutex here.
5771 *
5772 * Returns: zero on success. Non zero on error, which includes:
5773 *  -EINVAL - The @ops object was not properly registered.
5774 */
5775 int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
5776 {
5777 if (check_direct_multi(ops))
5778 return -EINVAL;
5779 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5780 return -EINVAL;
5781
5782 return __modify_ftrace_direct_multi(ops, addr);
5783 }
5784 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi_nolock);
5785
5786 /**
5787 * modify_ftrace_direct_multi - Modify an existing direct call to call
5788 * something else
5789 * @ops: The address of the struct ftrace_ops object
5790 * @addr: The address of the new trampoline to call at @ops functions
5791 *
5792 * This is used to unregister the currently registered direct caller and
5793 * register a new one (@addr) on the functions registered in the @ops object.
5794 *
5795 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5796 * where there will be no callbacks called.
5797 *
5798 * Returns: zero on success. Non zero on error, which includes:
5799 *  -EINVAL - The @ops object was not properly registered.
5800 */
5801 int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5802 {
5803 int err;
5804
5805 if (check_direct_multi(ops))
5806 return -EINVAL;
5807 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5808 return -EINVAL;
5809
5810 mutex_lock(&direct_mutex);
5811 err = __modify_ftrace_direct_multi(ops, addr);
5812 mutex_unlock(&direct_mutex);
5813 return err;
5814 }
5815 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
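/*
 * Usage sketch (editorial), pairing with the register_ftrace_direct_multi()
 * example above; 'direct' and my_tramp2 are hypothetical:
 *
 *	// Retarget every function filtered in 'direct' to my_tramp2,
 *	// with only the brief stub-ops window described above.
 *	err = modify_ftrace_direct_multi(&direct, (unsigned long)my_tramp2);
 */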
5816 #endif
5817
5818 /**
5819 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5820 * @ops: the ops to set the filter with
5821 * @ip: the address to add to or remove from the filter.
5822 * @remove: non zero to remove the ip from the filter
5823 * @reset: non zero to reset all filters before applying this filter.
5824 *
5825 * Filters denote which functions should be enabled when tracing is enabled.
5826 * If @ip is NULL, it fails to update the filter.
5827 */
5828 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5829 int remove, int reset)
5830 {
5831 ftrace_ops_init(ops);
5832 return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
5833 }
5834 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
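/*
 * Usage sketch (editorial): restricting a callback to one function by
 * address; my_ops, my_callback and some_func are hypothetical names.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called on entry of the filtered function only
 *	}
 *
 *	static struct ftrace_ops my_ops = { .func = my_callback };
 *
 *	ret = ftrace_set_filter_ip(&my_ops, (unsigned long)some_func, 0, 0);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */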
5835
5836 /**
5837 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
5838 * @ops: the ops to set the filter with
5839 * @ips: the array of addresses to add to or remove from the filter.
5840 * @cnt: the number of addresses in @ips
5841 * @remove: non zero to remove ips from the filter
5842 * @reset: non zero to reset all filters before applying this filter.
5843 *
5844 * Filters denote which functions should be enabled when tracing is enabled.
5845 * If @ips array or any ip specified within is NULL, it fails to update filter.
5846 */
5847 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
5848 unsigned int cnt, int remove, int reset)
5849 {
5850 ftrace_ops_init(ops);
5851 return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
5852 }
5853 EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
5854
5855 /**
5856 * ftrace_ops_set_global_filter - setup ops to use global filters
5857 * @ops: the ops which will use the global filters
5858 *
5859 * ftrace users who need global function trace filtering should call this.
5860 * It can set the global filter only if ops were not initialized before.
5861 */
5862 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5863 {
5864 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5865 return;
5866
5867 ftrace_ops_init(ops);
5868 ops->func_hash = &global_ops.local_hash;
5869 }
5870 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5871
5872 static int
5873 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5874 int reset, int enable)
5875 {
5876 return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
5877 }
5878
5879 /**
5880 * ftrace_set_filter - set a function to filter on in ftrace
5881 * @ops: the ops to set the filter with
5882 * @buf: the string that holds the function filter text.
5883 * @len: the length of the string.
5884 * @reset: non zero to reset all filters before applying this filter.
5885 *
5886 * Filters denote which functions should be enabled when tracing is enabled.
5887 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5888 */
5889 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5890 int len, int reset)
5891 {
5892 ftrace_ops_init(ops);
5893 return ftrace_set_regex(ops, buf, len, reset, 1);
5894 }
5895 EXPORT_SYMBOL_GPL(ftrace_set_filter);
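/*
 * Usage sketch (editorial): the glob syntax accepted here is the same one
 * the tracefs set_ftrace_filter file understands ('name', 'name*',
 * '*name', '*name*'); my_ops is hypothetical.
 *
 *	// Reset any previous filter and trace only the wakeup family.
 *	ret = ftrace_set_filter(&my_ops, "try_to_wake_up*",
 *				strlen("try_to_wake_up*"), 1);
 */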
5896
5897 /**
5898 * ftrace_set_notrace - set a function to not trace in ftrace
5899 * @ops: the ops to set the notrace filter with
5900 * @buf: the string that holds the function notrace text.
5901 * @len: the length of the string.
5902 * @reset: non zero to reset all filters before applying this filter.
5903 *
5904 * Notrace Filters denote which functions should not be enabled when tracing
5905 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5906 * for tracing.
5907 */
5908 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5909 int len, int reset)
5910 {
5911 ftrace_ops_init(ops);
5912 return ftrace_set_regex(ops, buf, len, reset, 0);
5913 }
5914 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
5915
5916 /**
5917 * ftrace_set_global_filter - set a function to filter on with global tracers
5918 * @buf: the string that holds the function filter text.
5919 * @len: the length of the string.
5920 * @reset: non zero to reset all filters before applying this filter.
5921 *
5922 * Filters denote which functions should be enabled when tracing is enabled.
5923 */
5924 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5925 {
5926 ftrace_set_regex(&global_ops, buf, len, reset, 1);
5927 }
5928 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5929
5930 /**
5931 * ftrace_set_global_notrace - set a function to not trace with global tracers
5932 * @buf: the string that holds the function notrace text.
5933 * @len: the length of the string.
5934 * @reset: non zero to reset all filters before applying this filter.
5935 *
5936 * Notrace Filters denote which functions should not be enabled when tracing
5937 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5938 * for tracing.
5939 */
5940 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5941 {
5942 ftrace_set_regex(&global_ops, buf, len, reset, 0);
5943 }
5944 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
5945
5946 /*
5947 * command line interface to allow users to set filters on boot up.
5948 */
5949 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
5950 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5951 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5952
5953 /* Used by function selftest to not test if filter is set */
5954 bool ftrace_filter_param __initdata;
5955
5956 static int __init set_ftrace_notrace(char *str)
5957 {
5958 ftrace_filter_param = true;
5959 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5960 return 1;
5961 }
5962 __setup("ftrace_notrace=", set_ftrace_notrace);
5963
5964 static int __init set_ftrace_filter(char *str)
5965 {
5966 ftrace_filter_param = true;
5967 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5968 return 1;
5969 }
5970 __setup("ftrace_filter=", set_ftrace_filter);
5971
5972 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5973 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5974 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5975 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5976
5977 static int __init set_graph_function(char *str)
5978 {
5979 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5980 return 1;
5981 }
5982 __setup("ftrace_graph_filter=", set_graph_function);
5983
5984 static int __init set_graph_notrace_function(char *str)
5985 {
5986 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5987 return 1;
5988 }
5989 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
5990
5991 static int __init set_graph_max_depth_function(char *str)
5992 {
5993 if (!str)
5994 return 0;
5995 fgraph_max_depth = simple_strtoul(str, NULL, 0);
5996 return 1;
5997 }
5998 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
5999
6000 static void __init set_ftrace_early_graph(char *buf, int enable)
6001 {
6002 int ret;
6003 char *func;
6004 struct ftrace_hash *hash;
6005
6006 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
6007 if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
6008 return;
6009
6010 while (buf) {
6011 func = strsep(&buf, ",");
6012
6013 ret = ftrace_graph_set_hash(hash, func);
6014 if (ret)
6015 printk(KERN_DEBUG "ftrace: function %s not "
6016 "traceable\n", func);
6017 }
6018
6019 if (enable)
6020 ftrace_graph_hash = hash;
6021 else
6022 ftrace_graph_notrace_hash = hash;
6023 }
6024 #endif
6025
6026 void __init
6027 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
6028 {
6029 char *func;
6030
6031 ftrace_ops_init(ops);
6032
6033 while (buf) {
6034 func = strsep(&buf, ",");
6035 ftrace_set_regex(ops, func, strlen(func), 0, enable);
6036 }
6037 }
6038
6039 static void __init set_ftrace_early_filters(void)
6040 {
6041 if (ftrace_filter_buf[0])
6042 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
6043 if (ftrace_notrace_buf[0])
6044 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
6045 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6046 if (ftrace_graph_buf[0])
6047 set_ftrace_early_graph(ftrace_graph_buf, 1);
6048 if (ftrace_graph_notrace_buf[0])
6049 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
6050 #endif
6051 }
6052
6053 int ftrace_regex_release(struct inode *inode, struct file *file)
6054 {
6055 struct seq_file *m = (struct seq_file *)file->private_data;
6056 struct ftrace_iterator *iter;
6057 struct ftrace_hash **orig_hash;
6058 struct trace_parser *parser;
6059 int filter_hash;
6060
6061 if (file->f_mode & FMODE_READ) {
6062 iter = m->private;
6063 seq_release(inode, file);
6064 } else
6065 iter = file->private_data;
6066
6067 parser = &iter->parser;
6068 if (trace_parser_loaded(parser)) {
6069 int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
6070
6071 ftrace_process_regex(iter, parser->buffer,
6072 parser->idx, enable);
6073 }
6074
6075 trace_parser_put(parser);
6076
6077 mutex_lock(&iter->ops->func_hash->regex_lock);
6078
6079 if (file->f_mode & FMODE_WRITE) {
6080 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
6081
6082 if (filter_hash) {
6083 orig_hash = &iter->ops->func_hash->filter_hash;
6084 if (iter->tr && !list_empty(&iter->tr->mod_trace))
6085 iter->hash->flags |= FTRACE_HASH_FL_MOD;
6086 } else
6087 orig_hash = &iter->ops->func_hash->notrace_hash;
6088
6089 mutex_lock(&ftrace_lock);
6090 ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
6091 iter->hash, filter_hash);
6092 mutex_unlock(&ftrace_lock);
6093 } else {
6094
6095 iter->hash = NULL;
6096 }
6097
6098 mutex_unlock(&iter->ops->func_hash->regex_lock);
6099 free_ftrace_hash(iter->hash);
6100 if (iter->tr)
6101 trace_array_put(iter->tr);
6102 kfree(iter);
6103
6104 return 0;
6105 }
6106
6107 static const struct file_operations ftrace_avail_fops = {
6108 .open = ftrace_avail_open,
6109 .read = seq_read,
6110 .llseek = seq_lseek,
6111 .release = seq_release_private,
6112 };
6113
6114 static const struct file_operations ftrace_enabled_fops = {
6115 .open = ftrace_enabled_open,
6116 .read = seq_read,
6117 .llseek = seq_lseek,
6118 .release = seq_release_private,
6119 };
6120
6121 static const struct file_operations ftrace_filter_fops = {
6122 .open = ftrace_filter_open,
6123 .read = seq_read,
6124 .write = ftrace_filter_write,
6125 .llseek = tracing_lseek,
6126 .release = ftrace_regex_release,
6127 };
6128
6129 static const struct file_operations ftrace_notrace_fops = {
6130 .open = ftrace_notrace_open,
6131 .read = seq_read,
6132 .write = ftrace_notrace_write,
6133 .llseek = tracing_lseek,
6134 .release = ftrace_regex_release,
6135 };
6136
6137 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6138
6139 static DEFINE_MUTEX(graph_lock);
6140
6141 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
6142 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
6143
6144 enum graph_filter_type {
6145 GRAPH_FILTER_NOTRACE = 0,
6146 GRAPH_FILTER_FUNCTION,
6147 };
6148
6149 #define FTRACE_GRAPH_EMPTY ((void *)1)
6150
6151 struct ftrace_graph_data {
6152 struct ftrace_hash *hash;
6153 struct ftrace_func_entry *entry;
6154 int idx;
6155 enum graph_filter_type type;
6156 struct ftrace_hash *new_hash;
6157 const struct seq_operations *seq_ops;
6158 struct trace_parser parser;
6159 };
6160
6161 static void *
6162 __g_next(struct seq_file *m, loff_t *pos)
6163 {
6164 struct ftrace_graph_data *fgd = m->private;
6165 struct ftrace_func_entry *entry = fgd->entry;
6166 struct hlist_head *head;
6167 int i, idx = fgd->idx;
6168
6169 if (*pos >= fgd->hash->count)
6170 return NULL;
6171
6172 if (entry) {
6173 hlist_for_each_entry_continue(entry, hlist) {
6174 fgd->entry = entry;
6175 return entry;
6176 }
6177
6178 idx++;
6179 }
6180
6181 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6182 head = &fgd->hash->buckets[i];
6183 hlist_for_each_entry(entry, head, hlist) {
6184 fgd->entry = entry;
6185 fgd->idx = i;
6186 return entry;
6187 }
6188 }
6189 return NULL;
6190 }
6191
6192 static void *
6193 g_next(struct seq_file *m, void *v, loff_t *pos)
6194 {
6195 (*pos)++;
6196 return __g_next(m, pos);
6197 }
6198
6199 static void *g_start(struct seq_file *m, loff_t *pos)
6200 {
6201 struct ftrace_graph_data *fgd = m->private;
6202
6203 mutex_lock(&graph_lock);
6204
6205 if (fgd->type == GRAPH_FILTER_FUNCTION)
6206 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6207 lockdep_is_held(&graph_lock));
6208 else
6209 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6210 lockdep_is_held(&graph_lock));
6211
6212 /* Nothing, tell g_show to print all functions are enabled */
6213 if (ftrace_hash_empty(fgd->hash) && !*pos)
6214 return FTRACE_GRAPH_EMPTY;
6215
6216 fgd->idx = 0;
6217 fgd->entry = NULL;
6218 return __g_next(m, pos);
6219 }
6220
6221 static void g_stop(struct seq_file *m, void *p)
6222 {
6223 mutex_unlock(&graph_lock);
6224 }
6225
6226 static int g_show(struct seq_file *m, void *v)
6227 {
6228 struct ftrace_func_entry *entry = v;
6229
6230 if (!entry)
6231 return 0;
6232
6233 if (entry == FTRACE_GRAPH_EMPTY) {
6234 struct ftrace_graph_data *fgd = m->private;
6235
6236 if (fgd->type == GRAPH_FILTER_FUNCTION)
6237 seq_puts(m, "#### all functions enabled ####\n");
6238 else
6239 seq_puts(m, "#### no functions disabled ####\n");
6240 return 0;
6241 }
6242
6243 seq_printf(m, "%ps\n", (void *)entry->ip);
6244
6245 return 0;
6246 }
6247
6248 static const struct seq_operations ftrace_graph_seq_ops = {
6249 .start = g_start,
6250 .next = g_next,
6251 .stop = g_stop,
6252 .show = g_show,
6253 };
6254
6255 static int
6256 __ftrace_graph_open(struct inode *inode, struct file *file,
6257 struct ftrace_graph_data *fgd)
6258 {
6259 int ret;
6260 struct ftrace_hash *new_hash = NULL;
6261
6262 ret = security_locked_down(LOCKDOWN_TRACEFS);
6263 if (ret)
6264 return ret;
6265
6266 if (file->f_mode & FMODE_WRITE) {
6267 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6268
6269 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6270 return -ENOMEM;
6271
6272 if (file->f_flags & O_TRUNC)
6273 new_hash = alloc_ftrace_hash(size_bits);
6274 else
6275 new_hash = alloc_and_copy_ftrace_hash(size_bits,
6276 fgd->hash);
6277 if (!new_hash) {
6278 ret = -ENOMEM;
6279 goto out;
6280 }
6281 }
6282
6283 if (file->f_mode & FMODE_READ) {
6284 ret = seq_open(file, &ftrace_graph_seq_ops);
6285 if (!ret) {
6286 struct seq_file *m = file->private_data;
6287 m->private = fgd;
6288 } else {
6289 /* Failed */
6290 free_ftrace_hash(new_hash);
6291 new_hash = NULL;
6292 }
6293 } else
6294 file->private_data = fgd;
6295
6296 out:
6297 if (ret < 0 && file->f_mode & FMODE_WRITE)
6298 trace_parser_put(&fgd->parser);
6299
6300 fgd->new_hash = new_hash;
6301
6302 /*
6303 * All uses of fgd->hash must be taken with the graph_lock
6304 * held. The graph_lock is going to be released, so force
6305 * fgd->hash to be reinitialized when it is taken again.
6306 */
6307 fgd->hash = NULL;
6308
6309 return ret;
6310 }
6311
6312 static int
6313 ftrace_graph_open(struct inode *inode, struct file *file)
6314 {
6315 struct ftrace_graph_data *fgd;
6316 int ret;
6317
6318 if (unlikely(ftrace_disabled))
6319 return -ENODEV;
6320
6321 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6322 if (fgd == NULL)
6323 return -ENOMEM;
6324
6325 mutex_lock(&graph_lock);
6326
6327 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6328 lockdep_is_held(&graph_lock));
6329 fgd->type = GRAPH_FILTER_FUNCTION;
6330 fgd->seq_ops = &ftrace_graph_seq_ops;
6331
6332 ret = __ftrace_graph_open(inode, file, fgd);
6333 if (ret < 0)
6334 kfree(fgd);
6335
6336 mutex_unlock(&graph_lock);
6337 return ret;
6338 }
6339
6340 static int
6341 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6342 {
6343 struct ftrace_graph_data *fgd;
6344 int ret;
6345
6346 if (unlikely(ftrace_disabled))
6347 return -ENODEV;
6348
6349 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6350 if (fgd == NULL)
6351 return -ENOMEM;
6352
6353 mutex_lock(&graph_lock);
6354
6355 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6356 lockdep_is_held(&graph_lock));
6357 fgd->type = GRAPH_FILTER_NOTRACE;
6358 fgd->seq_ops = &ftrace_graph_seq_ops;
6359
6360 ret = __ftrace_graph_open(inode, file, fgd);
6361 if (ret < 0)
6362 kfree(fgd);
6363
6364 mutex_unlock(&graph_lock);
6365 return ret;
6366 }
6367
6368 static int
6369 ftrace_graph_release(struct inode *inode, struct file *file)
6370 {
6371 struct ftrace_graph_data *fgd;
6372 struct ftrace_hash *old_hash, *new_hash;
6373 struct trace_parser *parser;
6374 int ret = 0;
6375
6376 if (file->f_mode & FMODE_READ) {
6377 struct seq_file *m = file->private_data;
6378
6379 fgd = m->private;
6380 seq_release(inode, file);
6381 } else {
6382 fgd = file->private_data;
6383 }
6384
6385
6386 if (file->f_mode & FMODE_WRITE) {
6387
6388 parser = &fgd->parser;
6389
6390 if (trace_parser_loaded((parser))) {
6391 ret = ftrace_graph_set_hash(fgd->new_hash,
6392 parser->buffer);
6393 }
6394
6395 trace_parser_put(parser);
6396
6397 new_hash = __ftrace_hash_move(fgd->new_hash);
6398 if (!new_hash) {
6399 ret = -ENOMEM;
6400 goto out;
6401 }
6402
6403 mutex_lock(&graph_lock);
6404
6405 if (fgd->type == GRAPH_FILTER_FUNCTION) {
6406 old_hash = rcu_dereference_protected(ftrace_graph_hash,
6407 lockdep_is_held(&graph_lock));
6408 rcu_assign_pointer(ftrace_graph_hash, new_hash);
6409 } else {
6410 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6411 lockdep_is_held(&graph_lock));
6412 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6413 }
6414
6415 mutex_unlock(&graph_lock);
6416
6417 /*
6418 * We need to do a hard force of sched synchronization.
6419 * This is because we use preempt_disable() to do RCU, but
6420 * the function tracers can be called where RCU is not watching
6421 * (like before user_exit()). We can not rely on the RCU
6422 * infrastructure to do the synchronization, thus we must do it
6423 * ourselves.
6424 */
6425 if (old_hash != EMPTY_HASH)
6426 synchronize_rcu_tasks_rude();
6427
6428 free_ftrace_hash(old_hash);
6429 }
6430
6431 out:
6432 free_ftrace_hash(fgd->new_hash);
6433 kfree(fgd);
6434
6435 return ret;
6436 }
6437
6438 static int
6439 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6440 {
6441 struct ftrace_glob func_g;
6442 struct dyn_ftrace *rec;
6443 struct ftrace_page *pg;
6444 struct ftrace_func_entry *entry;
6445 int fail = 1;
6446 int not;
6447
6448 /* decode regex */
6449 func_g.type = filter_parse_regex(buffer, strlen(buffer),
6450 &func_g.search, &not);
6451
6452 func_g.len = strlen(func_g.search);
6453
6454 mutex_lock(&ftrace_lock);
6455
6456 if (unlikely(ftrace_disabled)) {
6457 mutex_unlock(&ftrace_lock);
6458 return -ENODEV;
6459 }
6460
6461 do_for_each_ftrace_rec(pg, rec) {
6462
6463 if (rec->flags & FTRACE_FL_DISABLED)
6464 continue;
6465
6466 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6467 entry = ftrace_lookup_ip(hash, rec->ip);
6468
6469 if (!not) {
6470 fail = 0;
6471
6472 if (entry)
6473 continue;
6474 if (add_hash_entry(hash, rec->ip) < 0)
6475 goto out;
6476 } else {
6477 if (entry) {
6478 free_hash_entry(hash, entry);
6479 fail = 0;
6480 }
6481 }
6482 }
6483 } while_for_each_ftrace_rec();
6484 out:
6485 mutex_unlock(&ftrace_lock);
6486
6487 if (fail)
6488 return -EINVAL;
6489
6490 return 0;
6491 }
6492
6493 static ssize_t
6494 ftrace_graph_write(struct file *file, const char __user *ubuf,
6495 size_t cnt, loff_t *ppos)
6496 {
6497 ssize_t read, ret = 0;
6498 struct ftrace_graph_data *fgd = file->private_data;
6499 struct trace_parser *parser;
6500
6501 if (!cnt)
6502 return 0;
6503
6504 /* Read mode uses seq functions */
6505 if (file->f_mode & FMODE_READ) {
6506 struct seq_file *m = file->private_data;
6507 fgd = m->private;
6508 }
6509
6510 parser = &fgd->parser;
6511
6512 read = trace_get_user(parser, ubuf, cnt, ppos);
6513
6514 if (read >= 0 && trace_parser_loaded(parser) &&
6515 !trace_parser_cont(parser)) {
6516
6517 ret = ftrace_graph_set_hash(fgd->new_hash,
6518 parser->buffer);
6519 trace_parser_clear(parser);
6520 }
6521
6522 if (!ret)
6523 ret = read;
6524
6525 return ret;
6526 }
6527
6528 static const struct file_operations ftrace_graph_fops = {
6529 .open = ftrace_graph_open,
6530 .read = seq_read,
6531 .write = ftrace_graph_write,
6532 .llseek = tracing_lseek,
6533 .release = ftrace_graph_release,
6534 };
6535
6536 static const struct file_operations ftrace_graph_notrace_fops = {
6537 .open = ftrace_graph_notrace_open,
6538 .read = seq_read,
6539 .write = ftrace_graph_write,
6540 .llseek = tracing_lseek,
6541 .release = ftrace_graph_release,
6542 };
6543 #endif
6544
6545 void ftrace_create_filter_files(struct ftrace_ops *ops,
6546 struct dentry *parent)
6547 {
6548
6549 trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
6550 ops, &ftrace_filter_fops);
6551
6552 trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
6553 ops, &ftrace_notrace_fops);
6554 }
6555
6556 /*
6557 * The name "destroy_filter_files" is really a misnomer. Although
6558 * in the future it may actually delete the files, for now it is
6559 * really intended to make sure the ops passed in are disabled
6560 * and that when this function returns, the caller is free to
6561 * free the ops.
6562 *
6563 * The "destroy" name is only to match the "create" name that this
6564 * should be paired with.
6565 */
6566 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6567 {
6568 mutex_lock(&ftrace_lock);
6569 if (ops->flags & FTRACE_OPS_FL_ENABLED)
6570 ftrace_shutdown(ops, 0);
6571 ops->flags |= FTRACE_OPS_FL_DELETED;
6572 ftrace_free_filter(ops);
6573 mutex_unlock(&ftrace_lock);
6574 }
6575
6576 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6577 {
6578
6579 trace_create_file("available_filter_functions", TRACE_MODE_READ,
6580 d_tracer, NULL, &ftrace_avail_fops);
6581
6582 trace_create_file("enabled_functions", TRACE_MODE_READ,
6583 d_tracer, NULL, &ftrace_enabled_fops);
6584
6585 ftrace_create_filter_files(&global_ops, d_tracer);
6586
6587 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6588 trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
6589 NULL,
6590 &ftrace_graph_fops);
6591 trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
6592 NULL,
6593 &ftrace_graph_notrace_fops);
6594 #endif
6595
6596 return 0;
6597 }
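/*
 * Usage note (editorial): the files created above are the user-space face
 * of the filtering code in this file. A typical shell session, assuming
 * tracefs is mounted at /sys/kernel/tracing:
 *
 *	echo 'sched*' > /sys/kernel/tracing/set_ftrace_filter
 *	echo '*preempt*' > /sys/kernel/tracing/set_ftrace_notrace
 *	echo do_sys_open > /sys/kernel/tracing/set_graph_function
 */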
6598
6599 static int ftrace_cmp_ips(const void *a, const void *b)
6600 {
6601 const unsigned long *ipa = a;
6602 const unsigned long *ipb = b;
6603
6604 if (*ipa > *ipb)
6605 return 1;
6606 if (*ipa < *ipb)
6607 return -1;
6608 return 0;
6609 }
6610
6611 #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
6612 static void test_is_sorted(unsigned long *start, unsigned long count)
6613 {
6614 int i;
6615
6616 for (i = 1; i < count; i++) {
6617 if (WARN(start[i - 1] > start[i],
6618 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
6619 (void *)start[i - 1], start[i - 1],
6620 (void *)start[i], start[i]))
6621 break;
6622 }
6623 if (i == count)
6624 pr_info("ftrace section at %px sorted properly\n", start);
6625 }
6626 #else
6627 static void test_is_sorted(unsigned long *start, unsigned long count)
6628 {
6629 }
6630 #endif
6631
6632 static int ftrace_process_locs(struct module *mod,
6633 unsigned long *start,
6634 unsigned long *end)
6635 {
6636 struct ftrace_page *start_pg;
6637 struct ftrace_page *pg;
6638 struct dyn_ftrace *rec;
6639 unsigned long count;
6640 unsigned long *p;
6641 unsigned long addr;
6642 unsigned long flags = 0;
6643 int ret = -ENOMEM;
6644
6645 count = end - start;
6646
6647 if (!count)
6648 return 0;
6649
6650 /*
6651 * Sorting mcount in vmlinux at build time depends on
6652 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
6653 * modules can not be sorted at build time.
6654 */
6655 if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
6656 sort(start, count, sizeof(*start),
6657 ftrace_cmp_ips, NULL);
6658 } else {
6659 test_is_sorted(start, count);
6660 }
6661
6662 start_pg = ftrace_allocate_pages(count);
6663 if (!start_pg)
6664 return -ENOMEM;
6665
6666 mutex_lock(&ftrace_lock);
6667
6668 /*
6669 * Core and each module needs their own pages, as
6670 * modules will free them when they are removed.
6671 * Force a new page to be allocated for modules.
6672 */
6673 if (!mod) {
6674 WARN_ON(ftrace_pages || ftrace_pages_start);
6675 /* First initialization */
6676 ftrace_pages = ftrace_pages_start = start_pg;
6677 } else {
6678 if (!ftrace_pages)
6679 goto out;
6680
6681 if (WARN_ON(ftrace_pages->next)) {
6682 /* Hmm, we have free pages? */
6683 while (ftrace_pages->next)
6684 ftrace_pages = ftrace_pages->next;
6685 }
6686
6687 ftrace_pages->next = start_pg;
6688 }
6689
6690 p = start;
6691 pg = start_pg;
6692 while (p < end) {
6693 unsigned long end_offset;
6694 addr = ftrace_call_adjust(*p++);
6695 /*
6696 * Some architecture linkers will pad between
6697 * the different mcount_loc sections of different
6698 * object files to satisfy alignments.
6699 * Skip any NULL pointers.
6700 */
6701 if (!addr)
6702 continue;
6703
6704 end_offset = (pg->index+1) * sizeof(pg->records[0]);
6705 if (end_offset > PAGE_SIZE << pg->order) {
6706 /* We should have allocated enough */
6707 if (WARN_ON(!pg->next))
6708 break;
6709 pg = pg->next;
6710 }
6711
6712 rec = &pg->records[pg->index++];
6713 rec->ip = addr;
6714 }
6715
6716 /* We should have used all pages */
6717 WARN_ON(pg->next);
6718
6719 /* Assign the last page to ftrace_pages */
6720 ftrace_pages = pg;
6721
6722 /*
6723 * We only need to disable interrupts on start up
6724 * because we are modifying code that an interrupt
6725 * may execute, and the modification is not atomic.
6726 * But for modules, nothing runs the code we modify
6727 * until we are finished with it, and there's no
6728 * reason to cause large interrupt latencies while we do it.
6729 */
6730 if (!mod)
6731 local_irq_save(flags);
6732 ftrace_update_code(mod, start_pg);
6733 if (!mod)
6734 local_irq_restore(flags);
6735 ret = 0;
6736 out:
6737 mutex_unlock(&ftrace_lock);
6738
6739 return ret;
6740 }
6741
6742 struct ftrace_mod_func {
6743 struct list_head list;
6744 char *name;
6745 unsigned long ip;
6746 unsigned int size;
6747 };
6748
6749 struct ftrace_mod_map {
6750 struct rcu_head rcu;
6751 struct list_head list;
6752 struct module *mod;
6753 unsigned long start_addr;
6754 unsigned long end_addr;
6755 struct list_head funcs;
6756 unsigned int num_funcs;
6757 };
6758
6759 static int ftrace_get_trampoline_kallsym(unsigned int symnum,
6760 unsigned long *value, char *type,
6761 char *name, char *module_name,
6762 int *exported)
6763 {
6764 struct ftrace_ops *op;
6765
6766 list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
6767 if (!op->trampoline || symnum--)
6768 continue;
6769 *value = op->trampoline;
6770 *type = 't';
6771 strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
6772 strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6773 *exported = 0;
6774 return 0;
6775 }
6776
6777 return -ERANGE;
6778 }
6779
6780 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
6781 /*
6782 * Check if the current ops references the given ip.
6783 *
6784 * If the ops traces all functions, then it was already accounted for.
6785 * If the ops does not trace the current record function, skip it.
6786 * If the ops ignores the function via notrace filter, skip it.
6787 */
6788 static bool
6789 ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
6790 {
6791 /* If ops isn't enabled, ignore it */
6792 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6793 return false;
6794
6795 /* If ops traces all then it includes this function */
6796 if (ops_traces_mod(ops))
6797 return true;
6798
6799 /* The function must be in the filter */
6800 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
6801 !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
6802 return false;
6803
6804 /* If in notrace hash, we ignore it too */
6805 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
6806 return false;
6807
6808 return true;
6809 }
6810 #endif
6811
6812 #ifdef CONFIG_MODULES
6813
6814 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6815
6816 static LIST_HEAD(ftrace_mod_maps);
6817
6818 static int referenced_filters(struct dyn_ftrace *rec)
6819 {
6820 struct ftrace_ops *ops;
6821 int cnt = 0;
6822
6823 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6824 if (ops_references_ip(ops, rec->ip)) {
6825 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
6826 continue;
6827 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
6828 continue;
6829 cnt++;
6830 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
6831 rec->flags |= FTRACE_FL_REGS;
6832 if (cnt == 1 && ops->trampoline)
6833 rec->flags |= FTRACE_FL_TRAMP;
6834 else
6835 rec->flags &= ~FTRACE_FL_TRAMP;
6836 }
6837 }
6838
6839 return cnt;
6840 }
6841
6842 static void
6843 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6844 {
6845 struct ftrace_func_entry *entry;
6846 struct dyn_ftrace *rec;
6847 int i;
6848
6849 if (ftrace_hash_empty(hash))
6850 return;
6851
6852 for (i = 0; i < pg->index; i++) {
6853 rec = &pg->records[i];
6854 entry = __ftrace_lookup_ip(hash, rec->ip);
6855 /*
6856 * Do not allow this rec to match again.
6857 * Yeah, it may waste some memory, but will be removed
6858 * if/when the hash is modified again.
6859 */
6860 if (entry)
6861 entry->ip = 0;
6862 }
6863 }
6864
6865 /* Clear any records from hashes */
6866 static void clear_mod_from_hashes(struct ftrace_page *pg)
6867 {
6868 struct trace_array *tr;
6869
6870 mutex_lock(&trace_types_lock);
6871 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6872 if (!tr->ops || !tr->ops->func_hash)
6873 continue;
6874 mutex_lock(&tr->ops->func_hash->regex_lock);
6875 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6876 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6877 mutex_unlock(&tr->ops->func_hash->regex_lock);
6878 }
6879 mutex_unlock(&trace_types_lock);
6880 }
6881
6882 static void ftrace_free_mod_map(struct rcu_head *rcu)
6883 {
6884 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6885 struct ftrace_mod_func *mod_func;
6886 struct ftrace_mod_func *n;
6887
6888 /* All the contents of mod_map are now not visible to readers */
6889 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6890 kfree(mod_func->name);
6891 list_del(&mod_func->list);
6892 kfree(mod_func);
6893 }
6894
6895 kfree(mod_map);
6896 }
6897
6898 void ftrace_release_mod(struct module *mod)
6899 {
6900 struct ftrace_mod_map *mod_map;
6901 struct ftrace_mod_map *n;
6902 struct dyn_ftrace *rec;
6903 struct ftrace_page **last_pg;
6904 struct ftrace_page *tmp_page = NULL;
6905 struct ftrace_page *pg;
6906
6907 mutex_lock(&ftrace_lock);
6908
6909 if (ftrace_disabled)
6910 goto out_unlock;
6911
6912 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6913 if (mod_map->mod == mod) {
6914 list_del_rcu(&mod_map->list);
6915 call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6916 break;
6917 }
6918 }
6919
6920 /*
6921 * Each module has its own ftrace_pages, remove
6922 * them from the list.
6923 */
6924 last_pg = &ftrace_pages_start;
6925 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6926 rec = &pg->records[0];
6927 if (within_module_core(rec->ip, mod) ||
6928 within_module_init(rec->ip, mod)) {
6929 /*
6930 * As core pages are first, the first
6931 * page should never be a module page.
6932 */
6933 if (WARN_ON(pg == ftrace_pages_start))
6934 goto out_unlock;
6935
6936 /* Check if we are deleting the last page */
6937 if (pg == ftrace_pages)
6938 ftrace_pages = next_to_ftrace_page(last_pg);
6939
6940 ftrace_update_tot_cnt -= pg->index;
6941 *last_pg = pg->next;
6942
6943 pg->next = tmp_page;
6944 tmp_page = pg;
6945 } else
6946 last_pg = &pg->next;
6947 }
6948 out_unlock:
6949 mutex_unlock(&ftrace_lock);
6950
6951 for (pg = tmp_page; pg; pg = tmp_page) {
6952
6953 /* Needs to be called outside of ftrace_lock */
6954 clear_mod_from_hashes(pg);
6955
6956 if (pg->records) {
6957 free_pages((unsigned long)pg->records, pg->order);
6958 ftrace_number_of_pages -= 1 << pg->order;
6959 }
6960 tmp_page = pg->next;
6961 kfree(pg);
6962 ftrace_number_of_groups--;
6963 }
6964 }
6965
6966 void ftrace_module_enable(struct module *mod)
6967 {
6968 struct dyn_ftrace *rec;
6969 struct ftrace_page *pg;
6970
6971 mutex_lock(&ftrace_lock);
6972
6973 if (ftrace_disabled)
6974 goto out_unlock;
6975
6976 /*
6977 * If the tracing is enabled, go ahead and enable the record.
6978 *
6979 * The reason not to enable the record immediately is the
6980 * inherent check of ftrace_make_nop/ftrace_make_call for
6981 * correct previous instructions. Making ftrace accept
6982 * records when the updates are not enabled will break
6983 * the consistency of the internal book keeping. That is
6984 * why a module's records are kept disabled until this
6985 * point: only now that the module text is in its final
6986 * state can the records be switched to their enabled
6987 * state and patched for any active callbacks.
6988 */
6989 if (ftrace_start_up)
6990 ftrace_arch_code_modify_prepare();
6991
6992 do_for_each_ftrace_rec(pg, rec) {
6993 int cnt;
6994 /*
6995 * do_for_each_ftrace_rec() is a double loop.
6996 * module text shares the pg. If a record is
6997 * not part of this module, then skip this pg,
6998 * which the "break" will do.
6999 */
7000 if (!within_module_core(rec->ip, mod) &&
7001 !within_module_init(rec->ip, mod))
7002 break;
7003
7004 /* Weak functions should still be ignored */
7005 if (!test_for_valid_rec(rec)) {
7006 /* Clear all other flags. Should not be enabled anyway */
7007 rec->flags = FTRACE_FL_DISABLED;
7008 continue;
7009 }
7010
7011 cnt = 0;
7012
7013 /*
7014 * When adding a module, we need to check if tracers are
7015 * currently enabled and if they are, and can trace this record,
7016 * we need to enable the module functions as well as update the
7017 * reference counts for those function records.
7018 */
7019 if (ftrace_start_up)
7020 cnt += referenced_filters(rec);
7021
7022 rec->flags &= ~FTRACE_FL_DISABLED;
7023 rec->flags += cnt;
7024
7025 if (ftrace_start_up && cnt) {
7026 int failed = __ftrace_replace_code(rec, 1);
7027 if (failed) {
7028 ftrace_bug(failed, rec);
7029 goto out_loop;
7030 }
7031 }
7032
7033 } while_for_each_ftrace_rec();
7034
7035 out_loop:
7036 if (ftrace_start_up)
7037 ftrace_arch_code_modify_post_process();
7038
7039 out_unlock:
7040 mutex_unlock(&ftrace_lock);
7041
7042 process_cached_mods(mod->name);
7043 }
7044
7045 void ftrace_module_init(struct module *mod)
7046 {
7047 int ret;
7048
7049 if (ftrace_disabled || !mod->num_ftrace_callsites)
7050 return;
7051
7052 ret = ftrace_process_locs(mod, mod->ftrace_callsites,
7053 mod->ftrace_callsites + mod->num_ftrace_callsites);
7054 if (ret)
7055 pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
7056 mod->name);
7057 }
7058
7059 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7060 struct dyn_ftrace *rec)
7061 {
7062 struct ftrace_mod_func *mod_func;
7063 unsigned long symsize;
7064 unsigned long offset;
7065 char str[KSYM_SYMBOL_LEN];
7066 char *modname;
7067 const char *ret;
7068
7069 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
7070 if (!ret)
7071 return;
7072
7073 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
7074 if (!mod_func)
7075 return;
7076
7077 mod_func->name = kstrdup(str, GFP_KERNEL);
7078 if (!mod_func->name) {
7079 kfree(mod_func);
7080 return;
7081 }
7082
7083 mod_func->ip = rec->ip - offset;
7084 mod_func->size = symsize;
7085
7086 mod_map->num_funcs++;
7087
7088 list_add_rcu(&mod_func->list, &mod_map->funcs);
7089 }
7090
7091 static struct ftrace_mod_map *
7092 allocate_ftrace_mod_map(struct module *mod,
7093 unsigned long start, unsigned long end)
7094 {
7095 struct ftrace_mod_map *mod_map;
7096
7097 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
7098 if (!mod_map)
7099 return NULL;
7100
7101 mod_map->mod = mod;
7102 mod_map->start_addr = start;
7103 mod_map->end_addr = end;
7104 mod_map->num_funcs = 0;
7105
7106 INIT_LIST_HEAD_RCU(&mod_map->funcs);
7107
7108 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
7109
7110 return mod_map;
7111 }
7112
7113 static const char *
7114 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
7115 unsigned long addr, unsigned long *size,
7116 unsigned long *off, char *sym)
7117 {
7118 struct ftrace_mod_func *found_func = NULL;
7119 struct ftrace_mod_func *mod_func;
7120
7121 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7122 if (addr >= mod_func->ip &&
7123 addr < mod_func->ip + mod_func->size) {
7124 found_func = mod_func;
7125 break;
7126 }
7127 }
7128
7129 if (found_func) {
7130 if (size)
7131 *size = found_func->size;
7132 if (off)
7133 *off = addr - found_func->ip;
7134 if (sym)
7135 strlcpy(sym, found_func->name, KSYM_NAME_LEN);
7136
7137 return found_func->name;
7138 }
7139
7140 return NULL;
7141 }
7142
7143 const char *
7144 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7145 unsigned long *off, char **modname, char *sym)
7146 {
7147 struct ftrace_mod_map *mod_map;
7148 const char *ret = NULL;
7149
7150 /* mod_map is freed via call_rcu() */
7151 preempt_disable();
7152 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7153 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7154 if (ret) {
7155 if (modname)
7156 *modname = mod_map->mod->name;
7157 break;
7158 }
7159 }
7160 preempt_enable();
7161
7162 return ret;
7163 }
7164
7165 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7166 char *type, char *name,
7167 char *module_name, int *exported)
7168 {
7169 struct ftrace_mod_map *mod_map;
7170 struct ftrace_mod_func *mod_func;
7171 int ret;
7172
7173 preempt_disable();
7174 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7175
7176 if (symnum >= mod_map->num_funcs) {
7177 symnum -= mod_map->num_funcs;
7178 continue;
7179 }
7180
7181 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7182 if (symnum > 1) {
7183 symnum--;
7184 continue;
7185 }
7186
7187 *value = mod_func->ip;
7188 *type = 'T';
7189 strlcpy(name, mod_func->name, KSYM_NAME_LEN);
7190 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7191 *exported = 1;
7192 preempt_enable();
7193 return 0;
7194 }
7195 WARN_ON(1);
7196 break;
7197 }
7198 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7199 module_name, exported);
7200 preempt_enable();
7201 return ret;
7202 }
7203
7204 #else
7205 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7206 struct dyn_ftrace *rec) { }
7207 static inline struct ftrace_mod_map *
7208 allocate_ftrace_mod_map(struct module *mod,
7209 unsigned long start, unsigned long end)
7210 {
7211 return NULL;
7212 }
7213 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7214 char *type, char *name, char *module_name,
7215 int *exported)
7216 {
7217 int ret;
7218
7219 preempt_disable();
7220 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7221 module_name, exported);
7222 preempt_enable();
7223 return ret;
7224 }
7225 #endif
7226
7227 struct ftrace_init_func {
7228 struct list_head list;
7229 unsigned long ip;
7230 };
7231
7232 /* Clear any init ips from hashes */
7233 static void
7234 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
7235 {
7236 struct ftrace_func_entry *entry;
7237
7238 entry = ftrace_lookup_ip(hash, func->ip);
7239 /*
7240 * Do not allow this rec to match again.
7241 * Yeah, it may waste some memory, but will be removed
7242 * if/when the hash is modified again.
7243 */
7244 if (entry)
7245 entry->ip = 0;
7246 }
7247
7248 static void
7249 clear_func_from_hashes(struct ftrace_init_func *func)
7250 {
7251 struct trace_array *tr;
7252
7253 mutex_lock(&trace_types_lock);
7254 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7255 if (!tr->ops || !tr->ops->func_hash)
7256 continue;
7257 mutex_lock(&tr->ops->func_hash->regex_lock);
7258 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7259 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7260 mutex_unlock(&tr->ops->func_hash->regex_lock);
7261 }
7262 mutex_unlock(&trace_types_lock);
7263 }
7264
7265 static void add_to_clear_hash_list(struct list_head *clear_list,
7266 struct dyn_ftrace *rec)
7267 {
7268 struct ftrace_init_func *func;
7269
7270 func = kmalloc(sizeof(*func), GFP_KERNEL);
7271 if (!func) {
7272 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7273 return;
7274 }
7275
7276 func->ip = rec->ip;
7277 list_add(&func->list, clear_list);
7278 }
7279
7280 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
7281 {
7282 unsigned long start = (unsigned long)(start_ptr);
7283 unsigned long end = (unsigned long)(end_ptr);
7284 struct ftrace_page **last_pg = &ftrace_pages_start;
7285 struct ftrace_page *pg;
7286 struct dyn_ftrace *rec;
7287 struct dyn_ftrace key;
7288 struct ftrace_mod_map *mod_map = NULL;
7289 struct ftrace_init_func *func, *func_next;
7290 struct list_head clear_hash;
7291
7292 INIT_LIST_HEAD(&clear_hash);
7293
7294 key.ip = start;
7295 key.flags = end;
7296
7297 mutex_lock(&ftrace_lock);
7298
7299 /*
7300 * If we are freeing module init memory, then check if
7301 * any tracer is active. If so, we need to save a mapping of
7302 * the module functions being freed with the address.
7303 */
7304 if (mod && ftrace_ops_list != &ftrace_list_end)
7305 mod_map = allocate_ftrace_mod_map(mod, start, end);
7306
7307 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7308 if (end < pg->records[0].ip ||
7309 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7310 continue;
7311 again:
7312 rec = bsearch(&key, pg->records, pg->index,
7313 sizeof(struct dyn_ftrace),
7314 ftrace_cmp_recs);
7315 if (!rec)
7316 continue;
7317
7318 /* rec will be cleared from hashes after ftrace_lock unlock */
7319 add_to_clear_hash_list(&clear_hash, rec);
7320
7321 if (mod_map)
7322 save_ftrace_mod_rec(mod_map, rec);
7323
7324 pg->index--;
7325 ftrace_update_tot_cnt--;
7326 if (!pg->index) {
7327 *last_pg = pg->next;
7328 if (pg->records) {
7329 free_pages((unsigned long)pg->records, pg->order);
7330 ftrace_number_of_pages -= 1 << pg->order;
7331 }
7332 ftrace_number_of_groups--;
7333 kfree(pg);
7334 pg = container_of(last_pg, struct ftrace_page, next);
7335 if (!(*last_pg))
7336 ftrace_pages = pg;
7337 continue;
7338 }
7339 memmove(rec, rec + 1,
7340 (pg->index - (rec - pg->records)) * sizeof(*rec));
7341
7342 goto again;
7343 }
7344 mutex_unlock(&ftrace_lock);
7345
7346 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7347 clear_func_from_hashes(func);
7348 kfree(func);
7349 }
7350 }
7351
7352 void __init ftrace_free_init_mem(void)
7353 {
7354 void *start = (void *)(&__init_begin);
7355 void *end = (void *)(&__init_end);
7356
7357 ftrace_boot_snapshot();
7358
7359 ftrace_free_mem(NULL, start, end);
7360 }
7361
7362 int __init __weak ftrace_dyn_arch_init(void)
7363 {
7364 return 0;
7365 }
7366
7367 void __init ftrace_init(void)
7368 {
7369 extern unsigned long __start_mcount_loc[];
7370 extern unsigned long __stop_mcount_loc[];
7371 unsigned long count, flags;
7372 int ret;
7373
7374 local_irq_save(flags);
7375 ret = ftrace_dyn_arch_init();
7376 local_irq_restore(flags);
7377 if (ret)
7378 goto failed;
7379
7380 count = __stop_mcount_loc - __start_mcount_loc;
7381 if (!count) {
7382 pr_info("ftrace: No functions to be traced?\n");
7383 goto failed;
7384 }
7385
7386 pr_info("ftrace: allocating %ld entries in %ld pages\n",
7387 count, count / ENTRIES_PER_PAGE + 1);
7388
7389 ret = ftrace_process_locs(NULL,
7390 __start_mcount_loc,
7391 __stop_mcount_loc);
7392 if (ret) {
7393 pr_warn("ftrace: failed to allocate entries for functions\n");
7394 goto failed;
7395 }
7396
7397 pr_info("ftrace: allocated %ld pages with %ld groups\n",
7398 ftrace_number_of_pages, ftrace_number_of_groups);
7399
7400 last_ftrace_enabled = ftrace_enabled = 1;
7401
7402 set_ftrace_early_filters();
7403
7404 return;
7405 failed:
7406 ftrace_disabled = 1;
7407 }
7408
7409 /* Do nothing if arch does not support this */
7410 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7411 {
7412 }
7413
7414 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7415 {
7416 unsigned long trampoline = ops->trampoline;
7417
7418 arch_ftrace_update_trampoline(ops);
7419 if (ops->trampoline && ops->trampoline != trampoline &&
7420 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
7421 /* Add to kallsyms before the perf events */
7422 ftrace_add_trampoline_to_kallsyms(ops);
7423 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7424 ops->trampoline, ops->trampoline_size, false,
7425 FTRACE_TRAMPOLINE_SYM);
7426 /*
7427 * Record the perf text poke event after the ksymbol register
7428 * event.
7429 */
7430 perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7431 (void *)ops->trampoline,
7432 ops->trampoline_size);
7433 }
7434 }
7435
7436 void ftrace_init_trace_array(struct trace_array *tr)
7437 {
7438 INIT_LIST_HEAD(&tr->func_probes);
7439 INIT_LIST_HEAD(&tr->mod_trace);
7440 INIT_LIST_HEAD(&tr->mod_notrace);
7441 }
7442 #else
7443
7444 struct ftrace_ops global_ops = {
7445 .func = ftrace_stub,
7446 .flags = FTRACE_OPS_FL_INITIALIZED |
7447 FTRACE_OPS_FL_PID,
7448 };
7449
7450 static int __init ftrace_nodyn_init(void)
7451 {
7452 ftrace_enabled = 1;
7453 return 0;
7454 }
7455 core_initcall(ftrace_nodyn_init);
7456
7457 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
7458 static inline void ftrace_startup_all(int command) { }
7459
7460 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7461 {
7462 }
7463
7464 #endif
7465
7466 __init void ftrace_init_global_array_ops(struct trace_array *tr)
7467 {
7468 tr->ops = &global_ops;
7469 tr->ops->private = tr;
7470 ftrace_init_trace_array(tr);
7471 }
7472
7473 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7474 {
7475 /* If we filter on pids, update to use the pid function */
7476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7477 if (WARN_ON(tr->ops->func != ftrace_stub))
7478 printk("ftrace ops had %pS for function\n",
7479 tr->ops->func);
7480 }
7481 tr->ops->func = func;
7482 tr->ops->private = tr;
7483 }
7484
7485 void ftrace_reset_array_ops(struct trace_array *tr)
7486 {
7487 tr->ops->func = ftrace_stub;
7488 }
7489
7490 static nokprobe_inline void
7491 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7492 struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7493 {
7494 struct pt_regs *regs = ftrace_get_regs(fregs);
7495 struct ftrace_ops *op;
7496 int bit;
7497
7498 /*
7499 * The ftrace_test_and_set_recursion() will disable preemption,
7500 * which is required since some of the ops may be dynamically
7501 * allocated, they must be freed after a synchronize_rcu().
7502 */
7503 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7504 if (bit < 0)
7505 return;
7506
7507 do_for_each_ftrace_op(op, ftrace_ops_list) {
7508 /* Stub functions don't need to be called nor tested */
7509 if (op->flags & FTRACE_OPS_FL_STUB)
7510 continue;
7511
7512 /*
7513 * Check the following for each ops before calling their func:
7514 *  if the RCU flag is set, then rcu_is_watching() must be true,
7515 *   as some callbacks may run where RCU is not watching;
7516 *  otherwise test if the ip matches the ops filter.
7517 *
7518 * If any of the above fails then the op->func() is not executed.
7519 */
7520 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7521 ftrace_ops_test(op, ip, regs)) {
7522 if (FTRACE_WARN_ON(!op->func)) {
7523 pr_warn("op=%p %pS\n", op, op);
7524 goto out;
7525 }
7526 op->func(ip, parent_ip, op, fregs);
7527 }
7528 } while_for_each_ftrace_op(op);
7529 out:
7530 trace_clear_recursion(bit);
7531 }
7532
7533 /*
7534 * Some archs only support passing ip and parent_ip. Even though
7535 * the list function ignores the op parameter, we do not want any
7536 * C side effects, where the function is called without it.
7537 * Archs are to support both the regs and ftrace_ops at the same time.
7538 * If they support ftrace_ops, it is assumed they support regs.
7539 * If call backs want to use regs, they must either check for regs
7540 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
7541 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
7542 * An architecture can pass partial regs with ftrace_ops and still
7543 * set the ARCH_SUPPORTS_FTRACE_OPS.
7544 *
7545 * In addition, ARCH_SUPPORTS_FTRACE_OPS selects which of the two
7546 * arch_ftrace_ops_list_func() variants below is built, since only
7547 * archs that support ftrace_ops receive the op and fregs arguments.
7548 */
7549 #if ARCH_SUPPORTS_FTRACE_OPS
7550 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7551 struct ftrace_ops *op, struct ftrace_regs *fregs)
7552 {
7553 __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7554 }
7555 #else
7556 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
7557 {
7558 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7559 }
7560 #endif
7561 NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
7562
7563 /*
7564 * If there's only one function registered but it does not support
7565 * recursion, or it needs RCU protection, then the mcount trampoline
7566 * will call this function instead of the ops->func directly.
7567 */
7568 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7569 struct ftrace_ops *op, struct ftrace_regs *fregs)
7570 {
7571 int bit;
7572
7573 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7574 if (bit < 0)
7575 return;
7576
7577 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7578 op->func(ip, parent_ip, op, fregs);
7579
7580 trace_clear_recursion(bit);
7581 }
7582 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7583
7584 /**
7585 * ftrace_ops_get_func - get the function a trampoline should call
7586 * @ops: the ops to get the function for
7587 *
7588 * Normally the mcount trampoline will call the ops->func, but there
7589 * are times that it should not. For example, if the ops does not
7590 * have its own recursion protection, then it should call the
7591 * ftrace_ops_assist_func() instead.
7592 *
7593 * Returns the function that the trampoline should call for @ops.
7594 */
7595 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7596 {
7597 /*
7598 * If the function does not handle recursion or needs to be
7599 * RCU safe, then we need to call the assist handler.
7600 */
7601 if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7602 FTRACE_OPS_FL_RCU))
7603 return ftrace_ops_assist_func;
7604
7605 return ops->func;
7606 }
7607
7608 static void
7609 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7610 struct task_struct *prev,
7611 struct task_struct *next,
7612 unsigned int prev_state)
7613 {
7614 struct trace_array *tr = data;
7615 struct trace_pid_list *pid_list;
7616 struct trace_pid_list *no_pid_list;
7617
7618 pid_list = rcu_dereference_sched(tr->function_pids);
7619 no_pid_list = rcu_dereference_sched(tr->function_no_pids);
7620
7621 if (trace_ignore_this_task(pid_list, no_pid_list, next))
7622 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7623 FTRACE_PID_IGNORE);
7624 else
7625 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7626 next->pid);
7627 }
7628
7629 static void
7630 ftrace_pid_follow_sched_process_fork(void *data,
7631 struct task_struct *self,
7632 struct task_struct *task)
7633 {
7634 struct trace_pid_list *pid_list;
7635 struct trace_array *tr = data;
7636
7637 pid_list = rcu_dereference_sched(tr->function_pids);
7638 trace_filter_add_remove_task(pid_list, self, task);
7639
7640 pid_list = rcu_dereference_sched(tr->function_no_pids);
7641 trace_filter_add_remove_task(pid_list, self, task);
7642 }
7643
7644 static void
7645 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
7646 {
7647 struct trace_pid_list *pid_list;
7648 struct trace_array *tr = data;
7649
7650 pid_list = rcu_dereference_sched(tr->function_pids);
7651 trace_filter_add_remove_task(pid_list, NULL, task);
7652
7653 pid_list = rcu_dereference_sched(tr->function_no_pids);
7654 trace_filter_add_remove_task(pid_list, NULL, task);
7655 }
7656
7657 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
7658 {
7659 if (enable) {
7660 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7661 tr);
7662 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7663 tr);
7664 } else {
7665 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7666 tr);
7667 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7668 tr);
7669 }
7670 }
7671
7672 static void clear_ftrace_pids(struct trace_array *tr, int type)
7673 {
7674 struct trace_pid_list *pid_list;
7675 struct trace_pid_list *no_pid_list;
7676 int cpu;
7677
7678 pid_list = rcu_dereference_protected(tr->function_pids,
7679 lockdep_is_held(&ftrace_lock));
7680 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7681 lockdep_is_held(&ftrace_lock));
7682
7683 /* Make sure there's something to do */
7684 if (!pid_type_enabled(type, pid_list, no_pid_list))
7685 return;
7686
7687 /* See if the pids still need to be checked after this */
7688 if (!still_need_pid_events(type, pid_list, no_pid_list)) {
7689 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7690 for_each_possible_cpu(cpu)
7691 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
7692 }
7693
7694 if (type & TRACE_PIDS)
7695 rcu_assign_pointer(tr->function_pids, NULL);
7696
7697 if (type & TRACE_NO_PIDS)
7698 rcu_assign_pointer(tr->function_no_pids, NULL);
7699
7700 /* Wait till all users are no longer using pid filtering */
7701 synchronize_rcu();
7702
7703 if ((type & TRACE_PIDS) && pid_list)
7704 trace_pid_list_free(pid_list);
7705
7706 if ((type & TRACE_NO_PIDS) && no_pid_list)
7707 trace_pid_list_free(no_pid_list);
7708 }
7709
7710 void ftrace_clear_pids(struct trace_array *tr)
7711 {
7712 mutex_lock(&ftrace_lock);
7713
7714 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
7715
7716 mutex_unlock(&ftrace_lock);
7717 }
7718
7719 static void ftrace_pid_reset(struct trace_array *tr, int type)
7720 {
7721 mutex_lock(&ftrace_lock);
7722 clear_ftrace_pids(tr, type);
7723
7724 ftrace_update_pid_func();
7725 ftrace_startup_all(0);
7726
7727 mutex_unlock(&ftrace_lock);
7728 }
7729
7730 /* Greater than any max PID */
7731 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
7732
7733 static void *fpid_start(struct seq_file *m, loff_t *pos)
7734 __acquires(RCU)
7735 {
7736 struct trace_pid_list *pid_list;
7737 struct trace_array *tr = m->private;
7738
7739 mutex_lock(&ftrace_lock);
7740 rcu_read_lock_sched();
7741
7742 pid_list = rcu_dereference_sched(tr->function_pids);
7743
7744 if (!pid_list)
7745 return !(*pos) ? FTRACE_NO_PIDS : NULL;
7746
7747 return trace_pid_start(pid_list, pos);
7748 }
7749
7750 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7751 {
7752 struct trace_array *tr = m->private;
7753 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7754
7755 if (v == FTRACE_NO_PIDS) {
7756 (*pos)++;
7757 return NULL;
7758 }
7759 return trace_pid_next(pid_list, v, pos);
7760 }
7761
7762 static void fpid_stop(struct seq_file *m, void *p)
7763 __releases(RCU)
7764 {
7765 rcu_read_unlock_sched();
7766 mutex_unlock(&ftrace_lock);
7767 }
7768
7769 static int fpid_show(struct seq_file *m, void *v)
7770 {
7771 if (v == FTRACE_NO_PIDS) {
7772 seq_puts(m, "no pid\n");
7773 return 0;
7774 }
7775
7776 return trace_pid_show(m, v);
7777 }
7778
7779 static const struct seq_operations ftrace_pid_sops = {
7780 .start = fpid_start,
7781 .next = fpid_next,
7782 .stop = fpid_stop,
7783 .show = fpid_show,
7784 };
7785
7786 static void *fnpid_start(struct seq_file *m, loff_t *pos)
7787 __acquires(RCU)
7788 {
7789 struct trace_pid_list *pid_list;
7790 struct trace_array *tr = m->private;
7791
7792 mutex_lock(&ftrace_lock);
7793 rcu_read_lock_sched();
7794
7795 pid_list = rcu_dereference_sched(tr->function_no_pids);
7796
7797 if (!pid_list)
7798 return !(*pos) ? FTRACE_NO_PIDS : NULL;
7799
7800 return trace_pid_start(pid_list, pos);
7801 }
7802
7803 static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
7804 {
7805 struct trace_array *tr = m->private;
7806 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
7807
7808 if (v == FTRACE_NO_PIDS) {
7809 (*pos)++;
7810 return NULL;
7811 }
7812 return trace_pid_next(pid_list, v, pos);
7813 }
7814
7815 static const struct seq_operations ftrace_no_pid_sops = {
7816 .start = fnpid_start,
7817 .next = fnpid_next,
7818 .stop = fpid_stop,
7819 .show = fpid_show,
7820 };
7821
7822 static int pid_open(struct inode *inode, struct file *file, int type)
7823 {
7824 const struct seq_operations *seq_ops;
7825 struct trace_array *tr = inode->i_private;
7826 struct seq_file *m;
7827 int ret = 0;
7828
7829 ret = tracing_check_open_get_tr(tr);
7830 if (ret)
7831 return ret;
7832
7833 if ((file->f_mode & FMODE_WRITE) &&
7834 (file->f_flags & O_TRUNC))
7835 ftrace_pid_reset(tr, type);
7836
7837 switch (type) {
7838 case TRACE_PIDS:
7839 seq_ops = &ftrace_pid_sops;
7840 break;
7841 case TRACE_NO_PIDS:
7842 seq_ops = &ftrace_no_pid_sops;
7843 break;
7844 default:
7845 trace_array_put(tr);
7846 WARN_ON_ONCE(1);
7847 return -EINVAL;
7848 }
7849
7850 ret = seq_open(file, seq_ops);
7851 if (ret < 0) {
7852 trace_array_put(tr);
7853 } else {
7854 m = file->private_data;
7855
7856 m->private = tr;
7857 }
7858
7859 return ret;
7860 }
7861
7862 static int
7863 ftrace_pid_open(struct inode *inode, struct file *file)
7864 {
7865 return pid_open(inode, file, TRACE_PIDS);
7866 }
7867
7868 static int
7869 ftrace_no_pid_open(struct inode *inode, struct file *file)
7870 {
7871 return pid_open(inode, file, TRACE_NO_PIDS);
7872 }
7873
7874 static void ignore_task_cpu(void *data)
7875 {
7876 struct trace_array *tr = data;
7877 struct trace_pid_list *pid_list;
7878 struct trace_pid_list *no_pid_list;
7879
7880 /*
7881 * This function is called by on_each_cpu() while
7882 * ftrace_lock is held.
7883 */
7884 pid_list = rcu_dereference_protected(tr->function_pids,
7885 mutex_is_locked(&ftrace_lock));
7886 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7887 mutex_is_locked(&ftrace_lock));
7888
7889 if (trace_ignore_this_task(pid_list, no_pid_list, current))
7890 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7891 FTRACE_PID_IGNORE);
7892 else
7893 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7894 current->pid);
7895 }
7896
7897 static ssize_t
7898 pid_write(struct file *filp, const char __user *ubuf,
7899 size_t cnt, loff_t *ppos, int type)
7900 {
7901 struct seq_file *m = filp->private_data;
7902 struct trace_array *tr = m->private;
7903 struct trace_pid_list *filtered_pids;
7904 struct trace_pid_list *other_pids;
7905 struct trace_pid_list *pid_list;
7906 ssize_t ret;
7907
7908 if (!cnt)
7909 return 0;
7910
7911 mutex_lock(&ftrace_lock);
7912
7913 switch (type) {
7914 case TRACE_PIDS:
7915 filtered_pids = rcu_dereference_protected(tr->function_pids,
7916 lockdep_is_held(&ftrace_lock));
7917 other_pids = rcu_dereference_protected(tr->function_no_pids,
7918 lockdep_is_held(&ftrace_lock));
7919 break;
7920 case TRACE_NO_PIDS:
7921 filtered_pids = rcu_dereference_protected(tr->function_no_pids,
7922 lockdep_is_held(&ftrace_lock));
7923 other_pids = rcu_dereference_protected(tr->function_pids,
7924 lockdep_is_held(&ftrace_lock));
7925 break;
7926 default:
7927 ret = -EINVAL;
7928 WARN_ON_ONCE(1);
7929 goto out;
7930 }
7931
7932 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7933 if (ret < 0)
7934 goto out;
7935
7936 switch (type) {
7937 case TRACE_PIDS:
7938 rcu_assign_pointer(tr->function_pids, pid_list);
7939 break;
7940 case TRACE_NO_PIDS:
7941 rcu_assign_pointer(tr->function_no_pids, pid_list);
7942 break;
7943 }
7944
7945
7946 if (filtered_pids) {
7947 synchronize_rcu();
7948 trace_pid_list_free(filtered_pids);
7949 } else if (pid_list && !other_pids) {
7950 /* Register a probe to set whether to ignore the tracing of a task */
7951 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7952 }
7953
7954 /*
7955 * Ignoring of pids is done at task switch. But we have to check for
7956 * those tasks that are currently running. Always updating the
7957 * pid_task flag seems cheaper than checking if it is updated or not.
7958 */
7959 on_each_cpu(ignore_task_cpu, tr, 1);
7960
7961 ftrace_update_pid_func();
7962 ftrace_startup_all(0);
7963 out:
7964 mutex_unlock(&ftrace_lock);
7965
7966 if (ret > 0)
7967 *ppos += ret;
7968
7969 return ret;
7970 }
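/*
 * The update above follows the usual RCU publish/retire pattern: the new
 * pid list is published with rcu_assign_pointer(), readers that may still
 * hold the old list are waited out with synchronize_rcu(), and only then
 * is the old list freed. A minimal sketch of the same idiom (illustrative
 * only; my_ptr, my_lock and struct my_data are made-up names):
 *
 *	struct my_data *old, *new;
 *
 *	new = kmalloc(sizeof(*new), GFP_KERNEL);
 *	old = rcu_dereference_protected(my_ptr, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_ptr, new);
 *	if (old) {
 *		synchronize_rcu();	(wait out pre-existing readers)
 *		kfree(old);
 *	}
 */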
7971
7972 static ssize_t
7973 ftrace_pid_write(struct file *filp, const char __user *ubuf,
7974 size_t cnt, loff_t *ppos)
7975 {
7976 return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7977 }
7978
7979 static ssize_t
7980 ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
7981 size_t cnt, loff_t *ppos)
7982 {
7983 return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7984 }
7985
7986 static int
7987 ftrace_pid_release(struct inode *inode, struct file *file)
7988 {
7989 struct trace_array *tr = inode->i_private;
7990
7991 trace_array_put(tr);
7992
7993 return seq_release(inode, file);
7994 }
7995
7996 static const struct file_operations ftrace_pid_fops = {
7997 .open = ftrace_pid_open,
7998 .write = ftrace_pid_write,
7999 .read = seq_read,
8000 .llseek = tracing_lseek,
8001 .release = ftrace_pid_release,
8002 };
8003
8004 static const struct file_operations ftrace_no_pid_fops = {
8005 .open = ftrace_no_pid_open,
8006 .write = ftrace_no_pid_write,
8007 .read = seq_read,
8008 .llseek = tracing_lseek,
8009 .release = ftrace_pid_release,
8010 };
8011
8012 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8013 {
8014 trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
8015 tr, &ftrace_pid_fops);
8016 trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
8017 d_tracer, tr, &ftrace_no_pid_fops);
8018 }
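/*
 * The two files created above are consumed from user space. A minimal
 * sketch (illustrative only; assumes tracefs is mounted at
 * /sys/kernel/tracing) that restricts function tracing to one PID:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/kernel/tracing/set_ftrace_pid", "w");
 *
 *		if (!f)
 *			return 1;
 *		fprintf(f, "%d\n", 1234);	(trace only PID 1234)
 *		fclose(f);
 *		return 0;
 *	}
 *
 * Writing to set_ftrace_notrace_pid instead excludes the listed PIDs, and
 * opening either file with O_TRUNC clears the current list (see
 * pid_open() above).
 */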
8019
8020 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
8021 struct dentry *d_tracer)
8022 {
8023 /* Only the top level directory has the dyn_tracefs and profile */
8024 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
8025
8026 ftrace_init_dyn_tracefs(d_tracer);
8027 ftrace_profile_tracefs(d_tracer);
8028 }
8029
8030 /**
8031 * ftrace_kill - kill ftrace
8032 *
8033 * This function should be used by panic code. It stops ftrace but in a
8034 * not so nice way: tracing is disabled on the spot, with none of the
8035 * usual synchronization or cleanup of a normal shutdown.
8036 */
8037 void ftrace_kill(void)
8038 {
8039 ftrace_disabled = 1;
8040 ftrace_enabled = 0;
8041 ftrace_trace_function = ftrace_stub;
8042 }
8043
8044 /**
8045 * ftrace_is_dead - Test if ftrace is dead or not.
8046 *
8047 * Returns 1 if ftrace is "dead", zero otherwise.
8048 */
8049 int ftrace_is_dead(void)
8050 {
8051 return ftrace_disabled;
8052 }
8053
8054 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
8055 /*
8056 * When registering an ftrace_ops with IPMODIFY (e.g. livepatch), it must
8057 * not conflict with any ftrace_ops using DIRECT calls on the same
8058 * functions. For every function in the ops' filter hash, look for a
8059 * DIRECT ops that also traces it and ask that ops, via its ops_func
8060 * callback, to share the entry (FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER).
8061 *
8062 * Called with direct_mutex held. Returns 0 on success, -EBUSY if a
8063 * conflicting DIRECT ops provides no ops_func, or the error returned by
8064 * that ops_func. On error, the caller is expected to undo the work with
8065 * cleanup_direct_functions_after_ipmodify().
8066 */
8067 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8068 {
8069 struct ftrace_func_entry *entry;
8070 struct ftrace_hash *hash;
8071 struct ftrace_ops *op;
8072 int size, i, ret;
8073
8074 lockdep_assert_held_once(&direct_mutex);
8075
8076 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8077 return 0;
8078
8079 hash = ops->func_hash->filter_hash;
8080 size = 1 << hash->size_bits;
8081 for (i = 0; i < size; i++) {
8082 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8083 unsigned long ip = entry->ip;
8084 bool found_op = false;
8085
8086 mutex_lock(&ftrace_lock);
8087 do_for_each_ftrace_op(op, ftrace_ops_list) {
8088 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8089 continue;
8090 if (ops_references_ip(op, ip)) {
8091 found_op = true;
8092 break;
8093 }
8094 } while_for_each_ftrace_op(op);
8095 mutex_unlock(&ftrace_lock);
8096
8097 if (found_op) {
8098 if (!op->ops_func)
8099 return -EBUSY;
8100
8101 ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
8102 if (ret)
8103 return ret;
8104 }
8105 }
8106 }
8107
8108 return 0;
8109 }
8110
8111 /*
8112 * Similar to prepare_direct_functions_for_ipmodify(), clean up after
8113 * the given ops with IPMODIFY is unregistered. The cleanup is best
8114 * effort: errors from a DIRECT ops' ops_func are ignored.
8115 */
8116 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8117 {
8118 struct ftrace_func_entry *entry;
8119 struct ftrace_hash *hash;
8120 struct ftrace_ops *op;
8121 int size, i;
8122
8123 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8124 return;
8125
8126 mutex_lock(&direct_mutex);
8127
8128 hash = ops->func_hash->filter_hash;
8129 size = 1 << hash->size_bits;
8130 for (i = 0; i < size; i++) {
8131 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8132 unsigned long ip = entry->ip;
8133 bool found_op = false;
8134
8135 mutex_lock(&ftrace_lock);
8136 do_for_each_ftrace_op(op, ftrace_ops_list) {
8137 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8138 continue;
8139 if (ops_references_ip(op, ip)) {
8140 found_op = true;
8141 break;
8142 }
8143 } while_for_each_ftrace_op(op);
8144 mutex_unlock(&ftrace_lock);
8145
8146 /* The cleanup is optional, ignore any errors */
8147 if (found_op && op->ops_func)
8148 op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
8149 }
8150 }
8151 mutex_unlock(&direct_mutex);
8152 }
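/*
 * A DIRECT ops opts in to sharing a patched function with an IPMODIFY
 * user by supplying an ops_func callback. A minimal sketch (illustrative
 * only; my_direct_ops_func is a made-up name, the commands are the ones
 * used above):
 *
 *	static int my_direct_ops_func(struct ftrace_ops *op,
 *				      enum ftrace_ops_cmd cmd)
 *	{
 *		switch (cmd) {
 *		case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
 *			return 0;	(accept sharing with the new ops)
 *		case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * Returning an error from the ENABLE case makes the IPMODIFY registration
 * fail with that error; a conflicting DIRECT ops with no ops_func at all
 * yields -EBUSY.
 */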
8153
8154 #define lock_direct_mutex() mutex_lock(&direct_mutex)
8155 #define unlock_direct_mutex() mutex_unlock(&direct_mutex)
8156
8157 #else
8158
8159 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8160 {
8161 return 0;
8162 }
8163
8164 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8165 {
8166 }
8167
8168 #define lock_direct_mutex() do { } while (0)
8169 #define unlock_direct_mutex() do { } while (0)
8170
8171 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8172
8173 /*
8174 * Similar to register_ftrace_function, except we don't lock direct_mutex.
8175 */
8176 static int register_ftrace_function_nolock(struct ftrace_ops *ops)
8177 {
8178 int ret;
8179
8180 ftrace_ops_init(ops);
8181
8182 mutex_lock(&ftrace_lock);
8183
8184 ret = ftrace_startup(ops, 0);
8185
8186 mutex_unlock(&ftrace_lock);
8187
8188 return ret;
8189 }
8190
8191 /**
8192 * register_ftrace_function - register a function for profiling
8193 * @ops:	ops structure that holds the function for profiling.
8194 *
8195 * Register a function to be called by all functions in the
8196 * kernel.
8197 *
8198 * Note: @ops->func and all the functions it calls must be labeled
8199 *       with "notrace", otherwise it will go into a
8200 *       recursive loop.
8201 */
8202 int register_ftrace_function(struct ftrace_ops *ops)
8203 {
8204 int ret;
8205
8206 lock_direct_mutex();
8207 ret = prepare_direct_functions_for_ipmodify(ops);
8208 if (ret < 0)
8209 goto out_unlock;
8210
8211 ret = register_ftrace_function_nolock(ops);
8212
8213 out_unlock:
8214 unlock_direct_mutex();
8215 return ret;
8216 }
8217 EXPORT_SYMBOL_GPL(register_ftrace_function);
8218
8219 /**
8220 * unregister_ftrace_function - unregister a function for profiling.
8221 * @ops:	ops structure that holds the function to unregister
8222 *
8223 * Unregister a function that was added to be called by ftrace profiling.
8224 */
8225 int unregister_ftrace_function(struct ftrace_ops *ops)
8226 {
8227 int ret;
8228
8229 mutex_lock(&ftrace_lock);
8230 ret = ftrace_shutdown(ops, 0);
8231 mutex_unlock(&ftrace_lock);
8232
8233 cleanup_direct_functions_after_ipmodify(ops);
8234 return ret;
8235 }
8236 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
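/*
 * Typical lifecycle for a tracing module (illustrative sketch; my_ops,
 * my_callback and the "schedule" filter are example names). Without a
 * filter the callback runs for every traced function in the kernel:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op,
 *				struct ftrace_regs *fregs)
 *	{
 *		(must be notrace-safe, see the note above)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */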
8237
8238 static int symbols_cmp(const void *a, const void *b)
8239 {
8240 const char **str_a = (const char **) a;
8241 const char **str_b = (const char **) b;
8242
8243 return strcmp(*str_a, *str_b);
8244 }
8245
8246 struct kallsyms_data {
8247 unsigned long *addrs;
8248 const char **syms;
8249 size_t cnt;
8250 size_t found;
8251 };
8252
8253 static int kallsyms_callback(void *data, const char *name,
8254 struct module *mod, unsigned long addr)
8255 {
8256 struct kallsyms_data *args = data;
8257 const char **sym;
8258 int idx;
8259
8260 sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8261 if (!sym)
8262 return 0;
8263
8264 idx = sym - args->syms;
8265 if (args->addrs[idx])
8266 return 0;
8267
8268 addr = ftrace_location(addr);
8269 if (!addr)
8270 return 0;
8271
8272 args->addrs[idx] = addr;
8273 args->found++;
8274 return args->found == args->cnt ? 1 : 0;
8275 }
8276
8277 /**
8278 * ftrace_lookup_symbols - Lookup addresses for array of symbols
8279 *
8280 * @sorted_syms: array of pointers to symbol names to resolve; must be
8281 * alphabetically sorted
8282 * @cnt: number of symbols/addresses in @sorted_syms/@addrs arrays
8283 * @addrs: array for storing resulting addresses
8284 *
8285 * This function looks up addresses for the symbols provided in the
8286 * @sorted_syms array (which must be alphabetically sorted) and stores
8287 * them in the @addrs array, which needs to be big enough to store at
8288 * least @cnt addresses.
8289 *
8290 * Returns 0 if all provided symbols are found,
8291 * -ESRCH otherwise.
8292 */
8293 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
8294 {
8295 struct kallsyms_data args;
8296 int err;
8297
8298 memset(addrs, 0, sizeof(*addrs) * cnt);
8299 args.addrs = addrs;
8300 args.syms = sorted_syms;
8301 args.cnt = cnt;
8302 args.found = 0;
8303 err = kallsyms_on_each_symbol(kallsyms_callback, &args);
8304 if (err < 0)
8305 return err;
8306 return args.found == args.cnt ? 0 : -ESRCH;
8307 }
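/*
 * Illustrative use of ftrace_lookup_symbols() (syms/addrs are example
 * names; the array must already be sorted, e.g. with sort() and the
 * symbols_cmp() helper above):
 *
 *	const char *syms[] = { "schedule", "wake_up_process" };
 *	unsigned long addrs[ARRAY_SIZE(syms)];
 *	int err;
 *
 *	sort(syms, ARRAY_SIZE(syms), sizeof(*syms), symbols_cmp, NULL);
 *	err = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
 *	(on success, addrs[i] holds the ftrace location of syms[i])
 */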
8308
8309 #ifdef CONFIG_SYSCTL
8310
8311 #ifdef CONFIG_DYNAMIC_FTRACE
8312 static void ftrace_startup_sysctl(void)
8313 {
8314 int command;
8315
8316 if (unlikely(ftrace_disabled))
8317 return;
8318
8319 /* Force update next time */
8320 saved_ftrace_func = NULL;
8321 /* ftrace_start_up is true if we want ftrace running */
8322 if (ftrace_start_up) {
8323 command = FTRACE_UPDATE_CALLS;
8324 if (ftrace_graph_active)
8325 command |= FTRACE_START_FUNC_RET;
8326 ftrace_startup_enable(command);
8327 }
8328 }
8329
8330 static void ftrace_shutdown_sysctl(void)
8331 {
8332 int command;
8333
8334 if (unlikely(ftrace_disabled))
8335 return;
8336
8337 /* ftrace_start_up is true if ftrace is running */
8338 if (ftrace_start_up) {
8339 command = FTRACE_DISABLE_CALLS;
8340 if (ftrace_graph_active)
8341 command |= FTRACE_STOP_FUNC_RET;
8342 ftrace_run_update_code(command);
8343 }
8344 }
8345 #else
8346 # define ftrace_startup_sysctl() do { } while (0)
8347 # define ftrace_shutdown_sysctl() do { } while (0)
8348 #endif /* CONFIG_DYNAMIC_FTRACE */
8349
8350 static bool is_permanent_ops_registered(void)
8351 {
8352 struct ftrace_ops *op;
8353
8354 do_for_each_ftrace_op(op, ftrace_ops_list) {
8355 if (op->flags & FTRACE_OPS_FL_PERMANENT)
8356 return true;
8357 } while_for_each_ftrace_op(op);
8358
8359 return false;
8360 }
8361
8362 static int
8363 ftrace_enable_sysctl(struct ctl_table *table, int write,
8364 void *buffer, size_t *lenp, loff_t *ppos)
8365 {
8366 int ret = -ENODEV;
8367
8368 mutex_lock(&ftrace_lock);
8369
8370 if (unlikely(ftrace_disabled))
8371 goto out;
8372
8373 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8374
8375 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
8376 goto out;
8377
8378 if (ftrace_enabled) {
8379
8380 /* we are starting ftrace again */
8381 if (rcu_dereference_protected(ftrace_ops_list,
8382 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
8383 update_ftrace_function();
8384
8385 ftrace_startup_sysctl();
8386
8387 } else {
8388 if (is_permanent_ops_registered()) {
8389 ftrace_enabled = true;
8390 ret = -EBUSY;
8391 goto out;
8392 }
8393
8394 /* stopping ftrace calls (just send to ftrace_stub) */
8395 ftrace_trace_function = ftrace_stub;
8396
8397 ftrace_shutdown_sysctl();
8398 }
8399
8400 last_ftrace_enabled = !!ftrace_enabled;
8401 out:
8402 mutex_unlock(&ftrace_lock);
8403 return ret;
8404 }
8405
8406 static struct ctl_table ftrace_sysctls[] = {
8407 {
8408 .procname = "ftrace_enabled",
8409 .data = &ftrace_enabled,
8410 .maxlen = sizeof(int),
8411 .mode = 0644,
8412 .proc_handler = ftrace_enable_sysctl,
8413 },
8414 {}
8415 };
8416
8417 static int __init ftrace_sysctl_init(void)
8418 {
8419 register_sysctl_init("kernel", ftrace_sysctls);
8420 return 0;
8421 }
8422 late_initcall(ftrace_sysctl_init);
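/*
 * The knob registered above is visible as /proc/sys/kernel/ftrace_enabled.
 * Illustrative user-space sketch (assumes procfs is mounted):
 *
 *	FILE *f = fopen("/proc/sys/kernel/ftrace_enabled", "w");
 *
 *	if (f) {
 *		fputs("0\n", f);
 *		fclose(f);
 *	}
 *
 * The write fails with EBUSY if an ftrace_ops with FTRACE_OPS_FL_PERMANENT
 * is currently registered (see ftrace_enable_sysctl() above).
 */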
8423 #endif /* CONFIG_SYSCTL */