0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Infrastructure for profiling code inserted by 'gcc -pg'.
0004  *
0005  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
0006  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
0007  *
0008  * Originally ported from the -rt patch by:
0009  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
0010  *
0011  * Based on code in the latency_tracer, that is:
0012  *
0013  *  Copyright (C) 2004-2006 Ingo Molnar
0014  *  Copyright (C) 2004 Nadia Yvette Chambers
0015  */
0016 
0017 #include <linux/stop_machine.h>
0018 #include <linux/clocksource.h>
0019 #include <linux/sched/task.h>
0020 #include <linux/kallsyms.h>
0021 #include <linux/security.h>
0022 #include <linux/seq_file.h>
0023 #include <linux/tracefs.h>
0024 #include <linux/hardirq.h>
0025 #include <linux/kthread.h>
0026 #include <linux/uaccess.h>
0027 #include <linux/bsearch.h>
0028 #include <linux/module.h>
0029 #include <linux/ftrace.h>
0030 #include <linux/sysctl.h>
0031 #include <linux/slab.h>
0032 #include <linux/ctype.h>
0033 #include <linux/sort.h>
0034 #include <linux/list.h>
0035 #include <linux/hash.h>
0036 #include <linux/rcupdate.h>
0037 #include <linux/kprobes.h>
0038 
0039 #include <trace/events/sched.h>
0040 
0041 #include <asm/sections.h>
0042 #include <asm/setup.h>
0043 
0044 #include "ftrace_internal.h"
0045 #include "trace_output.h"
0046 #include "trace_stat.h"
0047 
0048 #define FTRACE_INVALID_FUNCTION     "__ftrace_invalid_address__"
0049 
0050 #define FTRACE_WARN_ON(cond)            \
0051     ({                  \
0052         int ___r = cond;        \
0053         if (WARN_ON(___r))      \
0054             ftrace_kill();      \
0055         ___r;               \
0056     })
0057 
0058 #define FTRACE_WARN_ON_ONCE(cond)       \
0059     ({                  \
0060         int ___r = cond;        \
0061         if (WARN_ON_ONCE(___r))     \
0062             ftrace_kill();      \
0063         ___r;               \
0064     })
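/*
 * Illustrative note (not part of the original file): both macros above are
 * statement expressions that evaluate to the tested condition, so a caller
 * can warn, shut ftrace down, and branch in a single step, e.g.:
 *
 *	if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
 *		return false;
 */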
0065 
0066 /* hash bits for specific function selection */
0067 #define FTRACE_HASH_DEFAULT_BITS 10
0068 #define FTRACE_HASH_MAX_BITS 12
0069 
0070 #ifdef CONFIG_DYNAMIC_FTRACE
0071 #define INIT_OPS_HASH(opsname)  \
0072     .func_hash      = &opsname.local_hash,          \
0073     .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
0074 #else
0075 #define INIT_OPS_HASH(opsname)
0076 #endif
0077 
0078 enum {
0079     FTRACE_MODIFY_ENABLE_FL     = (1 << 0),
0080     FTRACE_MODIFY_MAY_SLEEP_FL  = (1 << 1),
0081 };
0082 
0083 struct ftrace_ops ftrace_list_end __read_mostly = {
0084     .func       = ftrace_stub,
0085     .flags      = FTRACE_OPS_FL_STUB,
0086     INIT_OPS_HASH(ftrace_list_end)
0087 };
0088 
0089 /* ftrace_enabled is a method to turn ftrace on or off */
0090 int ftrace_enabled __read_mostly;
0091 static int __maybe_unused last_ftrace_enabled;
0092 
0093 /* Current function tracing op */
0094 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
0095 /* What to set function_trace_op to */
0096 static struct ftrace_ops *set_function_trace_op;
0097 
0098 static bool ftrace_pids_enabled(struct ftrace_ops *ops)
0099 {
0100     struct trace_array *tr;
0101 
0102     if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
0103         return false;
0104 
0105     tr = ops->private;
0106 
0107     return tr->function_pids != NULL || tr->function_no_pids != NULL;
0108 }
0109 
0110 static void ftrace_update_trampoline(struct ftrace_ops *ops);
0111 
0112 /*
0113  * ftrace_disabled is set when an anomaly is discovered.
0114  * ftrace_disabled is much stronger than ftrace_enabled.
0115  */
0116 static int ftrace_disabled __read_mostly;
0117 
0118 DEFINE_MUTEX(ftrace_lock);
0119 
0120 struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
0121 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
0122 struct ftrace_ops global_ops;
0123 
0124 /* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
0125 void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
0126               struct ftrace_ops *op, struct ftrace_regs *fregs);
0127 
0128 static inline void ftrace_ops_init(struct ftrace_ops *ops)
0129 {
0130 #ifdef CONFIG_DYNAMIC_FTRACE
0131     if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
0132         mutex_init(&ops->local_hash.regex_lock);
0133         ops->func_hash = &ops->local_hash;
0134         ops->flags |= FTRACE_OPS_FL_INITIALIZED;
0135     }
0136 #endif
0137 }
0138 
0139 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
0140                 struct ftrace_ops *op, struct ftrace_regs *fregs)
0141 {
0142     struct trace_array *tr = op->private;
0143     int pid;
0144 
0145     if (tr) {
0146         pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
0147         if (pid == FTRACE_PID_IGNORE)
0148             return;
0149         if (pid != FTRACE_PID_TRACE &&
0150             pid != current->pid)
0151             return;
0152     }
0153 
0154     op->saved_func(ip, parent_ip, op, fregs);
0155 }
0156 
0157 static void ftrace_sync_ipi(void *data)
0158 {
0159     /* Probably not needed, but do it anyway */
0160     smp_rmb();
0161 }
0162 
0163 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
0164 {
0165     /*
0166      * If this is a dynamic, RCU, or per CPU ops, or we force list func,
0167      * then it needs to call the list anyway.
0168      */
0169     if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
0170         FTRACE_FORCE_LIST_FUNC)
0171         return ftrace_ops_list_func;
0172 
0173     return ftrace_ops_get_func(ops);
0174 }
0175 
0176 static void update_ftrace_function(void)
0177 {
0178     ftrace_func_t func;
0179 
0180     /*
0181      * Prepare the ftrace_ops that the arch callback will use.
0182      * If there's only one ftrace_ops registered, the ftrace_ops_list
0183      * will point to the ops we want.
0184      */
0185     set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
0186                         lockdep_is_held(&ftrace_lock));
0187 
0188     /* If there's no ftrace_ops registered, just call the stub function */
0189     if (set_function_trace_op == &ftrace_list_end) {
0190         func = ftrace_stub;
0191 
0192     /*
0193      * If we are at the end of the list and this ops is
0194      * recursion safe and not dynamic and the arch supports passing ops,
0195      * then have the mcount trampoline call the function directly.
0196      */
0197     } else if (rcu_dereference_protected(ftrace_ops_list->next,
0198             lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
0199         func = ftrace_ops_get_list_func(ftrace_ops_list);
0200 
0201     } else {
0202         /* Just use the default ftrace_ops */
0203         set_function_trace_op = &ftrace_list_end;
0204         func = ftrace_ops_list_func;
0205     }
0206 
0207     update_function_graph_func();
0208 
0209     /* If there's no change, then do nothing more here */
0210     if (ftrace_trace_function == func)
0211         return;
0212 
0213     /*
0214      * If we are using the list function, it doesn't care
0215      * about the function_trace_ops.
0216      */
0217     if (func == ftrace_ops_list_func) {
0218         ftrace_trace_function = func;
0219         /*
0220          * Don't even bother setting function_trace_ops,
0221          * it would be racy to do so anyway.
0222          */
0223         return;
0224     }
0225 
0226 #ifndef CONFIG_DYNAMIC_FTRACE
0227     /*
0228      * For static tracing, we need to be a bit more careful.
0229      * The function change takes effect immediately. Thus,
0230      * we need to coordinate the setting of the function_trace_ops
0231      * with the setting of the ftrace_trace_function.
0232      *
0233      * Set the function to the list ops, which will call the
0234      * function we want, albeit indirectly, but it handles the
0235      * ftrace_ops and doesn't depend on function_trace_op.
0236      */
0237     ftrace_trace_function = ftrace_ops_list_func;
0238     /*
0239      * Make sure all CPUs see this. Yes this is slow, but static
0240      * tracing is slow and nasty to have enabled.
0241      */
0242     synchronize_rcu_tasks_rude();
0243     /* Now all cpus are using the list ops. */
0244     function_trace_op = set_function_trace_op;
0245     /* Make sure the function_trace_op is visible on all CPUs */
0246     smp_wmb();
0247     /* Nasty way to force a rmb on all cpus */
0248     smp_call_function(ftrace_sync_ipi, NULL, 1);
0249     /* OK, we are all set to update the ftrace_trace_function now! */
0250 #endif /* !CONFIG_DYNAMIC_FTRACE */
0251 
0252     ftrace_trace_function = func;
0253 }
0254 
0255 static void add_ftrace_ops(struct ftrace_ops __rcu **list,
0256                struct ftrace_ops *ops)
0257 {
0258     rcu_assign_pointer(ops->next, *list);
0259 
0260     /*
0261      * We are entering ops into the list but another
0262      * CPU might be walking that list. We need to make sure
0263      * the ops->next pointer is valid before another CPU sees
0264      * the ops pointer included into the list.
0265      */
0266     rcu_assign_pointer(*list, ops);
0267 }
0268 
0269 static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
0270                  struct ftrace_ops *ops)
0271 {
0272     struct ftrace_ops **p;
0273 
0274     /*
0275      * If we are removing the last function, then simply point
0276      * to the ftrace_stub.
0277      */
0278     if (rcu_dereference_protected(*list,
0279             lockdep_is_held(&ftrace_lock)) == ops &&
0280         rcu_dereference_protected(ops->next,
0281             lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
0282         *list = &ftrace_list_end;
0283         return 0;
0284     }
0285 
0286     for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
0287         if (*p == ops)
0288             break;
0289 
0290     if (*p != ops)
0291         return -1;
0292 
0293     *p = (*p)->next;
0294     return 0;
0295 }
0296 
0297 static void ftrace_update_trampoline(struct ftrace_ops *ops);
0298 
0299 int __register_ftrace_function(struct ftrace_ops *ops)
0300 {
0301     if (ops->flags & FTRACE_OPS_FL_DELETED)
0302         return -EINVAL;
0303 
0304     if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
0305         return -EBUSY;
0306 
0307 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
0308     /*
0309      * If the ftrace_ops specifies SAVE_REGS, then it only can be used
0310      * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
0311      * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
0312      */
0313     if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
0314         !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
0315         return -EINVAL;
0316 
0317     if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
0318         ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
0319 #endif
0320     if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
0321         return -EBUSY;
0322 
0323     if (!is_kernel_core_data((unsigned long)ops))
0324         ops->flags |= FTRACE_OPS_FL_DYNAMIC;
0325 
0326     add_ftrace_ops(&ftrace_ops_list, ops);
0327 
0328     /* Always save the function, and reset at unregistering */
0329     ops->saved_func = ops->func;
0330 
0331     if (ftrace_pids_enabled(ops))
0332         ops->func = ftrace_pid_func;
0333 
0334     ftrace_update_trampoline(ops);
0335 
0336     if (ftrace_enabled)
0337         update_ftrace_function();
0338 
0339     return 0;
0340 }
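/*
 * Minimal usage sketch (illustrative, not part of the original source):
 * external users do not call __register_ftrace_function() directly; they go
 * through register_ftrace_function(), which takes ftrace_lock and lands here.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called for every traced function hit
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	// from module init (or similar) code:
 *	register_ftrace_function(&my_ops);
 */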
0341 
0342 int __unregister_ftrace_function(struct ftrace_ops *ops)
0343 {
0344     int ret;
0345 
0346     if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
0347         return -EBUSY;
0348 
0349     ret = remove_ftrace_ops(&ftrace_ops_list, ops);
0350 
0351     if (ret < 0)
0352         return ret;
0353 
0354     if (ftrace_enabled)
0355         update_ftrace_function();
0356 
0357     ops->func = ops->saved_func;
0358 
0359     return 0;
0360 }
0361 
0362 static void ftrace_update_pid_func(void)
0363 {
0364     struct ftrace_ops *op;
0365 
0366     /* Only do something if we are tracing something */
0367     if (ftrace_trace_function == ftrace_stub)
0368         return;
0369 
0370     do_for_each_ftrace_op(op, ftrace_ops_list) {
0371         if (op->flags & FTRACE_OPS_FL_PID) {
0372             op->func = ftrace_pids_enabled(op) ?
0373                 ftrace_pid_func : op->saved_func;
0374             ftrace_update_trampoline(op);
0375         }
0376     } while_for_each_ftrace_op(op);
0377 
0378     update_ftrace_function();
0379 }
0380 
0381 #ifdef CONFIG_FUNCTION_PROFILER
0382 struct ftrace_profile {
0383     struct hlist_node       node;
0384     unsigned long           ip;
0385     unsigned long           counter;
0386 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0387     unsigned long long      time;
0388     unsigned long long      time_squared;
0389 #endif
0390 };
0391 
0392 struct ftrace_profile_page {
0393     struct ftrace_profile_page  *next;
0394     unsigned long           index;
0395     struct ftrace_profile       records[];
0396 };
0397 
0398 struct ftrace_profile_stat {
0399     atomic_t            disabled;
0400     struct hlist_head       *hash;
0401     struct ftrace_profile_page  *pages;
0402     struct ftrace_profile_page  *start;
0403     struct tracer_stat      stat;
0404 };
0405 
0406 #define PROFILE_RECORDS_SIZE                        \
0407     (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
0408 
0409 #define PROFILES_PER_PAGE                   \
0410     (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
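/*
 * Worked example (illustrative; exact numbers depend on arch and config):
 * on 64-bit with 4 KiB pages, the page header (next + index) is 16 bytes and
 * a struct ftrace_profile is 48 bytes when CONFIG_FUNCTION_GRAPH_TRACER is
 * enabled (hlist_node 16 + ip 8 + counter 8 + time 8 + time_squared 8), so
 * PROFILES_PER_PAGE works out to (4096 - 16) / 48 = 85 records per page.
 */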
0411 
0412 static int ftrace_profile_enabled __read_mostly;
0413 
0414 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
0415 static DEFINE_MUTEX(ftrace_profile_lock);
0416 
0417 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
0418 
0419 #define FTRACE_PROFILE_HASH_BITS 10
0420 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
0421 
0422 static void *
0423 function_stat_next(void *v, int idx)
0424 {
0425     struct ftrace_profile *rec = v;
0426     struct ftrace_profile_page *pg;
0427 
0428     pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
0429 
0430  again:
0431     if (idx != 0)
0432         rec++;
0433 
0434     if ((void *)rec >= (void *)&pg->records[pg->index]) {
0435         pg = pg->next;
0436         if (!pg)
0437             return NULL;
0438         rec = &pg->records[0];
0439         if (!rec->counter)
0440             goto again;
0441     }
0442 
0443     return rec;
0444 }
0445 
0446 static void *function_stat_start(struct tracer_stat *trace)
0447 {
0448     struct ftrace_profile_stat *stat =
0449         container_of(trace, struct ftrace_profile_stat, stat);
0450 
0451     if (!stat || !stat->start)
0452         return NULL;
0453 
0454     return function_stat_next(&stat->start->records[0], 0);
0455 }
0456 
0457 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0458 /* function graph compares on total time */
0459 static int function_stat_cmp(const void *p1, const void *p2)
0460 {
0461     const struct ftrace_profile *a = p1;
0462     const struct ftrace_profile *b = p2;
0463 
0464     if (a->time < b->time)
0465         return -1;
0466     if (a->time > b->time)
0467         return 1;
0468     else
0469         return 0;
0470 }
0471 #else
0472 /* not function graph compares against hits */
0473 static int function_stat_cmp(const void *p1, const void *p2)
0474 {
0475     const struct ftrace_profile *a = p1;
0476     const struct ftrace_profile *b = p2;
0477 
0478     if (a->counter < b->counter)
0479         return -1;
0480     if (a->counter > b->counter)
0481         return 1;
0482     else
0483         return 0;
0484 }
0485 #endif
0486 
0487 static int function_stat_headers(struct seq_file *m)
0488 {
0489 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0490     seq_puts(m, "  Function                               "
0491          "Hit    Time            Avg             s^2\n"
0492             "  --------                               "
0493          "---    ----            ---             ---\n");
0494 #else
0495     seq_puts(m, "  Function                               Hit\n"
0496             "  --------                               ---\n");
0497 #endif
0498     return 0;
0499 }
0500 
0501 static int function_stat_show(struct seq_file *m, void *v)
0502 {
0503     struct ftrace_profile *rec = v;
0504     char str[KSYM_SYMBOL_LEN];
0505     int ret = 0;
0506 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0507     static struct trace_seq s;
0508     unsigned long long avg;
0509     unsigned long long stddev;
0510 #endif
0511     mutex_lock(&ftrace_profile_lock);
0512 
0513     /* we raced with function_profile_reset() */
0514     if (unlikely(rec->counter == 0)) {
0515         ret = -EBUSY;
0516         goto out;
0517     }
0518 
0519 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0520     avg = div64_ul(rec->time, rec->counter);
0521     if (tracing_thresh && (avg < tracing_thresh))
0522         goto out;
0523 #endif
0524 
0525     kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
0526     seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
0527 
0528 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0529     seq_puts(m, "    ");
0530 
0531     /* Sample variance (s^2) */
0532     if (rec->counter <= 1)
0533         stddev = 0;
0534     else {
0535         /*
0536          * Apply Welford's method:
0537          * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
0538          */
0539         stddev = rec->counter * rec->time_squared -
0540              rec->time * rec->time;
0541 
0542         /*
0543          * Divide only by 1000 for the ns^2 -> us^2 conversion.
0544          * trace_print_graph_duration will divide by 1000 again.
0545          */
0546         stddev = div64_ul(stddev,
0547                   rec->counter * (rec->counter - 1) * 1000);
0548     }
0549 
0550     trace_seq_init(&s);
0551     trace_print_graph_duration(rec->time, &s);
0552     trace_seq_puts(&s, "    ");
0553     trace_print_graph_duration(avg, &s);
0554     trace_seq_puts(&s, "    ");
0555     trace_print_graph_duration(stddev, &s);
0556     trace_print_seq(m, &s);
0557 #endif
0558     seq_putc(m, '\n');
0559 out:
0560     mutex_unlock(&ftrace_profile_lock);
0561 
0562     return ret;
0563 }
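/*
 * Worked example for the variance math above (illustrative only): three
 * calls taking 1000, 2000 and 3000 ns give time = 6000 and
 * time_squared = 14,000,000.  Then 3 * 14,000,000 - 6000^2 = 6,000,000,
 * divided by 3 * 2 * 1000 = 6000 yields 1000; trace_print_graph_duration()
 * divides by 1000 once more, printing ~1 (us^2), i.e. a sample variance
 * of 1 us^2.
 */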
0564 
0565 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
0566 {
0567     struct ftrace_profile_page *pg;
0568 
0569     pg = stat->pages = stat->start;
0570 
0571     while (pg) {
0572         memset(pg->records, 0, PROFILE_RECORDS_SIZE);
0573         pg->index = 0;
0574         pg = pg->next;
0575     }
0576 
0577     memset(stat->hash, 0,
0578            FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
0579 }
0580 
0581 static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
0582 {
0583     struct ftrace_profile_page *pg;
0584     int functions;
0585     int pages;
0586     int i;
0587 
0588     /* If we already allocated, do nothing */
0589     if (stat->pages)
0590         return 0;
0591 
0592     stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
0593     if (!stat->pages)
0594         return -ENOMEM;
0595 
0596 #ifdef CONFIG_DYNAMIC_FTRACE
0597     functions = ftrace_update_tot_cnt;
0598 #else
0599     /*
0600      * We do not know the number of functions that exist because
0601      * dynamic tracing is what counts them. From past experience,
0602      * there are around 20K functions. That should be more than enough.
0603      * It is highly unlikely we will execute every function in
0604      * the kernel.
0605      */
0606     functions = 20000;
0607 #endif
0608 
0609     pg = stat->start = stat->pages;
0610 
0611     pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
0612 
0613     for (i = 1; i < pages; i++) {
0614         pg->next = (void *)get_zeroed_page(GFP_KERNEL);
0615         if (!pg->next)
0616             goto out_free;
0617         pg = pg->next;
0618     }
0619 
0620     return 0;
0621 
0622  out_free:
0623     pg = stat->start;
0624     while (pg) {
0625         unsigned long tmp = (unsigned long)pg;
0626 
0627         pg = pg->next;
0628         free_page(tmp);
0629     }
0630 
0631     stat->pages = NULL;
0632     stat->start = NULL;
0633 
0634     return -ENOMEM;
0635 }
0636 
0637 static int ftrace_profile_init_cpu(int cpu)
0638 {
0639     struct ftrace_profile_stat *stat;
0640     int size;
0641 
0642     stat = &per_cpu(ftrace_profile_stats, cpu);
0643 
0644     if (stat->hash) {
0645         /* If the profile is already created, simply reset it */
0646         ftrace_profile_reset(stat);
0647         return 0;
0648     }
0649 
0650     /*
0651      * We are profiling all functions, but usually only a few thousand
0652      * functions are hit. We'll make a hash of 1024 items.
0653      */
0654     size = FTRACE_PROFILE_HASH_SIZE;
0655 
0656     stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
0657 
0658     if (!stat->hash)
0659         return -ENOMEM;
0660 
0661     /* Preallocate the function profiling pages */
0662     if (ftrace_profile_pages_init(stat) < 0) {
0663         kfree(stat->hash);
0664         stat->hash = NULL;
0665         return -ENOMEM;
0666     }
0667 
0668     return 0;
0669 }
0670 
0671 static int ftrace_profile_init(void)
0672 {
0673     int cpu;
0674     int ret = 0;
0675 
0676     for_each_possible_cpu(cpu) {
0677         ret = ftrace_profile_init_cpu(cpu);
0678         if (ret)
0679             break;
0680     }
0681 
0682     return ret;
0683 }
0684 
0685 /* interrupts must be disabled */
0686 static struct ftrace_profile *
0687 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
0688 {
0689     struct ftrace_profile *rec;
0690     struct hlist_head *hhd;
0691     unsigned long key;
0692 
0693     key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
0694     hhd = &stat->hash[key];
0695 
0696     if (hlist_empty(hhd))
0697         return NULL;
0698 
0699     hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
0700         if (rec->ip == ip)
0701             return rec;
0702     }
0703 
0704     return NULL;
0705 }
0706 
0707 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
0708                    struct ftrace_profile *rec)
0709 {
0710     unsigned long key;
0711 
0712     key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
0713     hlist_add_head_rcu(&rec->node, &stat->hash[key]);
0714 }
0715 
0716 /*
0717  * The memory is already allocated; this simply finds a new record to use.
0718  */
0719 static struct ftrace_profile *
0720 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
0721 {
0722     struct ftrace_profile *rec = NULL;
0723 
0724     /* prevent recursion (from NMIs) */
0725     if (atomic_inc_return(&stat->disabled) != 1)
0726         goto out;
0727 
0728     /*
0729      * Try to find the function again since an NMI
0730      * could have added it
0731      */
0732     rec = ftrace_find_profiled_func(stat, ip);
0733     if (rec)
0734         goto out;
0735 
0736     if (stat->pages->index == PROFILES_PER_PAGE) {
0737         if (!stat->pages->next)
0738             goto out;
0739         stat->pages = stat->pages->next;
0740     }
0741 
0742     rec = &stat->pages->records[stat->pages->index++];
0743     rec->ip = ip;
0744     ftrace_add_profile(stat, rec);
0745 
0746  out:
0747     atomic_dec(&stat->disabled);
0748 
0749     return rec;
0750 }
0751 
0752 static void
0753 function_profile_call(unsigned long ip, unsigned long parent_ip,
0754               struct ftrace_ops *ops, struct ftrace_regs *fregs)
0755 {
0756     struct ftrace_profile_stat *stat;
0757     struct ftrace_profile *rec;
0758     unsigned long flags;
0759 
0760     if (!ftrace_profile_enabled)
0761         return;
0762 
0763     local_irq_save(flags);
0764 
0765     stat = this_cpu_ptr(&ftrace_profile_stats);
0766     if (!stat->hash || !ftrace_profile_enabled)
0767         goto out;
0768 
0769     rec = ftrace_find_profiled_func(stat, ip);
0770     if (!rec) {
0771         rec = ftrace_profile_alloc(stat, ip);
0772         if (!rec)
0773             goto out;
0774     }
0775 
0776     rec->counter++;
0777  out:
0778     local_irq_restore(flags);
0779 }
0780 
0781 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0782 static bool fgraph_graph_time = true;
0783 
0784 void ftrace_graph_graph_time_control(bool enable)
0785 {
0786     fgraph_graph_time = enable;
0787 }
0788 
0789 static int profile_graph_entry(struct ftrace_graph_ent *trace)
0790 {
0791     struct ftrace_ret_stack *ret_stack;
0792 
0793     function_profile_call(trace->func, 0, NULL, NULL);
0794 
0795     /* If function graph is shutting down, ret_stack can be NULL */
0796     if (!current->ret_stack)
0797         return 0;
0798 
0799     ret_stack = ftrace_graph_get_ret_stack(current, 0);
0800     if (ret_stack)
0801         ret_stack->subtime = 0;
0802 
0803     return 1;
0804 }
0805 
0806 static void profile_graph_return(struct ftrace_graph_ret *trace)
0807 {
0808     struct ftrace_ret_stack *ret_stack;
0809     struct ftrace_profile_stat *stat;
0810     unsigned long long calltime;
0811     struct ftrace_profile *rec;
0812     unsigned long flags;
0813 
0814     local_irq_save(flags);
0815     stat = this_cpu_ptr(&ftrace_profile_stats);
0816     if (!stat->hash || !ftrace_profile_enabled)
0817         goto out;
0818 
0819     /* If the calltime was zero'd ignore it */
0820     if (!trace->calltime)
0821         goto out;
0822 
0823     calltime = trace->rettime - trace->calltime;
0824 
0825     if (!fgraph_graph_time) {
0826 
0827         /* Append this call time to the parent time to subtract */
0828         ret_stack = ftrace_graph_get_ret_stack(current, 1);
0829         if (ret_stack)
0830             ret_stack->subtime += calltime;
0831 
0832         ret_stack = ftrace_graph_get_ret_stack(current, 0);
0833         if (ret_stack && ret_stack->subtime < calltime)
0834             calltime -= ret_stack->subtime;
0835         else
0836             calltime = 0;
0837     }
0838 
0839     rec = ftrace_find_profiled_func(stat, trace->func);
0840     if (rec) {
0841         rec->time += calltime;
0842         rec->time_squared += calltime * calltime;
0843     }
0844 
0845  out:
0846     local_irq_restore(flags);
0847 }
0848 
0849 static struct fgraph_ops fprofiler_ops = {
0850     .entryfunc = &profile_graph_entry,
0851     .retfunc = &profile_graph_return,
0852 };
0853 
0854 static int register_ftrace_profiler(void)
0855 {
0856     return register_ftrace_graph(&fprofiler_ops);
0857 }
0858 
0859 static void unregister_ftrace_profiler(void)
0860 {
0861     unregister_ftrace_graph(&fprofiler_ops);
0862 }
0863 #else
0864 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
0865     .func       = function_profile_call,
0866     .flags      = FTRACE_OPS_FL_INITIALIZED,
0867     INIT_OPS_HASH(ftrace_profile_ops)
0868 };
0869 
0870 static int register_ftrace_profiler(void)
0871 {
0872     return register_ftrace_function(&ftrace_profile_ops);
0873 }
0874 
0875 static void unregister_ftrace_profiler(void)
0876 {
0877     unregister_ftrace_function(&ftrace_profile_ops);
0878 }
0879 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
0880 
0881 static ssize_t
0882 ftrace_profile_write(struct file *filp, const char __user *ubuf,
0883              size_t cnt, loff_t *ppos)
0884 {
0885     unsigned long val;
0886     int ret;
0887 
0888     ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
0889     if (ret)
0890         return ret;
0891 
0892     val = !!val;
0893 
0894     mutex_lock(&ftrace_profile_lock);
0895     if (ftrace_profile_enabled ^ val) {
0896         if (val) {
0897             ret = ftrace_profile_init();
0898             if (ret < 0) {
0899                 cnt = ret;
0900                 goto out;
0901             }
0902 
0903             ret = register_ftrace_profiler();
0904             if (ret < 0) {
0905                 cnt = ret;
0906                 goto out;
0907             }
0908             ftrace_profile_enabled = 1;
0909         } else {
0910             ftrace_profile_enabled = 0;
0911             /*
0912              * unregister_ftrace_profiler calls stop_machine
0913              * so this acts like a synchronize_rcu.
0914              */
0915             unregister_ftrace_profiler();
0916         }
0917     }
0918  out:
0919     mutex_unlock(&ftrace_profile_lock);
0920 
0921     *ppos += cnt;
0922 
0923     return cnt;
0924 }
0925 
0926 static ssize_t
0927 ftrace_profile_read(struct file *filp, char __user *ubuf,
0928              size_t cnt, loff_t *ppos)
0929 {
0930     char buf[64];       /* big enough to hold a number */
0931     int r;
0932 
0933     r = sprintf(buf, "%u\n", ftrace_profile_enabled);
0934     return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
0935 }
0936 
0937 static const struct file_operations ftrace_profile_fops = {
0938     .open       = tracing_open_generic,
0939     .read       = ftrace_profile_read,
0940     .write      = ftrace_profile_write,
0941     .llseek     = default_llseek,
0942 };
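/*
 * Illustrative use from user space (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	# echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	# cat /sys/kernel/tracing/trace_stat/function0   # per-CPU stats
 */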
0943 
0944 /* used to initialize the real stat files */
0945 static struct tracer_stat function_stats __initdata = {
0946     .name       = "functions",
0947     .stat_start = function_stat_start,
0948     .stat_next  = function_stat_next,
0949     .stat_cmp   = function_stat_cmp,
0950     .stat_headers   = function_stat_headers,
0951     .stat_show  = function_stat_show
0952 };
0953 
0954 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
0955 {
0956     struct ftrace_profile_stat *stat;
0957     char *name;
0958     int ret;
0959     int cpu;
0960 
0961     for_each_possible_cpu(cpu) {
0962         stat = &per_cpu(ftrace_profile_stats, cpu);
0963 
0964         name = kasprintf(GFP_KERNEL, "function%d", cpu);
0965         if (!name) {
0966             /*
0967              * The files created are permanent; if something goes wrong,
0968              * we still do not free the memory.
0969              */
0970             WARN(1,
0971                  "Could not allocate stat file for cpu %d\n",
0972                  cpu);
0973             return;
0974         }
0975         stat->stat = function_stats;
0976         stat->stat.name = name;
0977         ret = register_stat_tracer(&stat->stat);
0978         if (ret) {
0979             WARN(1,
0980                  "Could not register function stat for cpu %d\n",
0981                  cpu);
0982             kfree(name);
0983             return;
0984         }
0985     }
0986 
0987     trace_create_file("function_profile_enabled",
0988               TRACE_MODE_WRITE, d_tracer, NULL,
0989               &ftrace_profile_fops);
0990 }
0991 
0992 #else /* CONFIG_FUNCTION_PROFILER */
0993 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
0994 {
0995 }
0996 #endif /* CONFIG_FUNCTION_PROFILER */
0997 
0998 #ifdef CONFIG_DYNAMIC_FTRACE
0999 
1000 static struct ftrace_ops *removed_ops;
1001 
1002 /*
1003  * Set when doing a global update, like enabling all recs or disabling them.
1004  * It is not set when just updating a single ftrace_ops.
1005  */
1006 static bool update_all_ops;
1007 
1008 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1009 # error Dynamic ftrace depends on MCOUNT_RECORD
1010 #endif
1011 
1012 struct ftrace_func_probe {
1013     struct ftrace_probe_ops *probe_ops;
1014     struct ftrace_ops   ops;
1015     struct trace_array  *tr;
1016     struct list_head    list;
1017     void            *data;
1018     int         ref;
1019 };
1020 
1021 /*
1022  * We make these constant because no one should touch them,
1023  * but they are used as the default "empty hash", to avoid allocating
1024  * it all the time. These are in a read only section such that if
1025  * anyone does try to modify it, it will cause an exception.
1026  */
1027 static const struct hlist_head empty_buckets[1];
1028 static const struct ftrace_hash empty_hash = {
1029     .buckets = (struct hlist_head *)empty_buckets,
1030 };
1031 #define EMPTY_HASH  ((struct ftrace_hash *)&empty_hash)
1032 
1033 struct ftrace_ops global_ops = {
1034     .func               = ftrace_stub,
1035     .local_hash.notrace_hash    = EMPTY_HASH,
1036     .local_hash.filter_hash     = EMPTY_HASH,
1037     INIT_OPS_HASH(global_ops)
1038     .flags              = FTRACE_OPS_FL_INITIALIZED |
1039                       FTRACE_OPS_FL_PID,
1040 };
1041 
1042 /*
1043  * Used by the stack unwinder to know about dynamic ftrace trampolines.
1044  */
1045 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1046 {
1047     struct ftrace_ops *op = NULL;
1048 
1049     /*
1050      * Some of the ops may be dynamically allocated,
1051      * they are freed after a synchronize_rcu().
1052      */
1053     preempt_disable_notrace();
1054 
1055     do_for_each_ftrace_op(op, ftrace_ops_list) {
1056         /*
1057          * This is to check for dynamically allocated trampolines.
1058          * Trampolines that are in kernel text will have
1059          * core_kernel_text() return true.
1060          */
1061         if (op->trampoline && op->trampoline_size)
1062             if (addr >= op->trampoline &&
1063                 addr < op->trampoline + op->trampoline_size) {
1064                 preempt_enable_notrace();
1065                 return op;
1066             }
1067     } while_for_each_ftrace_op(op);
1068     preempt_enable_notrace();
1069 
1070     return NULL;
1071 }
1072 
1073 /*
1074  * This is used by __kernel_text_address() to return true if the
1075  * address is on a dynamically allocated trampoline that would
1076  * not return true for either core_kernel_text() or
1077  * is_module_text_address().
1078  */
1079 bool is_ftrace_trampoline(unsigned long addr)
1080 {
1081     return ftrace_ops_trampoline(addr) != NULL;
1082 }
1083 
1084 struct ftrace_page {
1085     struct ftrace_page  *next;
1086     struct dyn_ftrace   *records;
1087     int         index;
1088     int         order;
1089 };
1090 
1091 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1092 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1093 
1094 static struct ftrace_page   *ftrace_pages_start;
1095 static struct ftrace_page   *ftrace_pages;
1096 
1097 static __always_inline unsigned long
1098 ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1099 {
1100     if (hash->size_bits > 0)
1101         return hash_long(ip, hash->size_bits);
1102 
1103     return 0;
1104 }
1105 
1106 /* Only use this function if ftrace_hash_empty() has already been tested */
1107 static __always_inline struct ftrace_func_entry *
1108 __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1109 {
1110     unsigned long key;
1111     struct ftrace_func_entry *entry;
1112     struct hlist_head *hhd;
1113 
1114     key = ftrace_hash_key(hash, ip);
1115     hhd = &hash->buckets[key];
1116 
1117     hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1118         if (entry->ip == ip)
1119             return entry;
1120     }
1121     return NULL;
1122 }
1123 
1124 /**
1125  * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1126  * @hash: The hash to look at
1127  * @ip: The instruction pointer to test
1128  *
1129  * Search a given @hash to see if a given instruction pointer (@ip)
1130  * exists in it.
1131  *
1132  * Returns the entry that holds the @ip if found. NULL otherwise.
1133  */
1134 struct ftrace_func_entry *
1135 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1136 {
1137     if (ftrace_hash_empty(hash))
1138         return NULL;
1139 
1140     return __ftrace_lookup_ip(hash, ip);
1141 }
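/*
 * Illustrative caller sketch (not part of the original file): lookups are
 * done under ftrace_lock or an RCU read section, typically as
 *
 *	if (ftrace_lookup_ip(hash, rec->ip))
 *		;	// rec->ip is selected by this hash
 */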
1142 
1143 static void __add_hash_entry(struct ftrace_hash *hash,
1144                  struct ftrace_func_entry *entry)
1145 {
1146     struct hlist_head *hhd;
1147     unsigned long key;
1148 
1149     key = ftrace_hash_key(hash, entry->ip);
1150     hhd = &hash->buckets[key];
1151     hlist_add_head(&entry->hlist, hhd);
1152     hash->count++;
1153 }
1154 
1155 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1156 {
1157     struct ftrace_func_entry *entry;
1158 
1159     entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1160     if (!entry)
1161         return -ENOMEM;
1162 
1163     entry->ip = ip;
1164     __add_hash_entry(hash, entry);
1165 
1166     return 0;
1167 }
1168 
1169 static void
1170 free_hash_entry(struct ftrace_hash *hash,
1171           struct ftrace_func_entry *entry)
1172 {
1173     hlist_del(&entry->hlist);
1174     kfree(entry);
1175     hash->count--;
1176 }
1177 
1178 static void
1179 remove_hash_entry(struct ftrace_hash *hash,
1180           struct ftrace_func_entry *entry)
1181 {
1182     hlist_del_rcu(&entry->hlist);
1183     hash->count--;
1184 }
1185 
1186 static void ftrace_hash_clear(struct ftrace_hash *hash)
1187 {
1188     struct hlist_head *hhd;
1189     struct hlist_node *tn;
1190     struct ftrace_func_entry *entry;
1191     int size = 1 << hash->size_bits;
1192     int i;
1193 
1194     if (!hash->count)
1195         return;
1196 
1197     for (i = 0; i < size; i++) {
1198         hhd = &hash->buckets[i];
1199         hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1200             free_hash_entry(hash, entry);
1201     }
1202     FTRACE_WARN_ON(hash->count);
1203 }
1204 
1205 static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1206 {
1207     list_del(&ftrace_mod->list);
1208     kfree(ftrace_mod->module);
1209     kfree(ftrace_mod->func);
1210     kfree(ftrace_mod);
1211 }
1212 
1213 static void clear_ftrace_mod_list(struct list_head *head)
1214 {
1215     struct ftrace_mod_load *p, *n;
1216 
1217     /* stack tracer isn't supported yet */
1218     if (!head)
1219         return;
1220 
1221     mutex_lock(&ftrace_lock);
1222     list_for_each_entry_safe(p, n, head, list)
1223         free_ftrace_mod(p);
1224     mutex_unlock(&ftrace_lock);
1225 }
1226 
1227 static void free_ftrace_hash(struct ftrace_hash *hash)
1228 {
1229     if (!hash || hash == EMPTY_HASH)
1230         return;
1231     ftrace_hash_clear(hash);
1232     kfree(hash->buckets);
1233     kfree(hash);
1234 }
1235 
1236 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1237 {
1238     struct ftrace_hash *hash;
1239 
1240     hash = container_of(rcu, struct ftrace_hash, rcu);
1241     free_ftrace_hash(hash);
1242 }
1243 
1244 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1245 {
1246     if (!hash || hash == EMPTY_HASH)
1247         return;
1248     call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1249 }
1250 
1251 void ftrace_free_filter(struct ftrace_ops *ops)
1252 {
1253     ftrace_ops_init(ops);
1254     free_ftrace_hash(ops->func_hash->filter_hash);
1255     free_ftrace_hash(ops->func_hash->notrace_hash);
1256 }
1257 
1258 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1259 {
1260     struct ftrace_hash *hash;
1261     int size;
1262 
1263     hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1264     if (!hash)
1265         return NULL;
1266 
1267     size = 1 << size_bits;
1268     hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1269 
1270     if (!hash->buckets) {
1271         kfree(hash);
1272         return NULL;
1273     }
1274 
1275     hash->size_bits = size_bits;
1276 
1277     return hash;
1278 }
1279 
1280 
1281 static int ftrace_add_mod(struct trace_array *tr,
1282               const char *func, const char *module,
1283               int enable)
1284 {
1285     struct ftrace_mod_load *ftrace_mod;
1286     struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1287 
1288     ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1289     if (!ftrace_mod)
1290         return -ENOMEM;
1291 
1292     ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1293     ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1294     ftrace_mod->enable = enable;
1295 
1296     if (!ftrace_mod->func || !ftrace_mod->module)
1297         goto out_free;
1298 
1299     list_add(&ftrace_mod->list, mod_head);
1300 
1301     return 0;
1302 
1303  out_free:
1304     free_ftrace_mod(ftrace_mod);
1305 
1306     return -ENOMEM;
1307 }
1308 
1309 static struct ftrace_hash *
1310 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1311 {
1312     struct ftrace_func_entry *entry;
1313     struct ftrace_hash *new_hash;
1314     int size;
1315     int ret;
1316     int i;
1317 
1318     new_hash = alloc_ftrace_hash(size_bits);
1319     if (!new_hash)
1320         return NULL;
1321 
1322     if (hash)
1323         new_hash->flags = hash->flags;
1324 
1325     /* Empty hash? */
1326     if (ftrace_hash_empty(hash))
1327         return new_hash;
1328 
1329     size = 1 << hash->size_bits;
1330     for (i = 0; i < size; i++) {
1331         hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1332             ret = add_hash_entry(new_hash, entry->ip);
1333             if (ret < 0)
1334                 goto free_hash;
1335         }
1336     }
1337 
1338     FTRACE_WARN_ON(new_hash->count != hash->count);
1339 
1340     return new_hash;
1341 
1342  free_hash:
1343     free_ftrace_hash(new_hash);
1344     return NULL;
1345 }
1346 
1347 static void
1348 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1349 static void
1350 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1351 
1352 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1353                        struct ftrace_hash *new_hash);
1354 
1355 static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
1356 {
1357     struct ftrace_func_entry *entry;
1358     struct ftrace_hash *new_hash;
1359     struct hlist_head *hhd;
1360     struct hlist_node *tn;
1361     int bits = 0;
1362     int i;
1363 
1364     /*
1365      * Use around half the size (max bit of it), but
1366      * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
1367      */
1368     bits = fls(size / 2);
1369 
1370     /* Don't allocate too much */
1371     if (bits > FTRACE_HASH_MAX_BITS)
1372         bits = FTRACE_HASH_MAX_BITS;
1373 
1374     new_hash = alloc_ftrace_hash(bits);
1375     if (!new_hash)
1376         return NULL;
1377 
1378     new_hash->flags = src->flags;
1379 
1380     size = 1 << src->size_bits;
1381     for (i = 0; i < size; i++) {
1382         hhd = &src->buckets[i];
1383         hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1384             remove_hash_entry(src, entry);
1385             __add_hash_entry(new_hash, entry);
1386         }
1387     }
1388     return new_hash;
1389 }
1390 
1391 static struct ftrace_hash *
1392 __ftrace_hash_move(struct ftrace_hash *src)
1393 {
1394     int size = src->count;
1395 
1396     /*
1397      * If the new source is empty, just return the empty_hash.
1398      */
1399     if (ftrace_hash_empty(src))
1400         return EMPTY_HASH;
1401 
1402     return dup_hash(src, size);
1403 }
1404 
1405 static int
1406 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1407          struct ftrace_hash **dst, struct ftrace_hash *src)
1408 {
1409     struct ftrace_hash *new_hash;
1410     int ret;
1411 
1412     /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1413     if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1414         return -EINVAL;
1415 
1416     new_hash = __ftrace_hash_move(src);
1417     if (!new_hash)
1418         return -ENOMEM;
1419 
1420     /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1421     if (enable) {
1422         /* IPMODIFY should be updated only when filter_hash updating */
1423         ret = ftrace_hash_ipmodify_update(ops, new_hash);
1424         if (ret < 0) {
1425             free_ftrace_hash(new_hash);
1426             return ret;
1427         }
1428     }
1429 
1430     /*
1431      * Remove the current set, update the hash and add
1432      * them back.
1433      */
1434     ftrace_hash_rec_disable_modify(ops, enable);
1435 
1436     rcu_assign_pointer(*dst, new_hash);
1437 
1438     ftrace_hash_rec_enable_modify(ops, enable);
1439 
1440     return 0;
1441 }
1442 
1443 static bool hash_contains_ip(unsigned long ip,
1444                  struct ftrace_ops_hash *hash)
1445 {
1446     /*
1447      * The function record is a match if it exists in the filter
1448      * hash and not in the notrace hash. Note, an empty hash is
1449      * considered a match for the filter hash, but an empty
1450      * notrace hash is considered not in the notrace hash.
1451      */
1452     return (ftrace_hash_empty(hash->filter_hash) ||
1453         __ftrace_lookup_ip(hash->filter_hash, ip)) &&
1454         (ftrace_hash_empty(hash->notrace_hash) ||
1455          !__ftrace_lookup_ip(hash->notrace_hash, ip));
1456 }
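/*
 * Illustrative summary of the above (not in the original source):
 *
 *	filter_hash empty or contains ip, notrace_hash misses ip -> match
 *	filter_hash non-empty and misses ip                      -> no match
 *	notrace_hash contains ip                                 -> no match
 */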
1457 
1458 /*
1459  * Test the hashes for this ops to see if we want to call
1460  * the ops->func or not.
1461  *
1462  * It's a match if the ip is in the ops->filter_hash or
1463  * the filter_hash does not exist or is empty,
1464  *  AND
1465  * the ip is not in the ops->notrace_hash.
1466  *
1467  * This needs to be called with preemption disabled as
1468  * the hashes are freed with call_rcu().
1469  */
1470 int
1471 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1472 {
1473     struct ftrace_ops_hash hash;
1474     int ret;
1475 
1476 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1477     /*
1478      * There's a small race when adding ops that the ftrace handler
1479      * that wants regs, may be called without them. We can not
1480      * allow that handler to be called if regs is NULL.
1481      */
1482     if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1483         return 0;
1484 #endif
1485 
1486     rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1487     rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1488 
1489     if (hash_contains_ip(ip, &hash))
1490         ret = 1;
1491     else
1492         ret = 0;
1493 
1494     return ret;
1495 }
1496 
1497 /*
1498  * This is a double for loop. Do not use 'break' to break out of the loop;
1499  * you must use a goto.
1500  */
1501 #define do_for_each_ftrace_rec(pg, rec)                 \
1502     for (pg = ftrace_pages_start; pg; pg = pg->next) {      \
1503         int _____i;                     \
1504         for (_____i = 0; _____i < pg->index; _____i++) {    \
1505             rec = &pg->records[_____i];
1506 
1507 #define while_for_each_ftrace_rec()     \
1508         }               \
1509     }
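/*
 * Usage sketch (illustrative only; target_ip is a made-up name): since the
 * macro expands to two nested for loops, a 'break' only leaves the inner
 * loop; exit early with a goto:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */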
1510 
1511 
1512 static int ftrace_cmp_recs(const void *a, const void *b)
1513 {
1514     const struct dyn_ftrace *key = a;
1515     const struct dyn_ftrace *rec = b;
1516 
1517     if (key->flags < rec->ip)
1518         return -1;
1519     if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1520         return 1;
1521     return 0;
1522 }
1523 
1524 static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
1525 {
1526     struct ftrace_page *pg;
1527     struct dyn_ftrace *rec = NULL;
1528     struct dyn_ftrace key;
1529 
1530     key.ip = start;
1531     key.flags = end;    /* overload flags, as it is unsigned long */
1532 
1533     for (pg = ftrace_pages_start; pg; pg = pg->next) {
1534         if (end < pg->records[0].ip ||
1535             start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1536             continue;
1537         rec = bsearch(&key, pg->records, pg->index,
1538                   sizeof(struct dyn_ftrace),
1539                   ftrace_cmp_recs);
1540         if (rec)
1541             break;
1542     }
1543     return rec;
1544 }
1545 
1546 /**
1547  * ftrace_location_range - return the first address of a traced location
1548  *  if it touches the given ip range
1549  * @start: start of range to search.
1550  * @end: end of range to search (inclusive). @end points to the last byte
1551  *  to check.
1552  *
1553  * Returns rec->ip if the related ftrace location is at least partly within
1554  * the given address range. That is, the first address of the instruction
1555  * that is either a NOP or call to the function tracer. It checks the ftrace
1556  * internal tables to determine if the address belongs or not.
1557  */
1558 unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1559 {
1560     struct dyn_ftrace *rec;
1561 
1562     rec = lookup_rec(start, end);
1563     if (rec)
1564         return rec->ip;
1565 
1566     return 0;
1567 }
1568 
1569 /**
1570  * ftrace_location - return the ftrace location
1571  * @ip: the instruction pointer to check
1572  *
1573  * If @ip matches the ftrace location, return @ip.
1574  * If @ip matches sym+0, return sym's ftrace location.
1575  * Otherwise, return 0.
1576  */
1577 unsigned long ftrace_location(unsigned long ip)
1578 {
1579     struct dyn_ftrace *rec;
1580     unsigned long offset;
1581     unsigned long size;
1582 
1583     rec = lookup_rec(ip, ip);
1584     if (!rec) {
1585         if (!kallsyms_lookup_size_offset(ip, &size, &offset))
1586             goto out;
1587 
1588         /* map sym+0 to __fentry__ */
1589         if (!offset)
1590             rec = lookup_rec(ip, ip + size - 1);
1591     }
1592 
1593     if (rec)
1594         return rec->ip;
1595 
1596 out:
1597     return 0;
1598 }
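/*
 * Example (illustrative, not part of the original file): a caller such as
 * kprobes can ask whether an address, or its sym+0 alias, is a patchable
 * call site:
 *
 *	if (ftrace_location(addr))
 *		;	// addr resolves to an __fentry__/mcount site
 */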
1599 
1600 /**
1601  * ftrace_text_reserved - return true if range contains an ftrace location
1602  * @start: start of range to search
1603  * @end: end of range to search (inclusive). @end points to the last byte to check.
1604  *
1605  * Returns 1 if @start and @end contain an ftrace location.
1606  * That is, the instruction that is either a NOP or call to
1607  * the function tracer. It checks the ftrace internal tables to
1608  * determine if the address belongs or not.
1609  */
1610 int ftrace_text_reserved(const void *start, const void *end)
1611 {
1612     unsigned long ret;
1613 
1614     ret = ftrace_location_range((unsigned long)start,
1615                     (unsigned long)end);
1616 
1617     return (int)!!ret;
1618 }
1619 
1620 /* Test if ops registered to this rec needs regs */
1621 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1622 {
1623     struct ftrace_ops *ops;
1624     bool keep_regs = false;
1625 
1626     for (ops = ftrace_ops_list;
1627          ops != &ftrace_list_end; ops = ops->next) {
1628         /* pass rec in as regs to have non-NULL val */
1629         if (ftrace_ops_test(ops, rec->ip, rec)) {
1630             if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1631                 keep_regs = true;
1632                 break;
1633             }
1634         }
1635     }
1636 
1637     return  keep_regs;
1638 }
1639 
1640 static struct ftrace_ops *
1641 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1642 static struct ftrace_ops *
1643 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
1644 static struct ftrace_ops *
1645 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1646 
1647 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1648                      int filter_hash,
1649                      bool inc)
1650 {
1651     struct ftrace_hash *hash;
1652     struct ftrace_hash *other_hash;
1653     struct ftrace_page *pg;
1654     struct dyn_ftrace *rec;
1655     bool update = false;
1656     int count = 0;
1657     int all = false;
1658 
1659     /* Only update if the ops has been registered */
1660     if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1661         return false;
1662 
1663     /*
1664      * In the filter_hash case:
1665      *   If the count is zero, we update all records.
1666      *   Otherwise we just update the items in the hash.
1667      *
1668      * In the notrace_hash case:
1669      *   We enable the update in the hash.
1670      *   As disabling notrace means enabling the tracing,
1671      *   and enabling notrace means disabling, the inc variable
1672      *   gets inverted.
1673      */
1674     if (filter_hash) {
1675         hash = ops->func_hash->filter_hash;
1676         other_hash = ops->func_hash->notrace_hash;
1677         if (ftrace_hash_empty(hash))
1678             all = true;
1679     } else {
1680         inc = !inc;
1681         hash = ops->func_hash->notrace_hash;
1682         other_hash = ops->func_hash->filter_hash;
1683         /*
1684          * If the notrace hash has no items,
1685          * then there's nothing to do.
1686          */
1687         if (ftrace_hash_empty(hash))
1688             return false;
1689     }
1690 
1691     do_for_each_ftrace_rec(pg, rec) {
1692         int in_other_hash = 0;
1693         int in_hash = 0;
1694         int match = 0;
1695 
1696         if (rec->flags & FTRACE_FL_DISABLED)
1697             continue;
1698 
1699         if (all) {
1700             /*
1701              * Only the filter_hash affects all records.
1702              * Update if the record is not in the notrace hash.
1703              */
1704             if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1705                 match = 1;
1706         } else {
1707             in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1708             in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1709 
1710             /*
1711              * If filter_hash is set, we want to match all functions
1712              * that are in the hash but not in the other hash.
1713              *
1714              * If filter_hash is not set, then we are decrementing.
1715              * That means we match anything that is in the hash
1716              * and also in the other_hash. That is, we need to turn
1717              * off functions in the other hash because they are disabled
1718              * by this hash.
1719              */
1720             if (filter_hash && in_hash && !in_other_hash)
1721                 match = 1;
1722             else if (!filter_hash && in_hash &&
1723                  (in_other_hash || ftrace_hash_empty(other_hash)))
1724                 match = 1;
1725         }
1726         if (!match)
1727             continue;
1728 
1729         if (inc) {
1730             rec->flags++;
1731             if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1732                 return false;
1733 
1734             if (ops->flags & FTRACE_OPS_FL_DIRECT)
1735                 rec->flags |= FTRACE_FL_DIRECT;
1736 
1737             /*
1738              * If there's only a single callback registered to a
1739              * function, and the ops has a trampoline registered
1740              * for it, then we can call it directly.
1741              */
1742             if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1743                 rec->flags |= FTRACE_FL_TRAMP;
1744             else
1745                 /*
1746                  * If we are adding another function callback
1747                  * to this function, and the previous had a
1748                  * custom trampoline in use, then we need to go
1749                  * back to the default trampoline.
1750                  */
1751                 rec->flags &= ~FTRACE_FL_TRAMP;
1752 
1753             /*
1754              * If any ops wants regs saved for this function
1755              * then all ops will get saved regs.
1756              */
1757             if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1758                 rec->flags |= FTRACE_FL_REGS;
1759         } else {
1760             if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1761                 return false;
1762             rec->flags--;
1763 
1764             /*
1765              * Only the internal direct_ops should have the
1766              * DIRECT flag set. Thus, if it is removing a
1767              * function, then that function should no longer
1768              * be direct.
1769              */
1770             if (ops->flags & FTRACE_OPS_FL_DIRECT)
1771                 rec->flags &= ~FTRACE_FL_DIRECT;
1772 
1773             /*
1774              * If the rec had REGS enabled and the ops that is
1775              * being removed had REGS set, then see if there is
1776              * still any ops for this record that wants regs.
1777              * If not, we can stop recording them.
1778              */
1779             if (ftrace_rec_count(rec) > 0 &&
1780                 rec->flags & FTRACE_FL_REGS &&
1781                 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1782                 if (!test_rec_ops_needs_regs(rec))
1783                     rec->flags &= ~FTRACE_FL_REGS;
1784             }
1785 
1786             /*
1787              * The TRAMP needs to be set only if rec count
1788              * is decremented to one, and the ops that is
1789              * left has a trampoline. As TRAMP can only be
1790              * enabled if there is only a single ops attached
1791              * to it.
1792              */
1793             if (ftrace_rec_count(rec) == 1 &&
1794                 ftrace_find_tramp_ops_any_other(rec, ops))
1795                 rec->flags |= FTRACE_FL_TRAMP;
1796             else
1797                 rec->flags &= ~FTRACE_FL_TRAMP;
1798 
1799             /*
1800              * flags will be cleared in ftrace_check_record()
1801              * if rec count is zero.
1802              */
1803         }
1804         count++;
1805 
1806         /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1807         update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1808 
1809         /* Shortcut, if we handled all records, we are done. */
1810         if (!all && count == hash->count)
1811             return update;
1812     } while_for_each_ftrace_rec();
1813 
1814     return update;
1815 }
1816 
1817 static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1818                     int filter_hash)
1819 {
1820     return __ftrace_hash_rec_update(ops, filter_hash, 0);
1821 }
1822 
1823 static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1824                    int filter_hash)
1825 {
1826     return __ftrace_hash_rec_update(ops, filter_hash, 1);
1827 }
1828 
1829 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1830                       int filter_hash, int inc)
1831 {
1832     struct ftrace_ops *op;
1833 
1834     __ftrace_hash_rec_update(ops, filter_hash, inc);
1835 
1836     if (ops->func_hash != &global_ops.local_hash)
1837         return;
1838 
1839     /*
1840      * If the ops shares the global_ops hash, then we need to update
1841      * all ops that are enabled and use this hash.
1842      */
1843     do_for_each_ftrace_op(op, ftrace_ops_list) {
1844         /* Already done */
1845         if (op == ops)
1846             continue;
1847         if (op->func_hash == &global_ops.local_hash)
1848             __ftrace_hash_rec_update(op, filter_hash, inc);
1849     } while_for_each_ftrace_op(op);
1850 }
1851 
1852 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1853                        int filter_hash)
1854 {
1855     ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1856 }
1857 
1858 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1859                       int filter_hash)
1860 {
1861     ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1862 }
1863 
1864 /*
1865  * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if the
1866  * update is OK or not needed, -EBUSY if it detects a conflict of the flag
1867  * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1868  * Note that old_hash and new_hash have the following meanings:
1869  *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1870  *  - If the hash is EMPTY_HASH, it hits nothing
1871  *  - Anything else hits the recs which match the hash entries.
1872  *
1873  * A DIRECT ops does not have the IPMODIFY flag, but we still need to check it
1874  * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
1875  * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
1876  * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
1877  * the return value to the caller and eventually to the owner of the DIRECT
1878  * ops.
1879  */
1880 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1881                      struct ftrace_hash *old_hash,
1882                      struct ftrace_hash *new_hash)
1883 {
1884     struct ftrace_page *pg;
1885     struct dyn_ftrace *rec, *end = NULL;
1886     int in_old, in_new;
1887     bool is_ipmodify, is_direct;
1888 
1889     /* Only update if the ops has been registered */
1890     if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1891         return 0;
1892 
1893     is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
1894     is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;
1895 
1896     /* neither IPMODIFY nor DIRECT, skip */
1897     if (!is_ipmodify && !is_direct)
1898         return 0;
1899 
1900     if (WARN_ON_ONCE(is_ipmodify && is_direct))
1901         return 0;
1902 
1903     /*
1904      * Since IPMODIFY and DIRECT are very address-sensitive
1905      * actions, we do not allow an ftrace_ops to use a NULL
1906      * (match-all) hash here.
1907      */
1908     if (!new_hash || !old_hash)
1909         return -EINVAL;
1910 
1911     /* Update rec->flags */
1912     do_for_each_ftrace_rec(pg, rec) {
1913 
1914         if (rec->flags & FTRACE_FL_DISABLED)
1915             continue;
1916 
1917         /* We need to update only differences of filter_hash */
1918         in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1919         in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1920         if (in_old == in_new)
1921             continue;
1922 
1923         if (in_new) {
1924             if (rec->flags & FTRACE_FL_IPMODIFY) {
1925                 int ret;
1926 
1927                 /* Cannot have two ipmodify ops on the same rec */
1928                 if (is_ipmodify)
1929                     goto rollback;
1930 
1931                 FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
1932 
1933                 /*
1934                  * Another ops with IPMODIFY is already
1935                  * attached. We are now attaching a direct
1936                  * ops. Run SHARE_IPMODIFY_SELF, to check
1937                  * whether sharing is supported.
1938                  */
1939                 if (!ops->ops_func)
1940                     return -EBUSY;
1941                 ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
1942                 if (ret)
1943                     return ret;
1944             } else if (is_ipmodify) {
1945                 rec->flags |= FTRACE_FL_IPMODIFY;
1946             }
1947         } else if (is_ipmodify) {
1948             rec->flags &= ~FTRACE_FL_IPMODIFY;
1949         }
1950     } while_for_each_ftrace_rec();
1951 
1952     return 0;
1953 
1954 rollback:
1955     end = rec;
1956 
1957     /* Roll back what we did above */
1958     do_for_each_ftrace_rec(pg, rec) {
1959 
1960         if (rec->flags & FTRACE_FL_DISABLED)
1961             continue;
1962 
1963         if (rec == end)
1964             goto err_out;
1965 
1966         in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1967         in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1968         if (in_old == in_new)
1969             continue;
1970 
1971         if (in_new)
1972             rec->flags &= ~FTRACE_FL_IPMODIFY;
1973         else
1974             rec->flags |= FTRACE_FL_IPMODIFY;
1975     } while_for_each_ftrace_rec();
1976 
1977 err_out:
1978     return -EBUSY;
1979 }
1980 
1981 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1982 {
1983     struct ftrace_hash *hash = ops->func_hash->filter_hash;
1984 
1985     if (ftrace_hash_empty(hash))
1986         hash = NULL;
1987 
1988     return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1989 }
1990 
1991 /* Disabling always succeeds */
1992 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1993 {
1994     struct ftrace_hash *hash = ops->func_hash->filter_hash;
1995 
1996     if (ftrace_hash_empty(hash))
1997         hash = NULL;
1998 
1999     __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
2000 }
2001 
2002 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
2003                        struct ftrace_hash *new_hash)
2004 {
2005     struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2006 
2007     if (ftrace_hash_empty(old_hash))
2008         old_hash = NULL;
2009 
2010     if (ftrace_hash_empty(new_hash))
2011         new_hash = NULL;
2012 
2013     return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
2014 }
2015 
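/*
 * Illustrative summary (not part of this file) of how the three wrappers
 * above drive __ftrace_hash_update_ipmodify():
 *
 *	enable:  (EMPTY_HASH -> filter_hash)	// start hitting the filter
 *	disable: (filter_hash -> EMPTY_HASH)	// stop hitting everything it hit
 *	update:  (old filter  -> new filter)	// only the differences change
 *
 * An empty filter_hash is passed as NULL, i.e. "all functions", which
 * __ftrace_hash_update_ipmodify() rejects with -EINVAL for IPMODIFY and
 * DIRECT ops.
 */
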
2016 static void print_ip_ins(const char *fmt, const unsigned char *p)
2017 {
2018     char ins[MCOUNT_INSN_SIZE];
2019     int i;
2020 
2021     if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
2022         printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
2023         return;
2024     }
2025 
2026     printk(KERN_CONT "%s", fmt);
2027 
2028     for (i = 0; i < MCOUNT_INSN_SIZE; i++)
2029         printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
2030 }
2031 
2032 enum ftrace_bug_type ftrace_bug_type;
2033 const void *ftrace_expected;
2034 
2035 static void print_bug_type(void)
2036 {
2037     switch (ftrace_bug_type) {
2038     case FTRACE_BUG_UNKNOWN:
2039         break;
2040     case FTRACE_BUG_INIT:
2041         pr_info("Initializing ftrace call sites\n");
2042         break;
2043     case FTRACE_BUG_NOP:
2044         pr_info("Setting ftrace call site to NOP\n");
2045         break;
2046     case FTRACE_BUG_CALL:
2047         pr_info("Setting ftrace call site to call ftrace function\n");
2048         break;
2049     case FTRACE_BUG_UPDATE:
2050         pr_info("Updating ftrace call site to call a different ftrace function\n");
2051         break;
2052     }
2053 }
2054 
2055 /**
2056  * ftrace_bug - report and shutdown function tracer
2057  * @failed: The failed type (EFAULT, EINVAL, EPERM)
2058  * @rec: The record that failed
2059  *
2060  * The arch code that enables or disables the function tracing
2061  * can call ftrace_bug() when it has detected a problem in
2062  * modifying the code. @failed should be one of:
2063  * EFAULT - if the problem happens on reading the @ip address
2064  * EINVAL - if what is read at @ip is not what was expected
2065  * EPERM - if the problem happens on writing to the @ip address
2066  */
2067 void ftrace_bug(int failed, struct dyn_ftrace *rec)
2068 {
2069     unsigned long ip = rec ? rec->ip : 0;
2070 
2071     pr_info("------------[ ftrace bug ]------------\n");
2072 
2073     switch (failed) {
2074     case -EFAULT:
2075         pr_info("ftrace faulted on modifying ");
2076         print_ip_sym(KERN_INFO, ip);
2077         break;
2078     case -EINVAL:
2079         pr_info("ftrace failed to modify ");
2080         print_ip_sym(KERN_INFO, ip);
2081         print_ip_ins(" actual:   ", (unsigned char *)ip);
2082         pr_cont("\n");
2083         if (ftrace_expected) {
2084             print_ip_ins(" expected: ", ftrace_expected);
2085             pr_cont("\n");
2086         }
2087         break;
2088     case -EPERM:
2089         pr_info("ftrace faulted on writing ");
2090         print_ip_sym(KERN_INFO, ip);
2091         break;
2092     default:
2093         pr_info("ftrace faulted on unknown error ");
2094         print_ip_sym(KERN_INFO, ip);
2095     }
2096     print_bug_type();
2097     if (rec) {
2098         struct ftrace_ops *ops = NULL;
2099 
2100         pr_info("ftrace record flags: %lx\n", rec->flags);
2101         pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2102             rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2103         if (rec->flags & FTRACE_FL_TRAMP_EN) {
2104             ops = ftrace_find_tramp_ops_any(rec);
2105             if (ops) {
2106                 do {
2107                     pr_cont("\ttramp: %pS (%pS)",
2108                         (void *)ops->trampoline,
2109                         (void *)ops->func);
2110                     ops = ftrace_find_tramp_ops_next(rec, ops);
2111                 } while (ops);
2112             } else
2113                 pr_cont("\ttramp: ERROR!");
2114 
2115         }
2116         ip = ftrace_get_addr_curr(rec);
2117         pr_cont("\n expected tramp: %lx\n", ip);
2118     }
2119 
2120     FTRACE_WARN_ON_ONCE(1);
2121 }
2122 
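/*
 * Illustrative sketch (not part of this file): arch code that patches a
 * call site typically follows the error convention documented above for
 * ftrace_bug().  Roughly (old_insn and the final write step are made-up
 * placeholders for the arch-specific details):
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	if (copy_from_kernel_nofault(cur, (void *)ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;		// could not read the call site
 *	ftrace_expected = old_insn;
 *	if (memcmp(cur, old_insn, MCOUNT_INSN_SIZE))
 *		return -EINVAL;		// site does not hold what was expected
 *	// ... and -EPERM if the write of the new instruction fails
 *
 * The generic __ftrace_replace_code() below feeds such a return value
 * straight into ftrace_bug(failed, rec).
 */
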
2123 static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2124 {
2125     unsigned long flag = 0UL;
2126 
2127     ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2128 
2129     if (rec->flags & FTRACE_FL_DISABLED)
2130         return FTRACE_UPDATE_IGNORE;
2131 
2132     /*
2133      * If we are updating calls:
2134      *
2135      *   If the record has a ref count, then we need to enable it
2136      *   because someone is using it.
2137      *
2138      *   Otherwise we make sure it's disabled.
2139      *
2140      * If we are disabling calls, then disable all records that
2141      * are enabled.
2142      */
2143     if (enable && ftrace_rec_count(rec))
2144         flag = FTRACE_FL_ENABLED;
2145 
2146     /*
2147      * If enabling and the REGS flag does not match the REGS_EN, or
2148      * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2149      * this record. Set flags to fail the compare against ENABLED.
2150      * Same for direct calls.
2151      */
2152     if (flag) {
2153         if (!(rec->flags & FTRACE_FL_REGS) !=
2154             !(rec->flags & FTRACE_FL_REGS_EN))
2155             flag |= FTRACE_FL_REGS;
2156 
2157         if (!(rec->flags & FTRACE_FL_TRAMP) !=
2158             !(rec->flags & FTRACE_FL_TRAMP_EN))
2159             flag |= FTRACE_FL_TRAMP;
2160 
2161         /*
2162          * Direct calls are special, as the ref count matters.
2163          * We only test the record's DIRECT flag against
2164          * DIRECT_EN when the count is 1. That's because,
2165          * if the count is anything other than one, we do
2166          * not want the direct call enabled (it will be done
2167          * via the direct helper instead). But if DIRECT_EN
2168          * is set while the count is not one, we need to
2169          * clear it.
2170          */
2171         if (ftrace_rec_count(rec) == 1) {
2172             if (!(rec->flags & FTRACE_FL_DIRECT) !=
2173                 !(rec->flags & FTRACE_FL_DIRECT_EN))
2174                 flag |= FTRACE_FL_DIRECT;
2175         } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2176             flag |= FTRACE_FL_DIRECT;
2177         }
2178     }
2179 
2180     /* If the state of this record hasn't changed, then do nothing */
2181     if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2182         return FTRACE_UPDATE_IGNORE;
2183 
2184     if (flag) {
2185         /* Save off if rec is being enabled (for return value) */
2186         flag ^= rec->flags & FTRACE_FL_ENABLED;
2187 
2188         if (update) {
2189             rec->flags |= FTRACE_FL_ENABLED;
2190             if (flag & FTRACE_FL_REGS) {
2191                 if (rec->flags & FTRACE_FL_REGS)
2192                     rec->flags |= FTRACE_FL_REGS_EN;
2193                 else
2194                     rec->flags &= ~FTRACE_FL_REGS_EN;
2195             }
2196             if (flag & FTRACE_FL_TRAMP) {
2197                 if (rec->flags & FTRACE_FL_TRAMP)
2198                     rec->flags |= FTRACE_FL_TRAMP_EN;
2199                 else
2200                     rec->flags &= ~FTRACE_FL_TRAMP_EN;
2201             }
2202 
2203             if (flag & FTRACE_FL_DIRECT) {
2204                 /*
2205                  * If there's only one user (direct_ops helper)
2206                  * then we can call the direct function
2207                  * directly (no ftrace trampoline).
2208                  */
2209                 if (ftrace_rec_count(rec) == 1) {
2210                     if (rec->flags & FTRACE_FL_DIRECT)
2211                         rec->flags |= FTRACE_FL_DIRECT_EN;
2212                     else
2213                         rec->flags &= ~FTRACE_FL_DIRECT_EN;
2214                 } else {
2215                     /*
2216                      * Can only call directly if there's
2217                      * only one callback to the function.
2218                      */
2219                     rec->flags &= ~FTRACE_FL_DIRECT_EN;
2220                 }
2221             }
2222         }
2223 
2224         /*
2225          * If this record is being updated from a nop, then
2226          *   return UPDATE_MAKE_CALL.
2227          * Otherwise,
2228          *   return UPDATE_MODIFY_CALL to tell the caller to convert
2229          *   from a save-regs function to a non-save-regs function, or
2230          *   vice versa, or from a trampoline call.
2231          */
2232         if (flag & FTRACE_FL_ENABLED) {
2233             ftrace_bug_type = FTRACE_BUG_CALL;
2234             return FTRACE_UPDATE_MAKE_CALL;
2235         }
2236 
2237         ftrace_bug_type = FTRACE_BUG_UPDATE;
2238         return FTRACE_UPDATE_MODIFY_CALL;
2239     }
2240 
2241     if (update) {
2242         /* If there's no more users, clear all flags */
2243         if (!ftrace_rec_count(rec))
2244             rec->flags = 0;
2245         else
2246             /*
2247              * Just disable the record, but keep the ops TRAMP
2248              * and REGS states. The _EN flags must be disabled though.
2249              */
2250             rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2251                     FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
2252     }
2253 
2254     ftrace_bug_type = FTRACE_BUG_NOP;
2255     return FTRACE_UPDATE_MAKE_NOP;
2256 }
2257 
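/*
 * Illustrative walk-through of ftrace_check_record() (not part of this
 * file): suppose a record has exactly one ops attached and that ops wants
 * saved regs, so rec->flags has a count of 1 and FTRACE_FL_REGS set, but
 * none of the _EN flags yet. With enable and update both true:
 *
 *	flag = FTRACE_FL_ENABLED;	// ref count is non-zero
 *	flag |= FTRACE_FL_REGS;		// REGS does not match REGS_EN
 *	// state differs from rec->flags & FTRACE_FL_ENABLED, so update:
 *	rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_REGS_EN;
 *	// and since the rec was previously disabled:
 *	return FTRACE_UPDATE_MAKE_CALL;	// ftrace_bug_type = FTRACE_BUG_CALL
 */
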
2258 /**
2259  * ftrace_update_record - set a record that now is tracing or not
2260  * @rec: the record to update
2261  * @enable: set to true if the record is tracing, false to force disable
2262  *
2263  * The records that represent all functions that can be traced need
2264  * to be updated when tracing has been enabled.
2265  */
2266 int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2267 {
2268     return ftrace_check_record(rec, enable, true);
2269 }
2270 
2271 /**
2272  * ftrace_test_record - check if the record has been enabled or not
2273  * @rec: the record to test
2274  * @enable: set to true to check if enabled, false if it is disabled
2275  *
2276  * The arch code may need to test if a record is already set to
2277  * tracing to determine how to modify the function code that it
2278  * represents.
2279  */
2280 int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2281 {
2282     return ftrace_check_record(rec, enable, false);
2283 }
2284 
2285 static struct ftrace_ops *
2286 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2287 {
2288     struct ftrace_ops *op;
2289     unsigned long ip = rec->ip;
2290 
2291     do_for_each_ftrace_op(op, ftrace_ops_list) {
2292 
2293         if (!op->trampoline)
2294             continue;
2295 
2296         if (hash_contains_ip(ip, op->func_hash))
2297             return op;
2298     } while_for_each_ftrace_op(op);
2299 
2300     return NULL;
2301 }
2302 
2303 static struct ftrace_ops *
2304 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2305 {
2306     struct ftrace_ops *op;
2307     unsigned long ip = rec->ip;
2308 
2309     do_for_each_ftrace_op(op, ftrace_ops_list) {
2310 
2311         if (op == op_exclude || !op->trampoline)
2312             continue;
2313 
2314         if (hash_contains_ip(ip, op->func_hash))
2315             return op;
2316     } while_for_each_ftrace_op(op);
2317 
2318     return NULL;
2319 }
2320 
2321 static struct ftrace_ops *
2322 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2323                struct ftrace_ops *op)
2324 {
2325     unsigned long ip = rec->ip;
2326 
2327     while_for_each_ftrace_op(op) {
2328 
2329         if (!op->trampoline)
2330             continue;
2331 
2332         if (hash_contains_ip(ip, op->func_hash))
2333             return op;
2334     }
2335 
2336     return NULL;
2337 }
2338 
2339 static struct ftrace_ops *
2340 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2341 {
2342     struct ftrace_ops *op;
2343     unsigned long ip = rec->ip;
2344 
2345     /*
2346      * Need to check removed ops first.
2347      * If they are being removed, and this rec has a tramp,
2348      * and this rec is in the ops list, then it would be the
2349      * one with the tramp.
2350      */
2351     if (removed_ops) {
2352         if (hash_contains_ip(ip, &removed_ops->old_hash))
2353             return removed_ops;
2354     }
2355 
2356     /*
2357      * Need to find the current trampoline for a rec.
2358      * Now, a trampoline is only attached to a rec if there
2359      * was a single 'ops' attached to it. But this can be called
2360      * when we are adding another op to the rec or removing the
2361      * current one. Thus, if the op is being added, we can
2362      * ignore it because it hasn't attached itself to the rec
2363      * yet.
2364      *
2365      * If an ops is being modified (hooking to different functions)
2366      * then we don't care about the new functions that are being
2367      * added, just the old ones (that are probably being removed).
2368      *
2369      * If we are adding an ops to a function that is already using
2370      * a trampoline, the trampoline needs to be removed (trampolines
2371      * are only for a single attached ops), so an ops that is not
2372      * being modified also needs to be checked.
2373      */
2374     do_for_each_ftrace_op(op, ftrace_ops_list) {
2375 
2376         if (!op->trampoline)
2377             continue;
2378 
2379         /*
2380          * If the ops is being added, it hasn't gotten to
2381          * the point to be removed from this tree yet.
2382          */
2383         if (op->flags & FTRACE_OPS_FL_ADDING)
2384             continue;
2385 
2386 
2387         /*
2388          * If the ops is being modified and is in the old
2389          * hash, then it is probably being removed from this
2390          * function.
2391          */
2392         if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2393             hash_contains_ip(ip, &op->old_hash))
2394             return op;
2395         /*
2396          * If the ops is not being added or modified, and it's
2397          * in its normal filter hash, then this must be the one
2398          * we want!
2399          */
2400         if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2401             hash_contains_ip(ip, op->func_hash))
2402             return op;
2403 
2404     } while_for_each_ftrace_op(op);
2405 
2406     return NULL;
2407 }
2408 
2409 static struct ftrace_ops *
2410 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2411 {
2412     struct ftrace_ops *op;
2413     unsigned long ip = rec->ip;
2414 
2415     do_for_each_ftrace_op(op, ftrace_ops_list) {
2416         /* pass rec in as regs to have non-NULL val */
2417         if (hash_contains_ip(ip, op->func_hash))
2418             return op;
2419     } while_for_each_ftrace_op(op);
2420 
2421     return NULL;
2422 }
2423 
2424 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2425 /* Protected by rcu_tasks for reading, and direct_mutex for writing */
2426 static struct ftrace_hash *direct_functions = EMPTY_HASH;
2427 static DEFINE_MUTEX(direct_mutex);
2428 int ftrace_direct_func_count;
2429 
2430 /*
2431  * Search the direct_functions hash to see if the given instruction pointer
2432  * has a direct caller attached to it.
2433  */
2434 unsigned long ftrace_find_rec_direct(unsigned long ip)
2435 {
2436     struct ftrace_func_entry *entry;
2437 
2438     entry = __ftrace_lookup_ip(direct_functions, ip);
2439     if (!entry)
2440         return 0;
2441 
2442     return entry->direct;
2443 }
2444 
2445 static struct ftrace_func_entry*
2446 ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
2447               struct ftrace_hash **free_hash)
2448 {
2449     struct ftrace_func_entry *entry;
2450 
2451     if (ftrace_hash_empty(direct_functions) ||
2452         direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
2453         struct ftrace_hash *new_hash;
2454         int size = ftrace_hash_empty(direct_functions) ? 0 :
2455             direct_functions->count + 1;
2456 
2457         if (size < 32)
2458             size = 32;
2459 
2460         new_hash = dup_hash(direct_functions, size);
2461         if (!new_hash)
2462             return NULL;
2463 
2464         *free_hash = direct_functions;
2465         direct_functions = new_hash;
2466     }
2467 
2468     entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2469     if (!entry)
2470         return NULL;
2471 
2472     entry->ip = ip;
2473     entry->direct = addr;
2474     __add_hash_entry(direct_functions, entry);
2475     return entry;
2476 }
2477 
2478 static void call_direct_funcs(unsigned long ip, unsigned long pip,
2479                   struct ftrace_ops *ops, struct ftrace_regs *fregs)
2480 {
2481     struct pt_regs *regs = ftrace_get_regs(fregs);
2482     unsigned long addr;
2483 
2484     addr = ftrace_find_rec_direct(ip);
2485     if (!addr)
2486         return;
2487 
2488     arch_ftrace_set_direct_caller(regs, addr);
2489 }
2490 
2491 struct ftrace_ops direct_ops = {
2492     .func       = call_direct_funcs,
2493     .flags      = FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
2494               | FTRACE_OPS_FL_PERMANENT,
2495     /*
2496      * By declaring the builtin trampoline as this ops'
2497      * trampoline, one will never be allocated for it. Allocated
2498      * trampolines should not call direct functions.
2499      * The direct_ops should only be called by the builtin
2500      * ftrace_regs_caller trampoline.
2501      */
2502     .trampoline = FTRACE_REGS_ADDR,
2503 };
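
/*
 * Illustrative sketch (not part of this file): the public entry points for
 * direct calls are defined further down in this file. A module, such as the
 * ones under samples/ftrace/, attaches a hand-written trampoline roughly
 * like the following (the exact API name and signature vary by kernel
 * version, so treat this as an assumption):
 *
 *	// my_func is the traced function, my_tramp the custom trampoline
 *	ret = register_ftrace_direct((unsigned long)my_func,
 *				     (unsigned long)my_tramp);
 *
 * After that, ftrace_find_rec_direct(rec->ip) above resolves to my_tramp,
 * and call_direct_funcs() hands that address to
 * arch_ftrace_set_direct_caller().
 */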
2504 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2505 
2506 /**
2507  * ftrace_get_addr_new - Get the call address to set to
2508  * @rec:  The ftrace record descriptor
2509  *
2510  * If the record has the FTRACE_FL_REGS set, that means that it
2511  * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2512  * is not set, then it wants to convert to the normal callback.
2513  *
2514  * Returns the address of the trampoline to set to
2515  */
2516 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2517 {
2518     struct ftrace_ops *ops;
2519     unsigned long addr;
2520 
2521     if ((rec->flags & FTRACE_FL_DIRECT) &&
2522         (ftrace_rec_count(rec) == 1)) {
2523         addr = ftrace_find_rec_direct(rec->ip);
2524         if (addr)
2525             return addr;
2526         WARN_ON_ONCE(1);
2527     }
2528 
2529     /* Trampolines take precedence over regs */
2530     if (rec->flags & FTRACE_FL_TRAMP) {
2531         ops = ftrace_find_tramp_ops_new(rec);
2532         if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2533             pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2534                 (void *)rec->ip, (void *)rec->ip, rec->flags);
2535             /* Ftrace is shutting down, return anything */
2536             return (unsigned long)FTRACE_ADDR;
2537         }
2538         return ops->trampoline;
2539     }
2540 
2541     if (rec->flags & FTRACE_FL_REGS)
2542         return (unsigned long)FTRACE_REGS_ADDR;
2543     else
2544         return (unsigned long)FTRACE_ADDR;
2545 }
2546 
2547 /**
2548  * ftrace_get_addr_curr - Get the call address that is already there
2549  * @rec:  The ftrace record descriptor
2550  *
2551  * The FTRACE_FL_REGS_EN is set when the record already points to
2552  * a function that saves all the regs. Basically the '_EN' version
2553  * represents the current state of the function.
2554  *
2555  * Returns the address of the trampoline that is currently being called
2556  */
2557 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2558 {
2559     struct ftrace_ops *ops;
2560     unsigned long addr;
2561 
2562     /* Direct calls take precedence over trampolines */
2563     if (rec->flags & FTRACE_FL_DIRECT_EN) {
2564         addr = ftrace_find_rec_direct(rec->ip);
2565         if (addr)
2566             return addr;
2567         WARN_ON_ONCE(1);
2568     }
2569 
2570     /* Trampolines take precedence over regs */
2571     if (rec->flags & FTRACE_FL_TRAMP_EN) {
2572         ops = ftrace_find_tramp_ops_curr(rec);
2573         if (FTRACE_WARN_ON(!ops)) {
2574             pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2575                 (void *)rec->ip, (void *)rec->ip);
2576             /* Ftrace is shutting down, return anything */
2577             return (unsigned long)FTRACE_ADDR;
2578         }
2579         return ops->trampoline;
2580     }
2581 
2582     if (rec->flags & FTRACE_FL_REGS_EN)
2583         return (unsigned long)FTRACE_REGS_ADDR;
2584     else
2585         return (unsigned long)FTRACE_ADDR;
2586 }
2587 
2588 static int
2589 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2590 {
2591     unsigned long ftrace_old_addr;
2592     unsigned long ftrace_addr;
2593     int ret;
2594 
2595     ftrace_addr = ftrace_get_addr_new(rec);
2596 
2597     /* This needs to be done before we call ftrace_update_record */
2598     ftrace_old_addr = ftrace_get_addr_curr(rec);
2599 
2600     ret = ftrace_update_record(rec, enable);
2601 
2602     ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2603 
2604     switch (ret) {
2605     case FTRACE_UPDATE_IGNORE:
2606         return 0;
2607 
2608     case FTRACE_UPDATE_MAKE_CALL:
2609         ftrace_bug_type = FTRACE_BUG_CALL;
2610         return ftrace_make_call(rec, ftrace_addr);
2611 
2612     case FTRACE_UPDATE_MAKE_NOP:
2613         ftrace_bug_type = FTRACE_BUG_NOP;
2614         return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2615 
2616     case FTRACE_UPDATE_MODIFY_CALL:
2617         ftrace_bug_type = FTRACE_BUG_UPDATE;
2618         return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2619     }
2620 
2621     return -1; /* unknown ftrace bug */
2622 }
2623 
2624 void __weak ftrace_replace_code(int mod_flags)
2625 {
2626     struct dyn_ftrace *rec;
2627     struct ftrace_page *pg;
2628     bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2629     int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2630     int failed;
2631 
2632     if (unlikely(ftrace_disabled))
2633         return;
2634 
2635     do_for_each_ftrace_rec(pg, rec) {
2636 
2637         if (rec->flags & FTRACE_FL_DISABLED)
2638             continue;
2639 
2640         failed = __ftrace_replace_code(rec, enable);
2641         if (failed) {
2642             ftrace_bug(failed, rec);
2643             /* Stop processing */
2644             return;
2645         }
2646         if (schedulable)
2647             cond_resched();
2648     } while_for_each_ftrace_rec();
2649 }
2650 
2651 struct ftrace_rec_iter {
2652     struct ftrace_page  *pg;
2653     int         index;
2654 };
2655 
2656 /**
2657  * ftrace_rec_iter_start - start up iterating over traced functions
2658  *
2659  * Returns an iterator handle that is used to iterate over all
2660  * the records that represent address locations where functions
2661  * are traced.
2662  *
2663  * May return NULL if no records are available.
2664  */
2665 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2666 {
2667     /*
2668      * We only use a single iterator.
2669      * Protected by the ftrace_lock mutex.
2670      */
2671     static struct ftrace_rec_iter ftrace_rec_iter;
2672     struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2673 
2674     iter->pg = ftrace_pages_start;
2675     iter->index = 0;
2676 
2677     /* Could have empty pages */
2678     while (iter->pg && !iter->pg->index)
2679         iter->pg = iter->pg->next;
2680 
2681     if (!iter->pg)
2682         return NULL;
2683 
2684     return iter;
2685 }
2686 
2687 /**
2688  * ftrace_rec_iter_next - get the next record to process.
2689  * @iter: The handle to the iterator.
2690  *
2691  * Returns the next iterator after the given iterator @iter.
2692  */
2693 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2694 {
2695     iter->index++;
2696 
2697     if (iter->index >= iter->pg->index) {
2698         iter->pg = iter->pg->next;
2699         iter->index = 0;
2700 
2701         /* Could have empty pages */
2702         while (iter->pg && !iter->pg->index)
2703             iter->pg = iter->pg->next;
2704     }
2705 
2706     if (!iter->pg)
2707         return NULL;
2708 
2709     return iter;
2710 }
2711 
2712 /**
2713  * ftrace_rec_iter_record - get the record at the iterator location
2714  * @iter: The current iterator location
2715  *
2716  * Returns the record that the current @iter is at.
2717  */
2718 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2719 {
2720     return &iter->pg->records[iter->index];
2721 }
2722 
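/*
 * Illustrative sketch (not part of this file): arch code that wants to
 * walk every patchable call site typically combines the three iterator
 * helpers above like so:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// arch-specific patching of the site at rec->ip goes here
 *	}
 */
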
2723 static int
2724 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2725 {
2726     int ret;
2727 
2728     if (unlikely(ftrace_disabled))
2729         return 0;
2730 
2731     ret = ftrace_init_nop(mod, rec);
2732     if (ret) {
2733         ftrace_bug_type = FTRACE_BUG_INIT;
2734         ftrace_bug(ret, rec);
2735         return 0;
2736     }
2737     return 1;
2738 }
2739 
2740 /*
2741  * archs can override this function if they must do something
2742  * before the code modification is performed.
2743  */
2744 void __weak ftrace_arch_code_modify_prepare(void)
2745 {
2746 }
2747 
2748 /*
2749  * archs can override this function if they must do something
2750  * after the code modification is performed.
2751  */
2752 void __weak ftrace_arch_code_modify_post_process(void)
2753 {
2754 }
2755 
2756 void ftrace_modify_all_code(int command)
2757 {
2758     int update = command & FTRACE_UPDATE_TRACE_FUNC;
2759     int mod_flags = 0;
2760     int err = 0;
2761 
2762     if (command & FTRACE_MAY_SLEEP)
2763         mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2764 
2765     /*
2766      * If the ftrace_caller calls a ftrace_ops func directly,
2767      * we need to make sure that it only traces functions it
2768      * expects to trace. When doing the switch of functions,
2769      * we need to update to the ftrace_ops_list_func first
2770      * before the transition between the old and new calls is made,
2771      * as the ftrace_ops_list_func will check the ops hashes
2772      * to make sure each ops only has its intended functions
2773      * traced.
2774      */
2775     if (update) {
2776         err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2777         if (FTRACE_WARN_ON(err))
2778             return;
2779     }
2780 
2781     if (command & FTRACE_UPDATE_CALLS)
2782         ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2783     else if (command & FTRACE_DISABLE_CALLS)
2784         ftrace_replace_code(mod_flags);
2785 
2786     if (update && ftrace_trace_function != ftrace_ops_list_func) {
2787         function_trace_op = set_function_trace_op;
2788         smp_wmb();
2789         /* If irqs are disabled, we are in stop machine */
2790         if (!irqs_disabled())
2791             smp_call_function(ftrace_sync_ipi, NULL, 1);
2792         err = ftrace_update_ftrace_func(ftrace_trace_function);
2793         if (FTRACE_WARN_ON(err))
2794             return;
2795     }
2796 
2797     if (command & FTRACE_START_FUNC_RET)
2798         err = ftrace_enable_ftrace_graph_caller();
2799     else if (command & FTRACE_STOP_FUNC_RET)
2800         err = ftrace_disable_ftrace_graph_caller();
2801     FTRACE_WARN_ON(err);
2802 }
2803 
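/*
 * Illustrative sketch (not part of this file) of how a command word
 * normally reaches ftrace_modify_all_code() when an ops is registered,
 * using only functions defined in this file:
 *
 *	ftrace_startup(ops, 0)
 *	  command |= FTRACE_UPDATE_CALLS;	// hash enable touched records
 *	  -> ftrace_startup_enable(command)
 *	       -> ftrace_run_update_code(command)
 *	            -> arch_ftrace_update_code(command)
 *	                 -> ftrace_run_stop_machine(command)	// default, via stop_machine()
 *	                      -> __ftrace_modify_code()
 *	                           -> ftrace_modify_all_code(command)
 */
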
2804 static int __ftrace_modify_code(void *data)
2805 {
2806     int *command = data;
2807 
2808     ftrace_modify_all_code(*command);
2809 
2810     return 0;
2811 }
2812 
2813 /**
2814  * ftrace_run_stop_machine - go back to the stop machine method
2815  * @command: The command to tell ftrace what to do
2816  *
2817  * If an arch needs to fall back to the stop machine method, then
2818  * it can call this function.
2819  */
2820 void ftrace_run_stop_machine(int command)
2821 {
2822     stop_machine(__ftrace_modify_code, &command, NULL);
2823 }
2824 
2825 /**
2826  * arch_ftrace_update_code - modify the code to trace or not trace
2827  * @command: The command that needs to be done
2828  *
2829  * Archs can override this function if they do not need to
2830  * run stop_machine() to modify code.
2831  */
2832 void __weak arch_ftrace_update_code(int command)
2833 {
2834     ftrace_run_stop_machine(command);
2835 }
2836 
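/*
 * Illustrative sketch (not part of this file): an arch that can patch
 * text safely while the system runs (for example via breakpoint-based
 * patching) is expected to override the weak function above with
 * something as simple as:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 *
 * which is roughly what x86 does, skipping stop_machine() entirely.
 */
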
2837 static void ftrace_run_update_code(int command)
2838 {
2839     ftrace_arch_code_modify_prepare();
2840 
2841     /*
2842      * By default we use stop_machine() to modify the code.
2843      * But archs can do whatever they want as long as it
2844      * is safe. stop_machine() is the safest, but also
2845      * produces the most overhead.
2846      */
2847     arch_ftrace_update_code(command);
2848 
2849     ftrace_arch_code_modify_post_process();
2850 }
2851 
2852 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2853                    struct ftrace_ops_hash *old_hash)
2854 {
2855     ops->flags |= FTRACE_OPS_FL_MODIFYING;
2856     ops->old_hash.filter_hash = old_hash->filter_hash;
2857     ops->old_hash.notrace_hash = old_hash->notrace_hash;
2858     ftrace_run_update_code(command);
2859     ops->old_hash.filter_hash = NULL;
2860     ops->old_hash.notrace_hash = NULL;
2861     ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2862 }
2863 
2864 static ftrace_func_t saved_ftrace_func;
2865 static int ftrace_start_up;
2866 
2867 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2868 {
2869 }
2870 
2871 /* List of trace_ops that have allocated trampolines */
2872 static LIST_HEAD(ftrace_ops_trampoline_list);
2873 
2874 static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
2875 {
2876     lockdep_assert_held(&ftrace_lock);
2877     list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
2878 }
2879 
2880 static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
2881 {
2882     lockdep_assert_held(&ftrace_lock);
2883     list_del_rcu(&ops->list);
2884     synchronize_rcu();
2885 }
2886 
2887 /*
2888  * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
2889  * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
2890  * not a module.
2891  */
2892 #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
2893 #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
2894 
2895 static void ftrace_trampoline_free(struct ftrace_ops *ops)
2896 {
2897     if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
2898         ops->trampoline) {
2899         /*
2900          * Record the text poke event before the ksymbol unregister
2901          * event.
2902          */
2903         perf_event_text_poke((void *)ops->trampoline,
2904                      (void *)ops->trampoline,
2905                      ops->trampoline_size, NULL, 0);
2906         perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
2907                    ops->trampoline, ops->trampoline_size,
2908                    true, FTRACE_TRAMPOLINE_SYM);
2909         /* Remove from kallsyms after the perf events */
2910         ftrace_remove_trampoline_from_kallsyms(ops);
2911     }
2912 
2913     arch_ftrace_trampoline_free(ops);
2914 }
2915 
2916 static void ftrace_startup_enable(int command)
2917 {
2918     if (saved_ftrace_func != ftrace_trace_function) {
2919         saved_ftrace_func = ftrace_trace_function;
2920         command |= FTRACE_UPDATE_TRACE_FUNC;
2921     }
2922 
2923     if (!command || !ftrace_enabled)
2924         return;
2925 
2926     ftrace_run_update_code(command);
2927 }
2928 
2929 static void ftrace_startup_all(int command)
2930 {
2931     update_all_ops = true;
2932     ftrace_startup_enable(command);
2933     update_all_ops = false;
2934 }
2935 
2936 int ftrace_startup(struct ftrace_ops *ops, int command)
2937 {
2938     int ret;
2939 
2940     if (unlikely(ftrace_disabled))
2941         return -ENODEV;
2942 
2943     ret = __register_ftrace_function(ops);
2944     if (ret)
2945         return ret;
2946 
2947     ftrace_start_up++;
2948 
2949     /*
2950      * Note that ftrace probes use this to start up
2951      * and modify functions it will probe. But we still
2952      * set the ADDING flag for modification, as probes
2953      * do not have trampolines. If they add them in the
2954      * future, then the probes will need to distinguish
2955      * between adding and updating probes.
2956      */
2957     ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2958 
2959     ret = ftrace_hash_ipmodify_enable(ops);
2960     if (ret < 0) {
2961         /* Rollback registration process */
2962         __unregister_ftrace_function(ops);
2963         ftrace_start_up--;
2964         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2965         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2966             ftrace_trampoline_free(ops);
2967         return ret;
2968     }
2969 
2970     if (ftrace_hash_rec_enable(ops, 1))
2971         command |= FTRACE_UPDATE_CALLS;
2972 
2973     ftrace_startup_enable(command);
2974 
2975     /*
2976      * If ftrace is in an undefined state, we just remove the ops from the
2977      * list to prevent a NULL pointer dereference, instead of totally rolling
2978      * it back and freeing the trampoline, as those actions could cause further damage.
2979      */
2980     if (unlikely(ftrace_disabled)) {
2981         __unregister_ftrace_function(ops);
2982         return -ENODEV;
2983     }
2984 
2985     ops->flags &= ~FTRACE_OPS_FL_ADDING;
2986 
2987     return 0;
2988 }
2989 
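/*
 * Illustrative sketch (not part of this file): ftrace_startup() is normally
 * reached through register_ftrace_function(), defined later in this file.
 * A minimal registration (my_tracer and my_ops are made-up names) looks
 * roughly like:
 *
 *	static void my_tracer(unsigned long ip, unsigned long parent_ip,
 *			      struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called on every traced function entry
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_tracer,
 *	};
 *
 *	register_ftrace_function(&my_ops);	// ends up in ftrace_startup(ops, 0)
 */
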
2990 int ftrace_shutdown(struct ftrace_ops *ops, int command)
2991 {
2992     int ret;
2993 
2994     if (unlikely(ftrace_disabled))
2995         return -ENODEV;
2996 
2997     ret = __unregister_ftrace_function(ops);
2998     if (ret)
2999         return ret;
3000 
3001     ftrace_start_up--;
3002     /*
3003      * Just warn in case of an imbalance; no need to kill ftrace, it's not
3004      * critical, but the ftrace_call callers may never be nopped again after
3005      * further ftrace uses.
3006      */
3007     WARN_ON_ONCE(ftrace_start_up < 0);
3008 
3009     /* Disabling ipmodify never fails */
3010     ftrace_hash_ipmodify_disable(ops);
3011 
3012     if (ftrace_hash_rec_disable(ops, 1))
3013         command |= FTRACE_UPDATE_CALLS;
3014 
3015     ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3016 
3017     if (saved_ftrace_func != ftrace_trace_function) {
3018         saved_ftrace_func = ftrace_trace_function;
3019         command |= FTRACE_UPDATE_TRACE_FUNC;
3020     }
3021 
3022     if (!command || !ftrace_enabled) {
3023         /*
3024          * If these are dynamic or per_cpu ops, they still
3025          * need their data freed. Since function tracing is
3026          * not currently active, we can just free them
3027          * without synchronizing all CPUs.
3028          */
3029         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
3030             goto free_ops;
3031 
3032         return 0;
3033     }
3034 
3035     /*
3036      * If the ops uses a trampoline, then it needs to be
3037      * tested first on update.
3038      */
3039     ops->flags |= FTRACE_OPS_FL_REMOVING;
3040     removed_ops = ops;
3041 
3042     /* The trampoline logic checks the old hashes */
3043     ops->old_hash.filter_hash = ops->func_hash->filter_hash;
3044     ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
3045 
3046     ftrace_run_update_code(command);
3047 
3048     /*
3049      * If there's no more ops registered with ftrace, run a
3050      * sanity check to make sure all rec flags are cleared.
3051      */
3052     if (rcu_dereference_protected(ftrace_ops_list,
3053             lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
3054         struct ftrace_page *pg;
3055         struct dyn_ftrace *rec;
3056 
3057         do_for_each_ftrace_rec(pg, rec) {
3058             if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
3059                 pr_warn("  %pS flags:%lx\n",
3060                     (void *)rec->ip, rec->flags);
3061         } while_for_each_ftrace_rec();
3062     }
3063 
3064     ops->old_hash.filter_hash = NULL;
3065     ops->old_hash.notrace_hash = NULL;
3066 
3067     removed_ops = NULL;
3068     ops->flags &= ~FTRACE_OPS_FL_REMOVING;
3069 
3070     /*
3071      * Dynamic ops may be freed, we must make sure that all
3072      * callers are done before leaving this function.
3073      * The same goes for freeing the per_cpu data of the per_cpu
3074      * ops.
3075      */
3076     if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
3077         /*
3078          * We need to do a hard force of sched synchronization.
3079          * This is because we use preempt_disable() to do RCU, but
3080          * the function tracers can be called where RCU is not watching
3081          * (like before user_exit()). We can not rely on the RCU
3082          * infrastructure to do the synchronization, thus we must do it
3083          * ourselves.
3084          */
3085         synchronize_rcu_tasks_rude();
3086 
3087         /*
3088          * When the kernel is preemptible, tasks can be preempted
3089          * while on a ftrace trampoline. Just scheduling a task on
3090          * a CPU is not good enough to flush them. Calling
3091          * synchronize_rcu_tasks() will wait for those tasks to
3092          * execute and either schedule voluntarily or enter user space.
3093          */
3094         if (IS_ENABLED(CONFIG_PREEMPTION))
3095             synchronize_rcu_tasks();
3096 
3097  free_ops:
3098         ftrace_trampoline_free(ops);
3099     }
3100 
3101     return 0;
3102 }
3103 
3104 static u64      ftrace_update_time;
3105 unsigned long       ftrace_update_tot_cnt;
3106 unsigned long       ftrace_number_of_pages;
3107 unsigned long       ftrace_number_of_groups;
3108 
3109 static inline int ops_traces_mod(struct ftrace_ops *ops)
3110 {
3111     /*
3112      * An empty filter_hash defaults to tracing the whole module.
3113      * But the notrace hash requires testing individual module functions.
3114      */
3115     return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3116         ftrace_hash_empty(ops->func_hash->notrace_hash);
3117 }
3118 
3119 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3120 {
3121     bool init_nop = ftrace_need_init_nop();
3122     struct ftrace_page *pg;
3123     struct dyn_ftrace *p;
3124     u64 start, stop;
3125     unsigned long update_cnt = 0;
3126     unsigned long rec_flags = 0;
3127     int i;
3128 
3129     start = ftrace_now(raw_smp_processor_id());
3130 
3131     /*
3132      * When a module is loaded, this function is called to convert
3133      * the calls to mcount in its text to nops, and also to create
3134      * an entry in the ftrace data. Now, if ftrace is activated
3135      * after this call, but before the module sets its text to
3136      * read-only, the modification of enabling ftrace can fail if
3137      * the read-only is done while ftrace is converting the calls.
3138      * To prevent this, the module's records are set as disabled
3139      * and will be enabled after the call to set the module's text
3140      * to read-only.
3141      */
3142     if (mod)
3143         rec_flags |= FTRACE_FL_DISABLED;
3144 
3145     for (pg = new_pgs; pg; pg = pg->next) {
3146 
3147         for (i = 0; i < pg->index; i++) {
3148 
3149             /* If something went wrong, bail without enabling anything */
3150             if (unlikely(ftrace_disabled))
3151                 return -1;
3152 
3153             p = &pg->records[i];
3154             p->flags = rec_flags;
3155 
3156             /*
3157              * Do the initial record conversion from mcount jump
3158              * to the NOP instructions.
3159              */
3160             if (init_nop && !ftrace_nop_initialize(mod, p))
3161                 break;
3162 
3163             update_cnt++;
3164         }
3165     }
3166 
3167     stop = ftrace_now(raw_smp_processor_id());
3168     ftrace_update_time = stop - start;
3169     ftrace_update_tot_cnt += update_cnt;
3170 
3171     return 0;
3172 }
3173 
3174 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3175 {
3176     int order;
3177     int pages;
3178     int cnt;
3179 
3180     if (WARN_ON(!count))
3181         return -EINVAL;
3182 
3183     /* We want to fill as much as possible, with no empty pages */
3184     pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3185     order = fls(pages) - 1;
3186 
3187  again:
3188     pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3189 
3190     if (!pg->records) {
3191         /* if we can't allocate this size, try something smaller */
3192         if (!order)
3193             return -ENOMEM;
3194         order >>= 1;
3195         goto again;
3196     }
3197 
3198     ftrace_number_of_pages += 1 << order;
3199     ftrace_number_of_groups++;
3200 
3201     cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3202     pg->order = order;
3203 
3204     if (cnt > count)
3205         cnt = count;
3206 
3207     return cnt;
3208 }
3209 
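/*
 * Worked example of the sizing above (illustrative, not part of this
 * file): assuming ENTRIES_PER_PAGE works out to 256 (4K pages, 16-byte
 * records) and count is 600, then
 *
 *	pages = DIV_ROUND_UP(600, 256) = 3
 *	order = fls(3) - 1 = 1
 *
 * so an order-1 (two page) allocation is tried first; on failure the
 * order is halved until an order-0 allocation either succeeds or also
 * fails with -ENOMEM.
 */
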
3210 static struct ftrace_page *
3211 ftrace_allocate_pages(unsigned long num_to_init)
3212 {
3213     struct ftrace_page *start_pg;
3214     struct ftrace_page *pg;
3215     int cnt;
3216 
3217     if (!num_to_init)
3218         return NULL;
3219 
3220     start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3221     if (!pg)
3222         return NULL;
3223 
3224     /*
3225      * Try to allocate as much as possible in one contiguous
3226      * location that fills in all of the space. We want to
3227      * waste as little space as possible.
3228      */
3229     for (;;) {
3230         cnt = ftrace_allocate_records(pg, num_to_init);
3231         if (cnt < 0)
3232             goto free_pages;
3233 
3234         num_to_init -= cnt;
3235         if (!num_to_init)
3236             break;
3237 
3238         pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3239         if (!pg->next)
3240             goto free_pages;
3241 
3242         pg = pg->next;
3243     }
3244 
3245     return start_pg;
3246 
3247  free_pages:
3248     pg = start_pg;
3249     while (pg) {
3250         if (pg->records) {
3251             free_pages((unsigned long)pg->records, pg->order);
3252             ftrace_number_of_pages -= 1 << pg->order;
3253         }
3254         start_pg = pg->next;
3255         kfree(pg);
3256         pg = start_pg;
3257         ftrace_number_of_groups--;
3258     }
3259     pr_info("ftrace: FAILED to allocate memory for functions\n");
3260     return NULL;
3261 }
3262 
3263 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3264 
3265 struct ftrace_iterator {
3266     loff_t              pos;
3267     loff_t              func_pos;
3268     loff_t              mod_pos;
3269     struct ftrace_page      *pg;
3270     struct dyn_ftrace       *func;
3271     struct ftrace_func_probe    *probe;
3272     struct ftrace_func_entry    *probe_entry;
3273     struct trace_parser     parser;
3274     struct ftrace_hash      *hash;
3275     struct ftrace_ops       *ops;
3276     struct trace_array      *tr;
3277     struct list_head        *mod_list;
3278     int             pidx;
3279     int             idx;
3280     unsigned            flags;
3281 };
3282 
3283 static void *
3284 t_probe_next(struct seq_file *m, loff_t *pos)
3285 {
3286     struct ftrace_iterator *iter = m->private;
3287     struct trace_array *tr = iter->ops->private;
3288     struct list_head *func_probes;
3289     struct ftrace_hash *hash;
3290     struct list_head *next;
3291     struct hlist_node *hnd = NULL;
3292     struct hlist_head *hhd;
3293     int size;
3294 
3295     (*pos)++;
3296     iter->pos = *pos;
3297 
3298     if (!tr)
3299         return NULL;
3300 
3301     func_probes = &tr->func_probes;
3302     if (list_empty(func_probes))
3303         return NULL;
3304 
3305     if (!iter->probe) {
3306         next = func_probes->next;
3307         iter->probe = list_entry(next, struct ftrace_func_probe, list);
3308     }
3309 
3310     if (iter->probe_entry)
3311         hnd = &iter->probe_entry->hlist;
3312 
3313     hash = iter->probe->ops.func_hash->filter_hash;
3314 
3315     /*
3316      * A probe being registered may temporarily have an empty hash
3317      * and it's at the end of the func_probes list.
3318      */
3319     if (!hash || hash == EMPTY_HASH)
3320         return NULL;
3321 
3322     size = 1 << hash->size_bits;
3323 
3324  retry:
3325     if (iter->pidx >= size) {
3326         if (iter->probe->list.next == func_probes)
3327             return NULL;
3328         next = iter->probe->list.next;
3329         iter->probe = list_entry(next, struct ftrace_func_probe, list);
3330         hash = iter->probe->ops.func_hash->filter_hash;
3331         size = 1 << hash->size_bits;
3332         iter->pidx = 0;
3333     }
3334 
3335     hhd = &hash->buckets[iter->pidx];
3336 
3337     if (hlist_empty(hhd)) {
3338         iter->pidx++;
3339         hnd = NULL;
3340         goto retry;
3341     }
3342 
3343     if (!hnd)
3344         hnd = hhd->first;
3345     else {
3346         hnd = hnd->next;
3347         if (!hnd) {
3348             iter->pidx++;
3349             goto retry;
3350         }
3351     }
3352 
3353     if (WARN_ON_ONCE(!hnd))
3354         return NULL;
3355 
3356     iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3357 
3358     return iter;
3359 }
3360 
3361 static void *t_probe_start(struct seq_file *m, loff_t *pos)
3362 {
3363     struct ftrace_iterator *iter = m->private;
3364     void *p = NULL;
3365     loff_t l;
3366 
3367     if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3368         return NULL;
3369 
3370     if (iter->mod_pos > *pos)
3371         return NULL;
3372 
3373     iter->probe = NULL;
3374     iter->probe_entry = NULL;
3375     iter->pidx = 0;
3376     for (l = 0; l <= (*pos - iter->mod_pos); ) {
3377         p = t_probe_next(m, &l);
3378         if (!p)
3379             break;
3380     }
3381     if (!p)
3382         return NULL;
3383 
3384     /* Only set this if we have an item */
3385     iter->flags |= FTRACE_ITER_PROBE;
3386 
3387     return iter;
3388 }
3389 
3390 static int
3391 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3392 {
3393     struct ftrace_func_entry *probe_entry;
3394     struct ftrace_probe_ops *probe_ops;
3395     struct ftrace_func_probe *probe;
3396 
3397     probe = iter->probe;
3398     probe_entry = iter->probe_entry;
3399 
3400     if (WARN_ON_ONCE(!probe || !probe_entry))
3401         return -EIO;
3402 
3403     probe_ops = probe->probe_ops;
3404 
3405     if (probe_ops->print)
3406         return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3407 
3408     seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3409            (void *)probe_ops->func);
3410 
3411     return 0;
3412 }
3413 
3414 static void *
3415 t_mod_next(struct seq_file *m, loff_t *pos)
3416 {
3417     struct ftrace_iterator *iter = m->private;
3418     struct trace_array *tr = iter->tr;
3419 
3420     (*pos)++;
3421     iter->pos = *pos;
3422 
3423     iter->mod_list = iter->mod_list->next;
3424 
3425     if (iter->mod_list == &tr->mod_trace ||
3426         iter->mod_list == &tr->mod_notrace) {
3427         iter->flags &= ~FTRACE_ITER_MOD;
3428         return NULL;
3429     }
3430 
3431     iter->mod_pos = *pos;
3432 
3433     return iter;
3434 }
3435 
3436 static void *t_mod_start(struct seq_file *m, loff_t *pos)
3437 {
3438     struct ftrace_iterator *iter = m->private;
3439     void *p = NULL;
3440     loff_t l;
3441 
3442     if (iter->func_pos > *pos)
3443         return NULL;
3444 
3445     iter->mod_pos = iter->func_pos;
3446 
3447     /* probes are only available if tr is set */
3448     if (!iter->tr)
3449         return NULL;
3450 
3451     for (l = 0; l <= (*pos - iter->func_pos); ) {
3452         p = t_mod_next(m, &l);
3453         if (!p)
3454             break;
3455     }
3456     if (!p) {
3457         iter->flags &= ~FTRACE_ITER_MOD;
3458         return t_probe_start(m, pos);
3459     }
3460 
3461     /* Only set this if we have an item */
3462     iter->flags |= FTRACE_ITER_MOD;
3463 
3464     return iter;
3465 }
3466 
3467 static int
3468 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3469 {
3470     struct ftrace_mod_load *ftrace_mod;
3471     struct trace_array *tr = iter->tr;
3472 
3473     if (WARN_ON_ONCE(!iter->mod_list) ||
3474              iter->mod_list == &tr->mod_trace ||
3475              iter->mod_list == &tr->mod_notrace)
3476         return -EIO;
3477 
3478     ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3479 
3480     if (ftrace_mod->func)
3481         seq_printf(m, "%s", ftrace_mod->func);
3482     else
3483         seq_putc(m, '*');
3484 
3485     seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3486 
3487     return 0;
3488 }
3489 
3490 static void *
3491 t_func_next(struct seq_file *m, loff_t *pos)
3492 {
3493     struct ftrace_iterator *iter = m->private;
3494     struct dyn_ftrace *rec = NULL;
3495 
3496     (*pos)++;
3497 
3498  retry:
3499     if (iter->idx >= iter->pg->index) {
3500         if (iter->pg->next) {
3501             iter->pg = iter->pg->next;
3502             iter->idx = 0;
3503             goto retry;
3504         }
3505     } else {
3506         rec = &iter->pg->records[iter->idx++];
3507         if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3508              !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3509 
3510             ((iter->flags & FTRACE_ITER_ENABLED) &&
3511              !(rec->flags & FTRACE_FL_ENABLED))) {
3512 
3513             rec = NULL;
3514             goto retry;
3515         }
3516     }
3517 
3518     if (!rec)
3519         return NULL;
3520 
3521     iter->pos = iter->func_pos = *pos;
3522     iter->func = rec;
3523 
3524     return iter;
3525 }
3526 
3527 static void *
3528 t_next(struct seq_file *m, void *v, loff_t *pos)
3529 {
3530     struct ftrace_iterator *iter = m->private;
3531     loff_t l = *pos; /* t_probe_start() must use original pos */
3532     void *ret;
3533 
3534     if (unlikely(ftrace_disabled))
3535         return NULL;
3536 
3537     if (iter->flags & FTRACE_ITER_PROBE)
3538         return t_probe_next(m, pos);
3539 
3540     if (iter->flags & FTRACE_ITER_MOD)
3541         return t_mod_next(m, pos);
3542 
3543     if (iter->flags & FTRACE_ITER_PRINTALL) {
3544         /* next must increment pos, and t_probe_start does not */
3545         (*pos)++;
3546         return t_mod_start(m, &l);
3547     }
3548 
3549     ret = t_func_next(m, pos);
3550 
3551     if (!ret)
3552         return t_mod_start(m, &l);
3553 
3554     return ret;
3555 }
3556 
3557 static void reset_iter_read(struct ftrace_iterator *iter)
3558 {
3559     iter->pos = 0;
3560     iter->func_pos = 0;
3561     iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3562 }
3563 
3564 static void *t_start(struct seq_file *m, loff_t *pos)
3565 {
3566     struct ftrace_iterator *iter = m->private;
3567     void *p = NULL;
3568     loff_t l;
3569 
3570     mutex_lock(&ftrace_lock);
3571 
3572     if (unlikely(ftrace_disabled))
3573         return NULL;
3574 
3575     /*
3576      * If an lseek was done, then reset and start from beginning.
3577      */
3578     if (*pos < iter->pos)
3579         reset_iter_read(iter);
3580 
3581     /*
3582      * For set_ftrace_filter reading, if we have the filter
3583      * off, we can take a shortcut and just print out that all
3584      * functions are enabled.
3585      */
3586     if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3587         ftrace_hash_empty(iter->hash)) {
3588         iter->func_pos = 1; /* Account for the message */
3589         if (*pos > 0)
3590             return t_mod_start(m, pos);
3591         iter->flags |= FTRACE_ITER_PRINTALL;
3592         /* reset in case of seek/pread */
3593         iter->flags &= ~FTRACE_ITER_PROBE;
3594         return iter;
3595     }
3596 
3597     if (iter->flags & FTRACE_ITER_MOD)
3598         return t_mod_start(m, pos);
3599 
3600     /*
3601      * Unfortunately, we need to restart at ftrace_pages_start
3602      * every time we let go of the ftrace_lock mutex. This is because
3603      * those pointers can change without the lock.
3604      */
3605     iter->pg = ftrace_pages_start;
3606     iter->idx = 0;
3607     for (l = 0; l <= *pos; ) {
3608         p = t_func_next(m, &l);
3609         if (!p)
3610             break;
3611     }
3612 
3613     if (!p)
3614         return t_mod_start(m, pos);
3615 
3616     return iter;
3617 }
3618 
3619 static void t_stop(struct seq_file *m, void *p)
3620 {
3621     mutex_unlock(&ftrace_lock);
3622 }
3623 
3624 void * __weak
3625 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3626 {
3627     return NULL;
3628 }
3629 
3630 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3631                 struct dyn_ftrace *rec)
3632 {
3633     void *ptr;
3634 
3635     ptr = arch_ftrace_trampoline_func(ops, rec);
3636     if (ptr)
3637         seq_printf(m, " ->%pS", ptr);
3638 }
3639 
3640 #ifdef FTRACE_MCOUNT_MAX_OFFSET
3641 /*
3642  * Weak functions can still have an mcount/fentry that is saved in
3643  * the __mcount_loc section. These can be detected by having a
3644  * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
3645  * symbol found by kallsyms is not the function that the mcount/fentry
3646  * is part of. The offset is much greater in these cases.
3647  *
3648  * Test the record to make sure that the ip points to a valid kallsyms
3649  * symbol and, if not, mark the record disabled.
3650  */
3651 static int test_for_valid_rec(struct dyn_ftrace *rec)
3652 {
3653     char str[KSYM_SYMBOL_LEN];
3654     unsigned long offset;
3655     const char *ret;
3656 
3657     ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
3658 
3659     /* Weak functions can cause invalid addresses */
3660     if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3661         rec->flags |= FTRACE_FL_DISABLED;
3662         return 0;
3663     }
3664     return 1;
3665 }
3666 
3667 static struct workqueue_struct *ftrace_check_wq __initdata;
3668 static struct work_struct ftrace_check_work __initdata;
3669 
3670 /*
3671  * Scan all the mcount/fentry entries to make sure they are valid.
3672  */
3673 static __init void ftrace_check_work_func(struct work_struct *work)
3674 {
3675     struct ftrace_page *pg;
3676     struct dyn_ftrace *rec;
3677 
3678     mutex_lock(&ftrace_lock);
3679     do_for_each_ftrace_rec(pg, rec) {
3680         test_for_valid_rec(rec);
3681     } while_for_each_ftrace_rec();
3682     mutex_unlock(&ftrace_lock);
3683 }
3684 
3685 static int __init ftrace_check_for_weak_functions(void)
3686 {
3687     INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
3688 
3689     ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
3690 
3691     queue_work(ftrace_check_wq, &ftrace_check_work);
3692     return 0;
3693 }
3694 
3695 static int __init ftrace_check_sync(void)
3696 {
3697     /* Make sure the ftrace_check updates are finished */
3698     if (ftrace_check_wq)
3699         destroy_workqueue(ftrace_check_wq);
3700     return 0;
3701 }
3702 
3703 late_initcall_sync(ftrace_check_sync);
3704 subsys_initcall(ftrace_check_for_weak_functions);
3705 
3706 static int print_rec(struct seq_file *m, unsigned long ip)
3707 {
3708     unsigned long offset;
3709     char str[KSYM_SYMBOL_LEN];
3710     char *modname;
3711     const char *ret;
3712 
3713     ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
3714     /* Weak functions can cause invalid addresses */
3715     if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3716         snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
3717              FTRACE_INVALID_FUNCTION, offset);
3718         ret = NULL;
3719     }
3720 
3721     seq_puts(m, str);
3722     if (modname)
3723         seq_printf(m, " [%s]", modname);
3724     return ret == NULL ? -1 : 0;
3725 }
3726 #else
3727 static inline int test_for_valid_rec(struct dyn_ftrace *rec)
3728 {
3729     return 1;
3730 }
3731 
3732 static inline int print_rec(struct seq_file *m, unsigned long ip)
3733 {
3734     seq_printf(m, "%ps", (void *)ip);
3735     return 0;
3736 }
3737 #endif
3738 
3739 static int t_show(struct seq_file *m, void *v)
3740 {
3741     struct ftrace_iterator *iter = m->private;
3742     struct dyn_ftrace *rec;
3743 
3744     if (iter->flags & FTRACE_ITER_PROBE)
3745         return t_probe_show(m, iter);
3746 
3747     if (iter->flags & FTRACE_ITER_MOD)
3748         return t_mod_show(m, iter);
3749 
3750     if (iter->flags & FTRACE_ITER_PRINTALL) {
3751         if (iter->flags & FTRACE_ITER_NOTRACE)
3752             seq_puts(m, "#### no functions disabled ####\n");
3753         else
3754             seq_puts(m, "#### all functions enabled ####\n");
3755         return 0;
3756     }
3757 
3758     rec = iter->func;
3759 
3760     if (!rec)
3761         return 0;
3762 
3763     if (print_rec(m, rec->ip)) {
3764         /* This should only happen when a rec is disabled */
3765         WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
3766         seq_putc(m, '\n');
3767         return 0;
3768     }
3769 
3770     if (iter->flags & FTRACE_ITER_ENABLED) {
3771         struct ftrace_ops *ops;
3772 
3773         seq_printf(m, " (%ld)%s%s%s",
3774                ftrace_rec_count(rec),
3775                rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3776                rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
3777                rec->flags & FTRACE_FL_DIRECT ? " D" : "  ");
3778         if (rec->flags & FTRACE_FL_TRAMP_EN) {
3779             ops = ftrace_find_tramp_ops_any(rec);
3780             if (ops) {
3781                 do {
3782                     seq_printf(m, "\ttramp: %pS (%pS)",
3783                            (void *)ops->trampoline,
3784                            (void *)ops->func);
3785                     add_trampoline_func(m, ops, rec);
3786                     ops = ftrace_find_tramp_ops_next(rec, ops);
3787                 } while (ops);
3788             } else
3789                 seq_puts(m, "\ttramp: ERROR!");
3790         } else {
3791             add_trampoline_func(m, NULL, rec);
3792         }
3793         if (rec->flags & FTRACE_FL_DIRECT) {
3794             unsigned long direct;
3795 
3796             direct = ftrace_find_rec_direct(rec->ip);
3797             if (direct)
3798                 seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3799         }
3800     }
3801 
3802     seq_putc(m, '\n');
3803 
3804     return 0;
3805 }
3806 
3807 static const struct seq_operations show_ftrace_seq_ops = {
3808     .start = t_start,
3809     .next = t_next,
3810     .stop = t_stop,
3811     .show = t_show,
3812 };
3813 
3814 static int
3815 ftrace_avail_open(struct inode *inode, struct file *file)
3816 {
3817     struct ftrace_iterator *iter;
3818     int ret;
3819 
3820     ret = security_locked_down(LOCKDOWN_TRACEFS);
3821     if (ret)
3822         return ret;
3823 
3824     if (unlikely(ftrace_disabled))
3825         return -ENODEV;
3826 
3827     iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3828     if (!iter)
3829         return -ENOMEM;
3830 
3831     iter->pg = ftrace_pages_start;
3832     iter->ops = &global_ops;
3833 
3834     return 0;
3835 }
3836 
3837 static int
3838 ftrace_enabled_open(struct inode *inode, struct file *file)
3839 {
3840     struct ftrace_iterator *iter;
3841 
3842     /*
3843      * This shows us what functions are currently being
3844      * traced and by what. It is not clear whether we want
3845      * lockdown to hide such critical information from an admin.
3846      * It may expose information we don't want people to see,
3847      * but if something is tracing something, we probably
3848      * want to know about it.
3849      */
3850 
3851     iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3852     if (!iter)
3853         return -ENOMEM;
3854 
3855     iter->pg = ftrace_pages_start;
3856     iter->flags = FTRACE_ITER_ENABLED;
3857     iter->ops = &global_ops;
3858 
3859     return 0;
3860 }
3861 
3862 /**
3863  * ftrace_regex_open - initialize function tracer filter files
3864  * @ops: The ftrace_ops that hold the hash filters
3865  * @flag: The type of filter to process
3866  * @inode: The inode, usually passed in to your open routine
3867  * @file: The file, usually passed in to your open routine
3868  *
3869  * ftrace_regex_open() initializes the filter files for the
3870  * @ops. Depending on @flag it may process the filter hash or
3871  * the notrace hash of @ops. With this called from the open
3872  * routine, you can use ftrace_filter_write() for the write
3873  * routine if @flag has FTRACE_ITER_FILTER set, or
3874  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3875  * tracing_lseek() should be used as the lseek routine, and
3876  * release must call ftrace_regex_release().
3877  */
3878 int
3879 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3880           struct inode *inode, struct file *file)
3881 {
3882     struct ftrace_iterator *iter;
3883     struct ftrace_hash *hash;
3884     struct list_head *mod_head;
3885     struct trace_array *tr = ops->private;
3886     int ret = -ENOMEM;
3887 
3888     ftrace_ops_init(ops);
3889 
3890     if (unlikely(ftrace_disabled))
3891         return -ENODEV;
3892 
3893     if (tracing_check_open_get_tr(tr))
3894         return -ENODEV;
3895 
3896     iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3897     if (!iter)
3898         goto out;
3899 
3900     if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3901         goto out;
3902 
3903     iter->ops = ops;
3904     iter->flags = flag;
3905     iter->tr = tr;
3906 
3907     mutex_lock(&ops->func_hash->regex_lock);
3908 
3909     if (flag & FTRACE_ITER_NOTRACE) {
3910         hash = ops->func_hash->notrace_hash;
3911         mod_head = tr ? &tr->mod_notrace : NULL;
3912     } else {
3913         hash = ops->func_hash->filter_hash;
3914         mod_head = tr ? &tr->mod_trace : NULL;
3915     }
3916 
3917     iter->mod_list = mod_head;
3918 
3919     if (file->f_mode & FMODE_WRITE) {
3920         const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3921 
3922         if (file->f_flags & O_TRUNC) {
3923             iter->hash = alloc_ftrace_hash(size_bits);
3924             clear_ftrace_mod_list(mod_head);
3925         } else {
3926             iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3927         }
3928 
3929         if (!iter->hash) {
3930             trace_parser_put(&iter->parser);
3931             goto out_unlock;
3932         }
3933     } else
3934         iter->hash = hash;
3935 
3936     ret = 0;
3937 
3938     if (file->f_mode & FMODE_READ) {
3939         iter->pg = ftrace_pages_start;
3940 
3941         ret = seq_open(file, &show_ftrace_seq_ops);
3942         if (!ret) {
3943             struct seq_file *m = file->private_data;
3944             m->private = iter;
3945         } else {
3946             /* Failed */
3947             free_ftrace_hash(iter->hash);
3948             trace_parser_put(&iter->parser);
3949         }
3950     } else
3951         file->private_data = iter;
3952 
3953  out_unlock:
3954     mutex_unlock(&ops->func_hash->regex_lock);
3955 
3956  out:
3957     if (ret) {
3958         kfree(iter);
3959         if (tr)
3960             trace_array_put(tr);
3961     }
3962 
3963     return ret;
3964 }
3965 
3966 static int
3967 ftrace_filter_open(struct inode *inode, struct file *file)
3968 {
3969     struct ftrace_ops *ops = inode->i_private;
3970 
3971     /* Checks for tracefs lockdown */
3972     return ftrace_regex_open(ops,
3973             FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3974             inode, file);
3975 }
3976 
3977 static int
3978 ftrace_notrace_open(struct inode *inode, struct file *file)
3979 {
3980     struct ftrace_ops *ops = inode->i_private;
3981 
3982     /* Checks for tracefs lockdown */
3983     return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3984                  inode, file);
3985 }
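
/*
 * Editor's illustrative sketch (not part of the original file): how the
 * helpers named in the ftrace_regex_open() kernel-doc above are typically
 * wired into a tracefs file_operations. The name example_filter_fops is
 * hypothetical; the file defines its own fops structures further down.
 */
static const struct file_operations example_filter_fops __maybe_unused = {
    .open    = ftrace_filter_open,     /* ends up in ftrace_regex_open() */
    .read    = seq_read,               /* iterates via show_ftrace_seq_ops */
    .write   = ftrace_filter_write,    /* pairs with FTRACE_ITER_FILTER */
    .llseek  = tracing_lseek,          /* lseek routine per the kernel-doc */
    .release = ftrace_regex_release,
};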
3986 
3987 /* Type for quick matching of ftrace basic regexes (globs) from filter_parse_regex() */
3988 struct ftrace_glob {
3989     char *search;
3990     unsigned len;
3991     int type;
3992 };
3993 
3994 /*
3995  * If symbols in an architecture don't correspond exactly to the user-visible
3996  * name of what they represent, it is possible to define this function to
3997  * perform the necessary adjustments.
3998  */
3999 char * __weak arch_ftrace_match_adjust(char *str, const char *search)
4000 {
4001     return str;
4002 }
4003 
4004 static int ftrace_match(char *str, struct ftrace_glob *g)
4005 {
4006     int matched = 0;
4007     int slen;
4008 
4009     str = arch_ftrace_match_adjust(str, g->search);
4010 
4011     switch (g->type) {
4012     case MATCH_FULL:
4013         if (strcmp(str, g->search) == 0)
4014             matched = 1;
4015         break;
4016     case MATCH_FRONT_ONLY:
4017         if (strncmp(str, g->search, g->len) == 0)
4018             matched = 1;
4019         break;
4020     case MATCH_MIDDLE_ONLY:
4021         if (strstr(str, g->search))
4022             matched = 1;
4023         break;
4024     case MATCH_END_ONLY:
4025         slen = strlen(str);
4026         if (slen >= g->len &&
4027             memcmp(str + slen - g->len, g->search, g->len) == 0)
4028             matched = 1;
4029         break;
4030     case MATCH_GLOB:
4031         if (glob_match(g->search, str))
4032             matched = 1;
4033         break;
4034     }
4035 
4036     return matched;
4037 }
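
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * user-supplied glob is parsed into an ftrace_glob and matched against a
 * symbol name, mirroring the usage in match_records() below. The function
 * name is made up; filter_parse_regex() may modify @pattern, so it must
 * point to a writable buffer.
 */
static int __maybe_unused example_glob_matches(char *pattern, char *sym)
{
    struct ftrace_glob g;
    int not;

    g.type = filter_parse_regex(pattern, strlen(pattern), &g.search, &not);
    g.len = strlen(g.search);

    /* A leading '!' sets @not; callers normally invert the match for it */
    return ftrace_match(sym, &g) ^ not;
}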
4038 
4039 static int
4040 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
4041 {
4042     struct ftrace_func_entry *entry;
4043     int ret = 0;
4044 
4045     entry = ftrace_lookup_ip(hash, rec->ip);
4046     if (clear_filter) {
4047         /* Do nothing if it doesn't exist */
4048         if (!entry)
4049             return 0;
4050 
4051         free_hash_entry(hash, entry);
4052     } else {
4053         /* Do nothing if it exists */
4054         if (entry)
4055             return 0;
4056 
4057         ret = add_hash_entry(hash, rec->ip);
4058     }
4059     return ret;
4060 }
4061 
4062 static int
4063 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4064          int clear_filter)
4065 {
4066     long index = simple_strtoul(func_g->search, NULL, 0);
4067     struct ftrace_page *pg;
4068     struct dyn_ftrace *rec;
4069 
4070     /* The index starts at 1 */
4071     if (--index < 0)
4072         return 0;
4073 
4074     do_for_each_ftrace_rec(pg, rec) {
4075         if (pg->index <= index) {
4076             index -= pg->index;
4077             /* this is a double loop, break goes to the next page */
4078             break;
4079         }
4080         rec = &pg->records[index];
4081         enter_record(hash, rec, clear_filter);
4082         return 1;
4083     } while_for_each_ftrace_rec();
4084     return 0;
4085 }
4086 
4087 #ifdef FTRACE_MCOUNT_MAX_OFFSET
4088 static int lookup_ip(unsigned long ip, char **modname, char *str)
4089 {
4090     unsigned long offset;
4091 
4092     kallsyms_lookup(ip, NULL, &offset, modname, str);
4093     if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4094         return -1;
4095     return 0;
4096 }
4097 #else
4098 static int lookup_ip(unsigned long ip, char **modname, char *str)
4099 {
4100     kallsyms_lookup(ip, NULL, NULL, modname, str);
4101     return 0;
4102 }
4103 #endif
4104 
4105 static int
4106 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4107         struct ftrace_glob *mod_g, int exclude_mod)
4108 {
4109     char str[KSYM_SYMBOL_LEN];
4110     char *modname;
4111 
4112     if (lookup_ip(rec->ip, &modname, str)) {
4113         /* This should only happen when a rec is disabled */
4114         WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4115                  !(rec->flags & FTRACE_FL_DISABLED));
4116         return 0;
4117     }
4118 
4119     if (mod_g) {
4120         int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4121 
4122         /* blank module name to match all modules */
4123         if (!mod_g->len) {
4124             /* blank module globbing: modname xor exclude_mod */
4125             if (!exclude_mod != !modname)
4126                 goto func_match;
4127             return 0;
4128         }
4129 
4130         /*
4131          * exclude_mod is set to trace everything but the given
4132          * module. If it is set and the module matches, then
4133          * return 0. If it is not set and the module doesn't match,
4134          * also return 0. Otherwise, check the function to see if
4135          * that matches.
4136          */
4137         if (!mod_matches == !exclude_mod)
4138             return 0;
4139 func_match:
4140         /* blank search means to match all funcs in the mod */
4141         if (!func_g->len)
4142             return 1;
4143     }
4144 
4145     return ftrace_match(str, func_g);
4146 }
4147 
4148 static int
4149 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4150 {
4151     struct ftrace_page *pg;
4152     struct dyn_ftrace *rec;
4153     struct ftrace_glob func_g = { .type = MATCH_FULL };
4154     struct ftrace_glob mod_g = { .type = MATCH_FULL };
4155     struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4156     int exclude_mod = 0;
4157     int found = 0;
4158     int ret;
4159     int clear_filter = 0;
4160 
4161     if (func) {
4162         func_g.type = filter_parse_regex(func, len, &func_g.search,
4163                          &clear_filter);
4164         func_g.len = strlen(func_g.search);
4165     }
4166 
4167     if (mod) {
4168         mod_g.type = filter_parse_regex(mod, strlen(mod),
4169                 &mod_g.search, &exclude_mod);
4170         mod_g.len = strlen(mod_g.search);
4171     }
4172 
4173     mutex_lock(&ftrace_lock);
4174 
4175     if (unlikely(ftrace_disabled))
4176         goto out_unlock;
4177 
4178     if (func_g.type == MATCH_INDEX) {
4179         found = add_rec_by_index(hash, &func_g, clear_filter);
4180         goto out_unlock;
4181     }
4182 
4183     do_for_each_ftrace_rec(pg, rec) {
4184 
4185         if (rec->flags & FTRACE_FL_DISABLED)
4186             continue;
4187 
4188         if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4189             ret = enter_record(hash, rec, clear_filter);
4190             if (ret < 0) {
4191                 found = ret;
4192                 goto out_unlock;
4193             }
4194             found = 1;
4195         }
4196     } while_for_each_ftrace_rec();
4197  out_unlock:
4198     mutex_unlock(&ftrace_lock);
4199 
4200     return found;
4201 }
4202 
4203 static int
4204 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4205 {
4206     return match_records(hash, buff, len, NULL);
4207 }
4208 
4209 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4210                    struct ftrace_ops_hash *old_hash)
4211 {
4212     struct ftrace_ops *op;
4213 
4214     if (!ftrace_enabled)
4215         return;
4216 
4217     if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4218         ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4219         return;
4220     }
4221 
4222     /*
4223      * If this is the shared global_ops filter, then we need to
4224      * check if there is another ops that shares it and is enabled.
4225      * If so, we still need to run the modify code.
4226      */
4227     if (ops->func_hash != &global_ops.local_hash)
4228         return;
4229 
4230     do_for_each_ftrace_op(op, ftrace_ops_list) {
4231         if (op->func_hash == &global_ops.local_hash &&
4232             op->flags & FTRACE_OPS_FL_ENABLED) {
4233             ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4234             /* Only need to do this once */
4235             return;
4236         }
4237     } while_for_each_ftrace_op(op);
4238 }
4239 
4240 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4241                        struct ftrace_hash **orig_hash,
4242                        struct ftrace_hash *hash,
4243                        int enable)
4244 {
4245     struct ftrace_ops_hash old_hash_ops;
4246     struct ftrace_hash *old_hash;
4247     int ret;
4248 
4249     old_hash = *orig_hash;
4250     old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4251     old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4252     ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4253     if (!ret) {
4254         ftrace_ops_update_code(ops, &old_hash_ops);
4255         free_ftrace_hash_rcu(old_hash);
4256     }
4257     return ret;
4258 }
4259 
4260 static bool module_exists(const char *module)
4261 {
4262     /* All modules have the symbol __this_module */
4263     static const char this_mod[] = "__this_module";
4264     char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4265     unsigned long val;
4266     int n;
4267 
4268     n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4269 
4270     if (n > sizeof(modname) - 1)
4271         return false;
4272 
4273     val = module_kallsyms_lookup_name(modname);
4274     return val != 0;
4275 }
4276 
4277 static int cache_mod(struct trace_array *tr,
4278              const char *func, char *module, int enable)
4279 {
4280     struct ftrace_mod_load *ftrace_mod, *n;
4281     struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4282     int ret;
4283 
4284     mutex_lock(&ftrace_lock);
4285 
4286     /* We do not cache inverse filters */
4287     if (func[0] == '!') {
4288         func++;
4289         ret = -EINVAL;
4290 
4291         /* Look to remove this hash */
4292         list_for_each_entry_safe(ftrace_mod, n, head, list) {
4293             if (strcmp(ftrace_mod->module, module) != 0)
4294                 continue;
4295 
4296             /* no func matches all */
4297             if (strcmp(func, "*") == 0 ||
4298                 (ftrace_mod->func &&
4299                  strcmp(ftrace_mod->func, func) == 0)) {
4300                 ret = 0;
4301                 free_ftrace_mod(ftrace_mod);
4302                 continue;
4303             }
4304         }
4305         goto out;
4306     }
4307 
4308     ret = -EINVAL;
4309     /* We only care about modules that have not been loaded yet */
4310     if (module_exists(module))
4311         goto out;
4312 
4313     /* Save this string off, and execute it when the module is loaded */
4314     ret = ftrace_add_mod(tr, func, module, enable);
4315  out:
4316     mutex_unlock(&ftrace_lock);
4317 
4318     return ret;
4319 }
4320 
4321 static int
4322 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4323          int reset, int enable);
4324 
4325 #ifdef CONFIG_MODULES
4326 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4327                  char *mod, bool enable)
4328 {
4329     struct ftrace_mod_load *ftrace_mod, *n;
4330     struct ftrace_hash **orig_hash, *new_hash;
4331     LIST_HEAD(process_mods);
4332     char *func;
4333 
4334     mutex_lock(&ops->func_hash->regex_lock);
4335 
4336     if (enable)
4337         orig_hash = &ops->func_hash->filter_hash;
4338     else
4339         orig_hash = &ops->func_hash->notrace_hash;
4340 
4341     new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4342                           *orig_hash);
4343     if (!new_hash)
4344         goto out; /* warn? */
4345 
4346     mutex_lock(&ftrace_lock);
4347 
4348     list_for_each_entry_safe(ftrace_mod, n, head, list) {
4349 
4350         if (strcmp(ftrace_mod->module, mod) != 0)
4351             continue;
4352 
4353         if (ftrace_mod->func)
4354             func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4355         else
4356             func = kstrdup("*", GFP_KERNEL);
4357 
4358         if (!func) /* warn? */
4359             continue;
4360 
4361         list_move(&ftrace_mod->list, &process_mods);
4362 
4363         /* Use the newly allocated func, as it may be "*" */
4364         kfree(ftrace_mod->func);
4365         ftrace_mod->func = func;
4366     }
4367 
4368     mutex_unlock(&ftrace_lock);
4369 
4370     list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4371 
4372         func = ftrace_mod->func;
4373 
4374         /* Grabs ftrace_lock, which is why we have this extra step */
4375         match_records(new_hash, func, strlen(func), mod);
4376         free_ftrace_mod(ftrace_mod);
4377     }
4378 
4379     if (enable && list_empty(head))
4380         new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4381 
4382     mutex_lock(&ftrace_lock);
4383 
4384     ftrace_hash_move_and_update_ops(ops, orig_hash,
4385                           new_hash, enable);
4386     mutex_unlock(&ftrace_lock);
4387 
4388  out:
4389     mutex_unlock(&ops->func_hash->regex_lock);
4390 
4391     free_ftrace_hash(new_hash);
4392 }
4393 
4394 static void process_cached_mods(const char *mod_name)
4395 {
4396     struct trace_array *tr;
4397     char *mod;
4398 
4399     mod = kstrdup(mod_name, GFP_KERNEL);
4400     if (!mod)
4401         return;
4402 
4403     mutex_lock(&trace_types_lock);
4404     list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4405         if (!list_empty(&tr->mod_trace))
4406             process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4407         if (!list_empty(&tr->mod_notrace))
4408             process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4409     }
4410     mutex_unlock(&trace_types_lock);
4411 
4412     kfree(mod);
4413 }
4414 #endif
4415 
4416 /*
4417  * We register the module command as a template to show others how
4418  * to register a command as well.
4419  */
4420 
4421 static int
4422 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4423             char *func_orig, char *cmd, char *module, int enable)
4424 {
4425     char *func;
4426     int ret;
4427 
4428     /* match_records() modifies func, and we need the original */
4429     func = kstrdup(func_orig, GFP_KERNEL);
4430     if (!func)
4431         return -ENOMEM;
4432 
4433     /*
4434      * cmd == 'mod' because we only registered this func
4435      * for the 'mod' ftrace_func_command.
4436      * But if you register one func with multiple commands,
4437      * you can tell which command was used by the cmd
4438      * parameter.
4439      */
4440     ret = match_records(hash, func, strlen(func), module);
4441     kfree(func);
4442 
4443     if (!ret)
4444         return cache_mod(tr, func_orig, module, enable);
4445     if (ret < 0)
4446         return ret;
4447     return 0;
4448 }
4449 
4450 static struct ftrace_func_command ftrace_mod_cmd = {
4451     .name           = "mod",
4452     .func           = ftrace_mod_callback,
4453 };
4454 
4455 static int __init ftrace_mod_cmd_init(void)
4456 {
4457     return register_ftrace_command(&ftrace_mod_cmd);
4458 }
4459 core_initcall(ftrace_mod_cmd_init);
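
/*
 * Editor's illustrative sketch (not part of the original file): a
 * hypothetical "noop" command registered exactly like the "mod" template
 * above. The names example_noop_* are made up; only the registration
 * pattern is real.
 */
static int example_noop_callback(struct trace_array *tr,
                                 struct ftrace_hash *hash,
                                 char *func, char *cmd, char *param,
                                 int enable)
{
    /* A real command would update @hash here; this one does nothing */
    return 0;
}

static struct ftrace_func_command example_noop_cmd = {
    .name   = "noop",
    .func   = example_noop_callback,
};

static int __init example_noop_cmd_init(void)
{
    /* Commands may currently only be registered from __init context */
    return register_ftrace_command(&example_noop_cmd);
}
core_initcall(example_noop_cmd_init);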
4460 
4461 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4462                       struct ftrace_ops *op, struct ftrace_regs *fregs)
4463 {
4464     struct ftrace_probe_ops *probe_ops;
4465     struct ftrace_func_probe *probe;
4466 
4467     probe = container_of(op, struct ftrace_func_probe, ops);
4468     probe_ops = probe->probe_ops;
4469 
4470     /*
4471      * Disable preemption for these calls to prevent an RCU grace
4472      * period. This syncs the hash iteration and freeing of items
4473      * on the hash. rcu_read_lock is too dangerous here.
4474      */
4475     preempt_disable_notrace();
4476     probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4477     preempt_enable_notrace();
4478 }
4479 
4480 struct ftrace_func_map {
4481     struct ftrace_func_entry    entry;
4482     void                *data;
4483 };
4484 
4485 struct ftrace_func_mapper {
4486     struct ftrace_hash      hash;
4487 };
4488 
4489 /**
4490  * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4491  *
4492  * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4493  */
4494 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4495 {
4496     struct ftrace_hash *hash;
4497 
4498     /*
4499      * The mapper is simply a ftrace_hash, but since the entries
4500      * in the hash are not ftrace_func_entry type, we define it
4501      * as a separate structure.
4502      */
4503     hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4504     return (struct ftrace_func_mapper *)hash;
4505 }
4506 
4507 /**
4508  * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4509  * @mapper: The mapper that has the ip maps
4510  * @ip: the instruction pointer to find the data for
4511  *
4512  * Returns the data mapped to @ip if found, otherwise NULL. The return
4513  * is actually the address of the mapper data pointer. The address is
4514  * returned for use cases where the data is no bigger than a long, and
4515  * the user can use the data pointer as its data instead of having to
4516  * allocate more memory for the reference.
4517  */
4518 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4519                   unsigned long ip)
4520 {
4521     struct ftrace_func_entry *entry;
4522     struct ftrace_func_map *map;
4523 
4524     entry = ftrace_lookup_ip(&mapper->hash, ip);
4525     if (!entry)
4526         return NULL;
4527 
4528     map = (struct ftrace_func_map *)entry;
4529     return &map->data;
4530 }
4531 
4532 /**
4533  * ftrace_func_mapper_add_ip - Map some data to an ip
4534  * @mapper: The mapper that has the ip maps
4535  * @ip: The instruction pointer address to map @data to
4536  * @data: The data to map to @ip
4537  *
4538  * Returns 0 on success otherwise an error.
4539  */
4540 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4541                   unsigned long ip, void *data)
4542 {
4543     struct ftrace_func_entry *entry;
4544     struct ftrace_func_map *map;
4545 
4546     entry = ftrace_lookup_ip(&mapper->hash, ip);
4547     if (entry)
4548         return -EBUSY;
4549 
4550     map = kmalloc(sizeof(*map), GFP_KERNEL);
4551     if (!map)
4552         return -ENOMEM;
4553 
4554     map->entry.ip = ip;
4555     map->data = data;
4556 
4557     __add_hash_entry(&mapper->hash, &map->entry);
4558 
4559     return 0;
4560 }
4561 
4562 /**
4563  * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4564  * @mapper: The mapper that has the ip maps
4565  * @ip: The instruction pointer address to remove the data from
4566  *
4567  * Returns the data if it is found, otherwise NULL.
4568  * Note, if the data pointer is used as the data itself (see
4569  * ftrace_func_mapper_find_ip()), then the return value may be
4570  * meaningless if the data pointer was set to zero.
4571  */
4572 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4573                    unsigned long ip)
4574 {
4575     struct ftrace_func_entry *entry;
4576     struct ftrace_func_map *map;
4577     void *data;
4578 
4579     entry = ftrace_lookup_ip(&mapper->hash, ip);
4580     if (!entry)
4581         return NULL;
4582 
4583     map = (struct ftrace_func_map *)entry;
4584     data = map->data;
4585 
4586     remove_hash_entry(&mapper->hash, entry);
4587     kfree(entry);
4588 
4589     return data;
4590 }
4591 
4592 /**
4593  * free_ftrace_func_mapper - free a mapping of ips and data
4594  * @mapper: The mapper that has the ip maps
4595  * @free_func: A function to be called on each data item.
4596  *
4597  * This is used to free the function mapper. The @free_func is optional
4598  * and can be used if the data needs to be freed as well.
4599  */
4600 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4601                  ftrace_mapper_func free_func)
4602 {
4603     struct ftrace_func_entry *entry;
4604     struct ftrace_func_map *map;
4605     struct hlist_head *hhd;
4606     int size, i;
4607 
4608     if (!mapper)
4609         return;
4610 
4611     if (free_func && mapper->hash.count) {
4612         size = 1 << mapper->hash.size_bits;
4613         for (i = 0; i < size; i++) {
4614             hhd = &mapper->hash.buckets[i];
4615             hlist_for_each_entry(entry, hhd, hlist) {
4616                 map = (struct ftrace_func_map *)entry;
4617                 free_func(map);
4618             }
4619         }
4620     }
4621     free_ftrace_hash(&mapper->hash);
4622 }
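
/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * lifecycle of an ftrace_func_mapper, storing a long-sized counter directly
 * in the data pointer as described in ftrace_func_mapper_find_ip() above.
 * The function name and the bare counter are hypothetical.
 */
static int __maybe_unused example_mapper_usage(unsigned long ip)
{
    struct ftrace_func_mapper *mapper;
    void **data;
    int ret;

    mapper = allocate_ftrace_func_mapper();
    if (!mapper)
        return -ENOMEM;

    /* Map @ip to an initial count of 1, kept in the pointer itself */
    ret = ftrace_func_mapper_add_ip(mapper, ip, (void *)1UL);
    if (ret < 0)
        goto out;

    /* Look the ip back up and bump the count in place */
    data = ftrace_func_mapper_find_ip(mapper, ip);
    if (data)
        *data = (void *)((unsigned long)*data + 1);

    /* Drop the mapping; the return value here is just the raw count */
    ftrace_func_mapper_remove_ip(mapper, ip);
out:
    /* No free_func needed since nothing was separately allocated */
    free_ftrace_func_mapper(mapper, NULL);
    return ret;
}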
4623 
4624 static void release_probe(struct ftrace_func_probe *probe)
4625 {
4626     struct ftrace_probe_ops *probe_ops;
4627 
4628     mutex_lock(&ftrace_lock);
4629 
4630     WARN_ON(probe->ref <= 0);
4631 
4632     /* Subtract the ref that was used to protect this instance */
4633     probe->ref--;
4634 
4635     if (!probe->ref) {
4636         probe_ops = probe->probe_ops;
4637         /*
4638          * Sending zero as ip tells probe_ops to free
4639          * the probe->data itself
4640          */
4641         if (probe_ops->free)
4642             probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4643         list_del(&probe->list);
4644         kfree(probe);
4645     }
4646     mutex_unlock(&ftrace_lock);
4647 }
4648 
4649 static void acquire_probe_locked(struct ftrace_func_probe *probe)
4650 {
4651     /*
4652      * Add one ref to keep it from being freed when releasing the
4653      * ftrace_lock mutex.
4654      */
4655     probe->ref++;
4656 }
4657 
4658 int
4659 register_ftrace_function_probe(char *glob, struct trace_array *tr,
4660                    struct ftrace_probe_ops *probe_ops,
4661                    void *data)
4662 {
4663     struct ftrace_func_probe *probe = NULL, *iter;
4664     struct ftrace_func_entry *entry;
4665     struct ftrace_hash **orig_hash;
4666     struct ftrace_hash *old_hash;
4667     struct ftrace_hash *hash;
4668     int count = 0;
4669     int size;
4670     int ret;
4671     int i;
4672 
4673     if (WARN_ON(!tr))
4674         return -EINVAL;
4675 
4676     /* We do not support '!' for function probes */
4677     if (WARN_ON(glob[0] == '!'))
4678         return -EINVAL;
4679 
4680 
4681     mutex_lock(&ftrace_lock);
4682     /* Check if the probe_ops is already registered */
4683     list_for_each_entry(iter, &tr->func_probes, list) {
4684         if (iter->probe_ops == probe_ops) {
4685             probe = iter;
4686             break;
4687         }
4688     }
4689     if (!probe) {
4690         probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4691         if (!probe) {
4692             mutex_unlock(&ftrace_lock);
4693             return -ENOMEM;
4694         }
4695         probe->probe_ops = probe_ops;
4696         probe->ops.func = function_trace_probe_call;
4697         probe->tr = tr;
4698         ftrace_ops_init(&probe->ops);
4699         list_add(&probe->list, &tr->func_probes);
4700     }
4701 
4702     acquire_probe_locked(probe);
4703 
4704     mutex_unlock(&ftrace_lock);
4705 
4706     /*
4707      * Note, there's a small window here where the func_hash->filter_hash
4708      * may be NULL or empty. Need to be careful when reading the loop.
4709      */
4710     mutex_lock(&probe->ops.func_hash->regex_lock);
4711 
4712     orig_hash = &probe->ops.func_hash->filter_hash;
4713     old_hash = *orig_hash;
4714     hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4715 
4716     if (!hash) {
4717         ret = -ENOMEM;
4718         goto out;
4719     }
4720 
4721     ret = ftrace_match_records(hash, glob, strlen(glob));
4722 
4723     /* Nothing found? */
4724     if (!ret)
4725         ret = -EINVAL;
4726 
4727     if (ret < 0)
4728         goto out;
4729 
4730     size = 1 << hash->size_bits;
4731     for (i = 0; i < size; i++) {
4732         hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4733             if (ftrace_lookup_ip(old_hash, entry->ip))
4734                 continue;
4735             /*
4736              * The caller might want to do something special
4737              * for each function we find. We call the callback
4738              * to give the caller an opportunity to do so.
4739              */
4740             if (probe_ops->init) {
4741                 ret = probe_ops->init(probe_ops, tr,
4742                               entry->ip, data,
4743                               &probe->data);
4744                 if (ret < 0) {
4745                     if (probe_ops->free && count)
4746                         probe_ops->free(probe_ops, tr,
4747                                 0, probe->data);
4748                     probe->data = NULL;
4749                     goto out;
4750                 }
4751             }
4752             count++;
4753         }
4754     }
4755 
4756     mutex_lock(&ftrace_lock);
4757 
4758     if (!count) {
4759         /* Nothing was added? */
4760         ret = -EINVAL;
4761         goto out_unlock;
4762     }
4763 
4764     ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4765                           hash, 1);
4766     if (ret < 0)
4767         goto err_unlock;
4768 
4769     /* One ref for each new function traced */
4770     probe->ref += count;
4771 
4772     if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4773         ret = ftrace_startup(&probe->ops, 0);
4774 
4775  out_unlock:
4776     mutex_unlock(&ftrace_lock);
4777 
4778     if (!ret)
4779         ret = count;
4780  out:
4781     mutex_unlock(&probe->ops.func_hash->regex_lock);
4782     free_ftrace_hash(hash);
4783 
4784     release_probe(probe);
4785 
4786     return ret;
4787 
4788  err_unlock:
4789     if (!probe_ops->free || !count)
4790         goto out_unlock;
4791 
4792     /* Failed to do the move, need to call the free functions */
4793     for (i = 0; i < size; i++) {
4794         hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4795             if (ftrace_lookup_ip(old_hash, entry->ip))
4796                 continue;
4797             probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4798         }
4799     }
4800     goto out_unlock;
4801 }
4802 
4803 int
4804 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4805                       struct ftrace_probe_ops *probe_ops)
4806 {
4807     struct ftrace_func_probe *probe = NULL, *iter;
4808     struct ftrace_ops_hash old_hash_ops;
4809     struct ftrace_func_entry *entry;
4810     struct ftrace_glob func_g;
4811     struct ftrace_hash **orig_hash;
4812     struct ftrace_hash *old_hash;
4813     struct ftrace_hash *hash = NULL;
4814     struct hlist_node *tmp;
4815     struct hlist_head hhd;
4816     char str[KSYM_SYMBOL_LEN];
4817     int count = 0;
4818     int i, ret = -ENODEV;
4819     int size;
4820 
4821     if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4822         func_g.search = NULL;
4823     else {
4824         int not;
4825 
4826         func_g.type = filter_parse_regex(glob, strlen(glob),
4827                          &func_g.search, &not);
4828         func_g.len = strlen(func_g.search);
4829 
4830         /* we do not support '!' for function probes */
4831         if (WARN_ON(not))
4832             return -EINVAL;
4833     }
4834 
4835     mutex_lock(&ftrace_lock);
4836     /* Check if the probe_ops is already registered */
4837     list_for_each_entry(iter, &tr->func_probes, list) {
4838         if (iter->probe_ops == probe_ops) {
4839             probe = iter;
4840             break;
4841         }
4842     }
4843     if (!probe)
4844         goto err_unlock_ftrace;
4845 
4846     ret = -EINVAL;
4847     if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4848         goto err_unlock_ftrace;
4849 
4850     acquire_probe_locked(probe);
4851 
4852     mutex_unlock(&ftrace_lock);
4853 
4854     mutex_lock(&probe->ops.func_hash->regex_lock);
4855 
4856     orig_hash = &probe->ops.func_hash->filter_hash;
4857     old_hash = *orig_hash;
4858 
4859     if (ftrace_hash_empty(old_hash))
4860         goto out_unlock;
4861 
4862     old_hash_ops.filter_hash = old_hash;
4863     /* Probes only have filters */
4864     old_hash_ops.notrace_hash = NULL;
4865 
4866     ret = -ENOMEM;
4867     hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4868     if (!hash)
4869         goto out_unlock;
4870 
4871     INIT_HLIST_HEAD(&hhd);
4872 
4873     size = 1 << hash->size_bits;
4874     for (i = 0; i < size; i++) {
4875         hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4876 
4877             if (func_g.search) {
4878                 kallsyms_lookup(entry->ip, NULL, NULL,
4879                         NULL, str);
4880                 if (!ftrace_match(str, &func_g))
4881                     continue;
4882             }
4883             count++;
4884             remove_hash_entry(hash, entry);
4885             hlist_add_head(&entry->hlist, &hhd);
4886         }
4887     }
4888 
4889     /* Nothing found? */
4890     if (!count) {
4891         ret = -EINVAL;
4892         goto out_unlock;
4893     }
4894 
4895     mutex_lock(&ftrace_lock);
4896 
4897     WARN_ON(probe->ref < count);
4898 
4899     probe->ref -= count;
4900 
4901     if (ftrace_hash_empty(hash))
4902         ftrace_shutdown(&probe->ops, 0);
4903 
4904     ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4905                           hash, 1);
4906 
4907     /* still need to update the function call sites */
4908     if (ftrace_enabled && !ftrace_hash_empty(hash))
4909         ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4910                        &old_hash_ops);
4911     synchronize_rcu();
4912 
4913     hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4914         hlist_del(&entry->hlist);
4915         if (probe_ops->free)
4916             probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4917         kfree(entry);
4918     }
4919     mutex_unlock(&ftrace_lock);
4920 
4921  out_unlock:
4922     mutex_unlock(&probe->ops.func_hash->regex_lock);
4923     free_ftrace_hash(hash);
4924 
4925     release_probe(probe);
4926 
4927     return ret;
4928 
4929  err_unlock_ftrace:
4930     mutex_unlock(&ftrace_lock);
4931     return ret;
4932 }
4933 
4934 void clear_ftrace_function_probes(struct trace_array *tr)
4935 {
4936     struct ftrace_func_probe *probe, *n;
4937 
4938     list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4939         unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4940 }
4941 
4942 static LIST_HEAD(ftrace_commands);
4943 static DEFINE_MUTEX(ftrace_cmd_mutex);
4944 
4945 /*
4946  * Currently we only register ftrace commands from __init, so mark this
4947  * __init too.
4948  */
4949 __init int register_ftrace_command(struct ftrace_func_command *cmd)
4950 {
4951     struct ftrace_func_command *p;
4952     int ret = 0;
4953 
4954     mutex_lock(&ftrace_cmd_mutex);
4955     list_for_each_entry(p, &ftrace_commands, list) {
4956         if (strcmp(cmd->name, p->name) == 0) {
4957             ret = -EBUSY;
4958             goto out_unlock;
4959         }
4960     }
4961     list_add(&cmd->list, &ftrace_commands);
4962  out_unlock:
4963     mutex_unlock(&ftrace_cmd_mutex);
4964 
4965     return ret;
4966 }
4967 
4968 /*
4969  * Currently we only unregister ftrace commands from __init, so mark
4970  * this __init too.
4971  */
4972 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4973 {
4974     struct ftrace_func_command *p, *n;
4975     int ret = -ENODEV;
4976 
4977     mutex_lock(&ftrace_cmd_mutex);
4978     list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4979         if (strcmp(cmd->name, p->name) == 0) {
4980             ret = 0;
4981             list_del_init(&p->list);
4982             goto out_unlock;
4983         }
4984     }
4985  out_unlock:
4986     mutex_unlock(&ftrace_cmd_mutex);
4987 
4988     return ret;
4989 }
4990 
4991 static int ftrace_process_regex(struct ftrace_iterator *iter,
4992                 char *buff, int len, int enable)
4993 {
4994     struct ftrace_hash *hash = iter->hash;
4995     struct trace_array *tr = iter->ops->private;
4996     char *func, *command, *next = buff;
4997     struct ftrace_func_command *p;
4998     int ret = -EINVAL;
4999 
5000     func = strsep(&next, ":");
5001 
5002     if (!next) {
5003         ret = ftrace_match_records(hash, func, len);
5004         if (!ret)
5005             ret = -EINVAL;
5006         if (ret < 0)
5007             return ret;
5008         return 0;
5009     }
5010 
5011     /* command found */
5012 
5013     command = strsep(&next, ":");
5014 
5015     mutex_lock(&ftrace_cmd_mutex);
5016     list_for_each_entry(p, &ftrace_commands, list) {
5017         if (strcmp(p->name, command) == 0) {
5018             ret = p->func(tr, hash, func, command, next, enable);
5019             goto out_unlock;
5020         }
5021     }
5022  out_unlock:
5023     mutex_unlock(&ftrace_cmd_mutex);
5024 
5025     return ret;
5026 }
5027 
5028 static ssize_t
5029 ftrace_regex_write(struct file *file, const char __user *ubuf,
5030            size_t cnt, loff_t *ppos, int enable)
5031 {
5032     struct ftrace_iterator *iter;
5033     struct trace_parser *parser;
5034     ssize_t ret, read;
5035 
5036     if (!cnt)
5037         return 0;
5038 
5039     if (file->f_mode & FMODE_READ) {
5040         struct seq_file *m = file->private_data;
5041         iter = m->private;
5042     } else
5043         iter = file->private_data;
5044 
5045     if (unlikely(ftrace_disabled))
5046         return -ENODEV;
5047 
5048     /* iter->hash is a local copy, so we don't need regex_lock */
5049 
5050     parser = &iter->parser;
5051     read = trace_get_user(parser, ubuf, cnt, ppos);
5052 
5053     if (read >= 0 && trace_parser_loaded(parser) &&
5054         !trace_parser_cont(parser)) {
5055         ret = ftrace_process_regex(iter, parser->buffer,
5056                        parser->idx, enable);
5057         trace_parser_clear(parser);
5058         if (ret < 0)
5059             goto out;
5060     }
5061 
5062     ret = read;
5063  out:
5064     return ret;
5065 }
5066 
5067 ssize_t
5068 ftrace_filter_write(struct file *file, const char __user *ubuf,
5069             size_t cnt, loff_t *ppos)
5070 {
5071     return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5072 }
5073 
5074 ssize_t
5075 ftrace_notrace_write(struct file *file, const char __user *ubuf,
5076              size_t cnt, loff_t *ppos)
5077 {
5078     return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5079 }
5080 
5081 static int
5082 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5083 {
5084     struct ftrace_func_entry *entry;
5085 
5086     ip = ftrace_location(ip);
5087     if (!ip)
5088         return -EINVAL;
5089 
5090     if (remove) {
5091         entry = ftrace_lookup_ip(hash, ip);
5092         if (!entry)
5093             return -ENOENT;
5094         free_hash_entry(hash, entry);
5095         return 0;
5096     }
5097 
5098     return add_hash_entry(hash, ip);
5099 }
5100 
5101 static int
5102 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5103           unsigned int cnt, int remove)
5104 {
5105     unsigned int i;
5106     int err;
5107 
5108     for (i = 0; i < cnt; i++) {
5109         err = __ftrace_match_addr(hash, ips[i], remove);
5110         if (err) {
5111             /*
5112              * This expects @hash to be a temporary hash; if this
5113              * fails, the caller must free @hash.
5114              */
5115             return err;
5116         }
5117     }
5118     return 0;
5119 }
5120 
5121 static int
5122 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
5123         unsigned long *ips, unsigned int cnt,
5124         int remove, int reset, int enable)
5125 {
5126     struct ftrace_hash **orig_hash;
5127     struct ftrace_hash *hash;
5128     int ret;
5129 
5130     if (unlikely(ftrace_disabled))
5131         return -ENODEV;
5132 
5133     mutex_lock(&ops->func_hash->regex_lock);
5134 
5135     if (enable)
5136         orig_hash = &ops->func_hash->filter_hash;
5137     else
5138         orig_hash = &ops->func_hash->notrace_hash;
5139 
5140     if (reset)
5141         hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5142     else
5143         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5144 
5145     if (!hash) {
5146         ret = -ENOMEM;
5147         goto out_regex_unlock;
5148     }
5149 
5150     if (buf && !ftrace_match_records(hash, buf, len)) {
5151         ret = -EINVAL;
5152         goto out_regex_unlock;
5153     }
5154     if (ips) {
5155         ret = ftrace_match_addr(hash, ips, cnt, remove);
5156         if (ret < 0)
5157             goto out_regex_unlock;
5158     }
5159 
5160     mutex_lock(&ftrace_lock);
5161     ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5162     mutex_unlock(&ftrace_lock);
5163 
5164  out_regex_unlock:
5165     mutex_unlock(&ops->func_hash->regex_lock);
5166 
5167     free_ftrace_hash(hash);
5168     return ret;
5169 }
5170 
5171 static int
5172 ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5173         int remove, int reset, int enable)
5174 {
5175     return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
5176 }
5177 
5178 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5179 
5180 struct ftrace_direct_func {
5181     struct list_head    next;
5182     unsigned long       addr;
5183     int         count;
5184 };
5185 
5186 static LIST_HEAD(ftrace_direct_funcs);
5187 
5188 /**
5189  * ftrace_find_direct_func - test an address if it is a registered direct caller
5190  * @addr: The address of a registered direct caller
5191  *
5192  * This searches to see if a ftrace direct caller has been registered
5193  * at a specific address, and if so, it returns a descriptor for it.
5194  *
5195  * This can be used by architecture code to see if an address is
5196  * a direct caller (trampoline) attached to a fentry/mcount location.
5197  * This is useful for the function_graph tracer, as it may need to
5198  * do adjustments if it traced a location that also has a direct
5199  * trampoline attached to it.
5200  */
5201 struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
5202 {
5203     struct ftrace_direct_func *entry;
5204     bool found = false;
5205 
5206     /* May be called by fgraph trampoline (protected by rcu tasks) */
5207     list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
5208         if (entry->addr == addr) {
5209             found = true;
5210             break;
5211         }
5212     }
5213     if (found)
5214         return entry;
5215 
5216     return NULL;
5217 }
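
/*
 * Editor's illustrative sketch (not part of the original file): how
 * architecture or tracer code might test an address it encounters, per the
 * kernel-doc above. The function name is made up; callers are expected to
 * be in a context that keeps the entry from being freed (e.g. the fgraph
 * trampoline, protected by RCU tasks).
 */
static bool __maybe_unused example_addr_is_direct_trampoline(unsigned long addr)
{
    /* Non-NULL means a direct trampoline was registered at @addr */
    return ftrace_find_direct_func(addr) != NULL;
}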
5218 
5219 static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
5220 {
5221     struct ftrace_direct_func *direct;
5222 
5223     direct = kmalloc(sizeof(*direct), GFP_KERNEL);
5224     if (!direct)
5225         return NULL;
5226     direct->addr = addr;
5227     direct->count = 0;
5228     list_add_rcu(&direct->next, &ftrace_direct_funcs);
5229     ftrace_direct_func_count++;
5230     return direct;
5231 }
5232 
5233 static int register_ftrace_function_nolock(struct ftrace_ops *ops);
5234 
5235 /**
5236  * register_ftrace_direct - Call a custom trampoline directly
5237  * @ip: The address of the nop at the beginning of a function
5238  * @addr: The address of the trampoline to call at @ip
5239  *
5240  * This is used to connect a direct call from the nop location (@ip)
5241  * at the start of ftrace traced functions. The location that it calls
5242  * (@addr) must be able to handle a direct call, and save the parameters
5243  * (@addr) must be able to handle a direct call, save the parameters
5244  * of the function being traced, and restore them (or inject new ones
5245  * if needed) before returning.
5246  * Returns:
5247  *  0 on success
5248  *  -EBUSY - Another direct function is already attached (there can be only one)
5249  *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5250  *  -ENOMEM - There was an allocation failure.
5251  */
5252 int register_ftrace_direct(unsigned long ip, unsigned long addr)
5253 {
5254     struct ftrace_direct_func *direct;
5255     struct ftrace_func_entry *entry;
5256     struct ftrace_hash *free_hash = NULL;
5257     struct dyn_ftrace *rec;
5258     int ret = -ENODEV;
5259 
5260     mutex_lock(&direct_mutex);
5261 
5262     ip = ftrace_location(ip);
5263     if (!ip)
5264         goto out_unlock;
5265 
5266     /* See if there's a direct function at @ip already */
5267     ret = -EBUSY;
5268     if (ftrace_find_rec_direct(ip))
5269         goto out_unlock;
5270 
5271     ret = -ENODEV;
5272     rec = lookup_rec(ip, ip);
5273     if (!rec)
5274         goto out_unlock;
5275 
5276     /*
5277      * Check if the rec says it has a direct call but we didn't
5278      * find one earlier?
5279      */
5280     if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
5281         goto out_unlock;
5282 
5283     /* Make sure the ip points to the exact record */
5284     if (ip != rec->ip) {
5285         ip = rec->ip;
5286         /* Need to check this ip for a direct. */
5287         if (ftrace_find_rec_direct(ip))
5288             goto out_unlock;
5289     }
5290 
5291     ret = -ENOMEM;
5292     direct = ftrace_find_direct_func(addr);
5293     if (!direct) {
5294         direct = ftrace_alloc_direct_func(addr);
5295         if (!direct)
5296             goto out_unlock;
5297     }
5298 
5299     entry = ftrace_add_rec_direct(ip, addr, &free_hash);
5300     if (!entry)
5301         goto out_unlock;
5302 
5303     ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
5304 
5305     if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
5306         ret = register_ftrace_function_nolock(&direct_ops);
5307         if (ret)
5308             ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5309     }
5310 
5311     if (ret) {
5312         remove_hash_entry(direct_functions, entry);
5313         kfree(entry);
5314         if (!direct->count) {
5315             list_del_rcu(&direct->next);
5316             synchronize_rcu_tasks();
5317             kfree(direct);
5318             if (free_hash)
5319                 free_ftrace_hash(free_hash);
5320             free_hash = NULL;
5321             ftrace_direct_func_count--;
5322         }
5323     } else {
5324         direct->count++;
5325     }
5326  out_unlock:
5327     mutex_unlock(&direct_mutex);
5328 
5329     if (free_hash) {
5330         synchronize_rcu_tasks();
5331         free_ftrace_hash(free_hash);
5332     }
5333 
5334     return ret;
5335 }
5336 EXPORT_SYMBOL_GPL(register_ftrace_direct);
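/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * ftrace.c. It shows how a separate module might use the single-function
 * direct interface, in the spirit of the kernel's samples/ftrace/ code.
 * my_tramp is assumed to be an arch-specific assembly trampoline that
 * saves and restores the traced function's arguments, and the attach
 * point (wake_up_process) is only an example choice.
 */
#if 0   /* example only -- would live in its own module, not here */
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sched.h>

extern void my_tramp(void);     /* assumed arch-specific asm trampoline */

static int __init my_direct_init(void)
{
    /* Patch the nop at the start of wake_up_process() to call my_tramp */
    return register_ftrace_direct((unsigned long)wake_up_process,
                                  (unsigned long)my_tramp);
}

static void __exit my_direct_exit(void)
{
    unregister_ftrace_direct((unsigned long)wake_up_process,
                             (unsigned long)my_tramp);
}

module_init(my_direct_init);
module_exit(my_direct_exit);
MODULE_LICENSE("GPL");
#endif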
5337 
5338 static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5339                            struct dyn_ftrace **recp)
5340 {
5341     struct ftrace_func_entry *entry;
5342     struct dyn_ftrace *rec;
5343 
5344     rec = lookup_rec(*ip, *ip);
5345     if (!rec)
5346         return NULL;
5347 
5348     entry = __ftrace_lookup_ip(direct_functions, rec->ip);
5349     if (!entry) {
5350         WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5351         return NULL;
5352     }
5353 
5354     WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
5355 
5356     /* Passed in ip just needs to be on the call site */
5357     *ip = rec->ip;
5358 
5359     if (recp)
5360         *recp = rec;
5361 
5362     return entry;
5363 }
5364 
5365 int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5366 {
5367     struct ftrace_direct_func *direct;
5368     struct ftrace_func_entry *entry;
5369     struct ftrace_hash *hash;
5370     int ret = -ENODEV;
5371 
5372     mutex_lock(&direct_mutex);
5373 
5374     ip = ftrace_location(ip);
5375     if (!ip)
5376         goto out_unlock;
5377 
5378     entry = find_direct_entry(&ip, NULL);
5379     if (!entry)
5380         goto out_unlock;
5381 
5382     hash = direct_ops.func_hash->filter_hash;
5383     if (hash->count == 1)
5384         unregister_ftrace_function(&direct_ops);
5385 
5386     ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5387 
5388     WARN_ON(ret);
5389 
5390     remove_hash_entry(direct_functions, entry);
5391 
5392     direct = ftrace_find_direct_func(addr);
5393     if (!WARN_ON(!direct)) {
5394         /* This is the good path (see the ! before WARN) */
5395         direct->count--;
5396         WARN_ON(direct->count < 0);
5397         if (!direct->count) {
5398             list_del_rcu(&direct->next);
5399             synchronize_rcu_tasks();
5400             kfree(direct);
5401             kfree(entry);
5402             ftrace_direct_func_count--;
5403         }
5404     }
5405  out_unlock:
5406     mutex_unlock(&direct_mutex);
5407 
5408     return ret;
5409 }
5410 EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5411 
5412 static struct ftrace_ops stub_ops = {
5413     .func       = ftrace_stub,
5414 };
5415 
5416 /**
5417  * ftrace_modify_direct_caller - modify ftrace nop directly
5418  * @entry: The ftrace hash entry of the direct helper for @rec
5419  * @rec: The record representing the function site to patch
5420  * @old_addr: The location that the site at @rec->ip currently calls
5421  * @new_addr: The location that the site at @rec->ip should call
5422  *
5423  * An architecture may override this function to optimize the
5424  * changing of the direct callback on an ftrace nop location.
5425  * This is called with the ftrace_lock mutex held, and no other
5426  * ftrace callbacks are on the associated record (@rec). Thus,
5427  * it is safe to modify the ftrace record, where it should be
5428  * currently calling @old_addr directly, to call @new_addr.
5429  *
5430  * Safety checks should be made to make sure that the code at
5431  * @rec->ip is currently calling @old_addr. This function must
5432  * also update entry->direct to @new_addr.
5433  */
5434 int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5435                        struct dyn_ftrace *rec,
5436                        unsigned long old_addr,
5437                        unsigned long new_addr)
5438 {
5439     unsigned long ip = rec->ip;
5440     int ret;
5441 
5442     /*
5443      * The ftrace_lock was used to determine if the record
5444      * had more than one registered user. If it did, we needed
5445      * to prevent that from changing in order to do the quick
5446      * switch. But if it did not (only a direct caller was attached),
5447      * then this function is called. This function can deal with
5448      * other callers being attached to the rec we care about, and
5449      * since it uses standard ftrace calls that take the
5450      * ftrace_lock mutex, we need to release it.
5451      */
5452     mutex_unlock(&ftrace_lock);
5453 
5454     /*
5455      * By setting a stub function at the same address, we force
5456      * the code to call the iterator and the direct_ops helper.
5457      * This means that @ip no longer makes the direct call, and
5458      * we can simply modify it.
5459      */
5460     ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5461     if (ret)
5462         goto out_lock;
5463 
5464     ret = register_ftrace_function(&stub_ops);
5465     if (ret) {
5466         ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5467         goto out_lock;
5468     }
5469 
5470     entry->direct = new_addr;
5471 
5472     /*
5473      * By removing the stub, we put back the direct call, which
5474      * now calls @new_addr.
5475      */
5476     unregister_ftrace_function(&stub_ops);
5477     ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5478 
5479  out_lock:
5480     mutex_lock(&ftrace_lock);
5481 
5482     return ret;
5483 }
5484 
5485 /**
5486  * modify_ftrace_direct - Modify an existing direct call to call something else
5487  * @ip: The instruction pointer to modify
5488  * @old_addr: The address that the current @ip calls directly
5489  * @new_addr: The address that the @ip should call
5490  *
5491  * This modifies an ftrace direct caller at an instruction pointer without
5492  * having to disable it first. The direct call will switch over to
5493  * @new_addr without missing anything.
5494  *
5495  * Returns: zero on success. Non-zero on error, which includes:
5496  *  -ENODEV : the @ip given has no direct caller attached
5497  *  -EINVAL : the @old_addr does not match the current direct caller
5498  */
5499 int modify_ftrace_direct(unsigned long ip,
5500              unsigned long old_addr, unsigned long new_addr)
5501 {
5502     struct ftrace_direct_func *direct, *new_direct = NULL;
5503     struct ftrace_func_entry *entry;
5504     struct dyn_ftrace *rec;
5505     int ret = -ENODEV;
5506 
5507     mutex_lock(&direct_mutex);
5508 
5509     mutex_lock(&ftrace_lock);
5510 
5511     ip = ftrace_location(ip);
5512     if (!ip)
5513         goto out_unlock;
5514 
5515     entry = find_direct_entry(&ip, &rec);
5516     if (!entry)
5517         goto out_unlock;
5518 
5519     ret = -EINVAL;
5520     if (entry->direct != old_addr)
5521         goto out_unlock;
5522 
5523     direct = ftrace_find_direct_func(old_addr);
5524     if (WARN_ON(!direct))
5525         goto out_unlock;
5526     if (direct->count > 1) {
5527         ret = -ENOMEM;
5528         new_direct = ftrace_alloc_direct_func(new_addr);
5529         if (!new_direct)
5530             goto out_unlock;
5531         direct->count--;
5532         new_direct->count++;
5533     } else {
5534         direct->addr = new_addr;
5535     }
5536 
5537     /*
5538      * If there's no other ftrace callback on the rec->ip location,
5539      * then it can be changed directly by the architecture.
5540      * If there is another caller, then we just need to change the
5541      * direct caller helper to point to @new_addr.
5542      */
5543     if (ftrace_rec_count(rec) == 1) {
5544         ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5545     } else {
5546         entry->direct = new_addr;
5547         ret = 0;
5548     }
5549 
5550     if (unlikely(ret && new_direct)) {
5551         direct->count++;
5552         list_del_rcu(&new_direct->next);
5553         synchronize_rcu_tasks();
5554         kfree(new_direct);
5555         ftrace_direct_func_count--;
5556     }
5557 
5558  out_unlock:
5559     mutex_unlock(&ftrace_lock);
5560     mutex_unlock(&direct_mutex);
5561     return ret;
5562 }
5563 EXPORT_SYMBOL_GPL(modify_ftrace_direct);
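/*
 * Editor's note: illustrative sketch only, not part of ftrace.c.
 * Continuing the register_ftrace_direct() example above, a module can
 * switch an attached direct call from one (assumed) assembly trampoline
 * to another without ever losing a callback:
 */
#if 0   /* example only */
extern void my_tramp1(void);    /* assumed asm trampolines */
extern void my_tramp2(void);

static int my_switch_tramp(void)
{
    /*
     * Fails with -EINVAL if my_tramp1 is not the direct caller
     * currently attached at wake_up_process().
     */
    return modify_ftrace_direct((unsigned long)wake_up_process,
                                (unsigned long)my_tramp1,
                                (unsigned long)my_tramp2);
}
#endif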
5564 
5565 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5566 
5567 static int check_direct_multi(struct ftrace_ops *ops)
5568 {
5569     if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5570         return -EINVAL;
5571     if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5572         return -EINVAL;
5573     return 0;
5574 }
5575 
5576 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5577 {
5578     struct ftrace_func_entry *entry, *del;
5579     int size, i;
5580 
5581     size = 1 << hash->size_bits;
5582     for (i = 0; i < size; i++) {
5583         hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5584             del = __ftrace_lookup_ip(direct_functions, entry->ip);
5585             if (del && del->direct == addr) {
5586                 remove_hash_entry(direct_functions, del);
5587                 kfree(del);
5588             }
5589         }
5590     }
5591 }
5592 
5593 /**
5594  * register_ftrace_direct_multi - Call a custom trampoline directly
5595  * for multiple functions registered in @ops
5596  * @ops: The address of the struct ftrace_ops object
5597  * @addr: The address of the trampoline to call at @ops functions
5598  *
5599  * This is used to connect direct calls to @addr from the nop locations
5600  * of the functions registered in @ops (via the ftrace_set_filter_ip()
5601  * function).
5602  *
5603  * The location that it calls (@addr) must be able to handle a direct call,
5604  * save the parameters of the function being traced, and restore them
5605  * (or inject new ones if needed) before returning.
5606  *
5607  * Returns:
5608  *  0 on success
5609  *  -EINVAL  - The @ops object was already registered with this call, or
5610  *             there are no functions in the @ops object.
5611  *  -EBUSY   - Another direct function is already attached (there can be only one)
5612  *  -ENODEV  - A function in @ops does not point to an ftrace nop location (or not supported)
5613  *  -ENOMEM  - There was an allocation failure.
5614  */
5615 int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5616 {
5617     struct ftrace_hash *hash, *free_hash = NULL;
5618     struct ftrace_func_entry *entry, *new;
5619     int err = -EBUSY, size, i;
5620 
5621     if (ops->func || ops->trampoline)
5622         return -EINVAL;
5623     if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5624         return -EINVAL;
5625     if (ops->flags & FTRACE_OPS_FL_ENABLED)
5626         return -EINVAL;
5627 
5628     hash = ops->func_hash->filter_hash;
5629     if (ftrace_hash_empty(hash))
5630         return -EINVAL;
5631 
5632     mutex_lock(&direct_mutex);
5633 
5634     /* Make sure requested entries are not already registered.. */
5635     size = 1 << hash->size_bits;
5636     for (i = 0; i < size; i++) {
5637         hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5638             if (ftrace_find_rec_direct(entry->ip))
5639                 goto out_unlock;
5640         }
5641     }
5642 
5643     /* ... and insert them to direct_functions hash. */
5644     err = -ENOMEM;
5645     for (i = 0; i < size; i++) {
5646         hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5647             new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
5648             if (!new)
5649                 goto out_remove;
5650             entry->direct = addr;
5651         }
5652     }
5653 
5654     ops->func = call_direct_funcs;
5655     ops->flags = MULTI_FLAGS;
5656     ops->trampoline = FTRACE_REGS_ADDR;
5657 
5658     err = register_ftrace_function_nolock(ops);
5659 
5660  out_remove:
5661     if (err)
5662         remove_direct_functions_hash(hash, addr);
5663 
5664  out_unlock:
5665     mutex_unlock(&direct_mutex);
5666 
5667     if (free_hash) {
5668         synchronize_rcu_tasks();
5669         free_ftrace_hash(free_hash);
5670     }
5671     return err;
5672 }
5673 EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
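/*
 * Editor's note: illustrative sketch only, not part of ftrace.c. The
 * "multi" interface attaches one trampoline to every function selected
 * in a caller-owned ftrace_ops via ftrace_set_filter_ip(). my_tramp is
 * again an assumed arch-specific assembly trampoline and the two attach
 * points are example choices (compare samples/ftrace/ in the kernel tree).
 */
#if 0   /* example only -- would live in its own module, not here */
static struct ftrace_ops my_direct_ops; /* must start out zeroed */
extern void my_tramp(void);

static int __init my_direct_multi_init(void)
{
    int err;

    /* Select the functions first; this also initializes the ops */
    err = ftrace_set_filter_ip(&my_direct_ops,
                               (unsigned long)wake_up_process, 0, 0);
    if (!err)
        err = ftrace_set_filter_ip(&my_direct_ops,
                                   (unsigned long)schedule, 0, 0);
    /* Then attach my_tramp to every selected location at once */
    if (!err)
        err = register_ftrace_direct_multi(&my_direct_ops,
                                           (unsigned long)my_tramp);
    return err;
}

static void __exit my_direct_multi_exit(void)
{
    unregister_ftrace_direct_multi(&my_direct_ops, (unsigned long)my_tramp);
}
#endif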
5674 
5675 /**
5676  * unregister_ftrace_direct_multi - Remove calls to custom trampoline
5677  * previously registered by register_ftrace_direct_multi() for the @ops object.
5678  * @ops: The address of the struct ftrace_ops object
5679  *
5680  * This is used to remove direct calls to @addr from the nop locations
5681  * of the functions registered in @ops (via the ftrace_set_filter_ip()
5682  * function).
5683  *
5684  * Returns:
5685  *  0 on success
5686  *  -EINVAL - The @ops object was not properly registered.
5687  */
5688 int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5689 {
5690     struct ftrace_hash *hash = ops->func_hash->filter_hash;
5691     int err;
5692 
5693     if (check_direct_multi(ops))
5694         return -EINVAL;
5695     if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5696         return -EINVAL;
5697 
5698     mutex_lock(&direct_mutex);
5699     err = unregister_ftrace_function(ops);
5700     remove_direct_functions_hash(hash, addr);
5701     mutex_unlock(&direct_mutex);
5702 
5703     /* cleanup for a possible later register call */
5704     ops->func = NULL;
5705     ops->trampoline = 0;
5706     return err;
5707 }
5708 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
5709 
5710 static int
5711 __modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5712 {
5713     struct ftrace_hash *hash;
5714     struct ftrace_func_entry *entry, *iter;
5715     static struct ftrace_ops tmp_ops = {
5716         .func       = ftrace_stub,
5717         .flags      = FTRACE_OPS_FL_STUB,
5718     };
5719     int i, size;
5720     int err;
5721 
5722     lockdep_assert_held_once(&direct_mutex);
5723 
5724     /* Enable the tmp_ops to have the same functions as the direct ops */
5725     ftrace_ops_init(&tmp_ops);
5726     tmp_ops.func_hash = ops->func_hash;
5727 
5728     err = register_ftrace_function_nolock(&tmp_ops);
5729     if (err)
5730         return err;
5731 
5732     /*
5733      * Now ftrace_ops_list_func() is called to handle the direct callers.
5734      * We can safely change the direct functions attached to each entry.
5735      */
5736     mutex_lock(&ftrace_lock);
5737 
5738     hash = ops->func_hash->filter_hash;
5739     size = 1 << hash->size_bits;
5740     for (i = 0; i < size; i++) {
5741         hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
5742             entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5743             if (!entry)
5744                 continue;
5745             entry->direct = addr;
5746         }
5747     }
5748 
5749     mutex_unlock(&ftrace_lock);
5750 
5751     /* Removing the tmp_ops will add the updated direct callers to the functions */
5752     unregister_ftrace_function(&tmp_ops);
5753 
5754     return err;
5755 }
5756 
5757 /**
5758  * modify_ftrace_direct_multi_nolock - Modify an existing direct 'multi' call
5759  * to call something else
5760  * @ops: The address of the struct ftrace_ops object
5761  * @addr: The address of the new trampoline to call at @ops functions
5762  *
5763  * This is used to unregister the currently registered direct caller and
5764  * register a new one (@addr) on the functions registered in the @ops object.
5765  *
5766  * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5767  * where no callbacks will be called.
5768  *
5769  * The caller should already have direct_mutex locked, so we don't lock
5770  * direct_mutex here.
5771  *
5772  * Returns: zero on success. Non-zero on error, which includes:
5773  *  -EINVAL - The @ops object was not properly registered.
5774  */
5775 int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
5776 {
5777     if (check_direct_multi(ops))
5778         return -EINVAL;
5779     if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5780         return -EINVAL;
5781 
5782     return __modify_ftrace_direct_multi(ops, addr);
5783 }
5784 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi_nolock);
5785 
5786 /**
5787  * modify_ftrace_direct_multi - Modify an existing direct 'multi' call
5788  * to call something else
5789  * @ops: The address of the struct ftrace_ops object
5790  * @addr: The address of the new trampoline to call at @ops functions
5791  *
5792  * This is used to unregister the currently registered direct caller and
5793  * register a new one (@addr) on the functions registered in the @ops object.
5794  *
5795  * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5796  * where no callbacks will be called.
5797  *
5798  * Returns: zero on success. Non-zero on error, which includes:
5799  *  -EINVAL - The @ops object was not properly registered.
5800  */
5801 int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5802 {
5803     int err;
5804 
5805     if (check_direct_multi(ops))
5806         return -EINVAL;
5807     if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5808         return -EINVAL;
5809 
5810     mutex_lock(&direct_mutex);
5811     err = __modify_ftrace_direct_multi(ops, addr);
5812     mutex_unlock(&direct_mutex);
5813     return err;
5814 }
5815 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
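/*
 * Editor's note: illustrative sketch only, not part of ftrace.c.
 * Continuing the multi example above, every function selected in
 * my_direct_ops can be re-pointed at a new (assumed) trampoline with a
 * single call:
 */
#if 0   /* example only */
extern void my_other_tramp(void);       /* assumed asm trampoline */

static int my_switch_all(void)
{
    return modify_ftrace_direct_multi(&my_direct_ops,
                                      (unsigned long)my_other_tramp);
}
#endif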
5816 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5817 
5818 /**
5819  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5820  * @ops - the ops to set the filter with
5821  * @ip - the address to add to or remove from the filter.
5822  * @remove - non zero to remove the ip from the filter
5823  * @reset - non zero to reset all filters before applying this filter.
5824  *
5825  * Filters denote which functions should be enabled when tracing is enabled.
5826  * If @ip is NULL, it fails to update the filter.
5827  */
5828 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5829              int remove, int reset)
5830 {
5831     ftrace_ops_init(ops);
5832     return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
5833 }
5834 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
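/*
 * Editor's note: illustrative sketch only, not part of ftrace.c. A
 * typical user pairs ftrace_set_filter_ip() with a private ftrace_ops
 * and register_ftrace_function(); the callback name and the traced
 * function are assumptions made for the example.
 */
#if 0   /* example only */
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct ftrace_regs *fregs)
{
    trace_printk("%ps called from %ps\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_trace_ops = {
    .func = my_trace_func,
};

static int my_trace_one(void)
{
    int err;

    /* remove=0, reset=1: trace only wake_up_process() */
    err = ftrace_set_filter_ip(&my_trace_ops,
                               (unsigned long)wake_up_process, 0, 1);
    if (err)
        return err;
    return register_ftrace_function(&my_trace_ops);
}
#endif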
5835 
5836 /**
5837  * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
5838  * @ops - the ops to set the filter with
5839  * @ips - the array of addresses to add to or remove from the filter.
5840  * @cnt - the number of addresses in @ips
5841  * @remove - non zero to remove ips from the filter
5842  * @reset - non zero to reset all filters before applying this filter.
5843  *
5844  * Filters denote which functions should be enabled when tracing is enabled.
5845  * If the @ips array or any ip specified within is NULL, it fails to update the filter.
5846  */
5847 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
5848               unsigned int cnt, int remove, int reset)
5849 {
5850     ftrace_ops_init(ops);
5851     return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
5852 }
5853 EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
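/*
 * Editor's note: illustrative sketch only, not part of ftrace.c,
 * continuing the my_trace_ops example above with the batched variant;
 * the two addresses are example choices.
 */
#if 0   /* example only */
static int my_trace_two(void)
{
    unsigned long ips[] = {
        (unsigned long)wake_up_process,
        (unsigned long)schedule,
    };

    /* remove=0, reset=1: replace the filter with exactly these two */
    return ftrace_set_filter_ips(&my_trace_ops, ips, ARRAY_SIZE(ips), 0, 1);
}
#endif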
5854 
5855 /**
5856  * ftrace_ops_set_global_filter - setup ops to use global filters
5857  * @ops - the ops which will use the global filters
5858  *
5859  * ftrace users who need global function trace filtering should call this.
5860  * It can set the global filter only if @ops has not been initialized before.
5861  */
5862 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5863 {
5864     if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5865         return;
5866 
5867     ftrace_ops_init(ops);
5868     ops->func_hash = &global_ops.local_hash;
5869 }
5870 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5871 
5872 static int
5873 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5874          int reset, int enable)
5875 {
5876     return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
5877 }
5878 
5879 /**
5880  * ftrace_set_filter - set a function to filter on in ftrace
5881  * @ops - the ops to set the filter with
5882  * @buf - the string that holds the function filter text.
5883  * @len - the length of the string.
5884  * @reset - non zero to reset all filters before applying this filter.
5885  *
5886  * Filters denote which functions should be enabled when tracing is enabled.
5887  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5888  */
5889 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5890                int len, int reset)
5891 {
5892     ftrace_ops_init(ops);
5893     return ftrace_set_regex(ops, buf, len, reset, 1);
5894 }
5895 EXPORT_SYMBOL_GPL(ftrace_set_filter);
5896 
5897 /**
5898  * ftrace_set_notrace - set a function to not trace in ftrace
5899  * @ops - the ops to set the notrace filter with
5900  * @buf - the string that holds the function notrace text.
5901  * @len - the length of the string.
5902  * @reset - non zero to reset all filters before applying this filter.
5903  *
5904  * Notrace Filters denote which functions should not be enabled when tracing
5905  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5906  * for tracing.
5907  */
5908 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5909             int len, int reset)
5910 {
5911     ftrace_ops_init(ops);
5912     return ftrace_set_regex(ops, buf, len, reset, 0);
5913 }
5914 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
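/*
 * Editor's note: illustrative sketch only, not part of ftrace.c. The
 * string-based filters take the same glob syntax as the tracefs
 * set_ftrace_filter file; the ops, patterns and callback below are
 * example assumptions continuing the my_trace_ops sketch above.
 */
#if 0   /* example only */
static int my_trace_by_name(void)
{
    char filter[] = "kmem_cache_*";
    char notrace[] = "*_bulk";
    int err;

    /* Trace every kmem_cache_* function ... */
    err = ftrace_set_filter(&my_trace_ops, (unsigned char *)filter,
                            strlen(filter), 1);
    /* ... except the *_bulk variants. */
    if (!err)
        err = ftrace_set_notrace(&my_trace_ops, (unsigned char *)notrace,
                                 strlen(notrace), 1);
    if (!err)
        err = register_ftrace_function(&my_trace_ops);
    return err;
}
#endif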
5915 /**
5916  * ftrace_set_global_filter - set a function to filter on with global tracers
5917  * @buf - the string that holds the function filter text.
5918  * @len - the length of the string.
5919  * @reset - non zero to reset all filters before applying this filter.
5920  *
5921  * Filters denote which functions should be enabled when tracing is enabled.
5922  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5923  */
5924 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5925 {
5926     ftrace_set_regex(&global_ops, buf, len, reset, 1);
5927 }
5928 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5929 
5930 /**
5931  * ftrace_set_global_notrace - set a function to not trace with global tracers
5932  * @buf - the string that holds the function notrace text.
5933  * @len - the length of the string.
5934  * @reset - non zero to reset all filters before applying this filter.
5935  *
5936  * Notrace Filters denote which functions should not be enabled when tracing
5937  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5938  * for tracing.
5939  */
5940 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5941 {
5942     ftrace_set_regex(&global_ops, buf, len, reset, 0);
5943 }
5944 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
5945 
5946 /*
5947  * command line interface to allow users to set filters on boot up.
5948  */
5949 #define FTRACE_FILTER_SIZE      COMMAND_LINE_SIZE
5950 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5951 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5952 
5953 /* Used by the function selftest to skip testing when a boot-time filter is set */
5954 bool ftrace_filter_param __initdata;
5955 
5956 static int __init set_ftrace_notrace(char *str)
5957 {
5958     ftrace_filter_param = true;
5959     strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5960     return 1;
5961 }
5962 __setup("ftrace_notrace=", set_ftrace_notrace);
5963 
5964 static int __init set_ftrace_filter(char *str)
5965 {
5966     ftrace_filter_param = true;
5967     strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5968     return 1;
5969 }
5970 __setup("ftrace_filter=", set_ftrace_filter);
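/*
 * Editor's note (illustrative, not part of ftrace.c): these __setup()
 * hooks take comma-separated globs from the kernel command line, for
 * example:
 *
 *     ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*rcu*
 *
 * The strings are only copied here; they are applied to the global ops
 * by set_ftrace_early_filters() below, early during boot.
 */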
5971 
5972 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5973 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5974 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5975 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5976 
5977 static int __init set_graph_function(char *str)
5978 {
5979     strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5980     return 1;
5981 }
5982 __setup("ftrace_graph_filter=", set_graph_function);
5983 
5984 static int __init set_graph_notrace_function(char *str)
5985 {
5986     strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5987     return 1;
5988 }
5989 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
5990 
5991 static int __init set_graph_max_depth_function(char *str)
5992 {
5993     if (!str)
5994         return 0;
5995     fgraph_max_depth = simple_strtoul(str, NULL, 0);
5996     return 1;
5997 }
5998 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
5999 
6000 static void __init set_ftrace_early_graph(char *buf, int enable)
6001 {
6002     int ret;
6003     char *func;
6004     struct ftrace_hash *hash;
6005 
6006     hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
6007     if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
6008         return;
6009 
6010     while (buf) {
6011         func = strsep(&buf, ",");
6012         /* we allow only one expression at a time */
6013         ret = ftrace_graph_set_hash(hash, func);
6014         if (ret)
6015             printk(KERN_DEBUG "ftrace: function %s not "
6016                       "traceable\n", func);
6017     }
6018 
6019     if (enable)
6020         ftrace_graph_hash = hash;
6021     else
6022         ftrace_graph_notrace_hash = hash;
6023 }
6024 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6025 
6026 void __init
6027 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
6028 {
6029     char *func;
6030 
6031     ftrace_ops_init(ops);
6032 
6033     while (buf) {
6034         func = strsep(&buf, ",");
6035         ftrace_set_regex(ops, func, strlen(func), 0, enable);
6036     }
6037 }
6038 
6039 static void __init set_ftrace_early_filters(void)
6040 {
6041     if (ftrace_filter_buf[0])
6042         ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
6043     if (ftrace_notrace_buf[0])
6044         ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
6045 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6046     if (ftrace_graph_buf[0])
6047         set_ftrace_early_graph(ftrace_graph_buf, 1);
6048     if (ftrace_graph_notrace_buf[0])
6049         set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
6050 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6051 }
6052 
6053 int ftrace_regex_release(struct inode *inode, struct file *file)
6054 {
6055     struct seq_file *m = (struct seq_file *)file->private_data;
6056     struct ftrace_iterator *iter;
6057     struct ftrace_hash **orig_hash;
6058     struct trace_parser *parser;
6059     int filter_hash;
6060 
6061     if (file->f_mode & FMODE_READ) {
6062         iter = m->private;
6063         seq_release(inode, file);
6064     } else
6065         iter = file->private_data;
6066 
6067     parser = &iter->parser;
6068     if (trace_parser_loaded(parser)) {
6069         int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
6070 
6071         ftrace_process_regex(iter, parser->buffer,
6072                      parser->idx, enable);
6073     }
6074 
6075     trace_parser_put(parser);
6076 
6077     mutex_lock(&iter->ops->func_hash->regex_lock);
6078 
6079     if (file->f_mode & FMODE_WRITE) {
6080         filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
6081 
6082         if (filter_hash) {
6083             orig_hash = &iter->ops->func_hash->filter_hash;
6084             if (iter->tr && !list_empty(&iter->tr->mod_trace))
6085                 iter->hash->flags |= FTRACE_HASH_FL_MOD;
6086         } else
6087             orig_hash = &iter->ops->func_hash->notrace_hash;
6088 
6089         mutex_lock(&ftrace_lock);
6090         ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
6091                               iter->hash, filter_hash);
6092         mutex_unlock(&ftrace_lock);
6093     } else {
6094         /* For read only, the hash is the ops hash */
6095         iter->hash = NULL;
6096     }
6097 
6098     mutex_unlock(&iter->ops->func_hash->regex_lock);
6099     free_ftrace_hash(iter->hash);
6100     if (iter->tr)
6101         trace_array_put(iter->tr);
6102     kfree(iter);
6103 
6104     return 0;
6105 }
6106 
6107 static const struct file_operations ftrace_avail_fops = {
6108     .open = ftrace_avail_open,
6109     .read = seq_read,
6110     .llseek = seq_lseek,
6111     .release = seq_release_private,
6112 };
6113 
6114 static const struct file_operations ftrace_enabled_fops = {
6115     .open = ftrace_enabled_open,
6116     .read = seq_read,
6117     .llseek = seq_lseek,
6118     .release = seq_release_private,
6119 };
6120 
6121 static const struct file_operations ftrace_filter_fops = {
6122     .open = ftrace_filter_open,
6123     .read = seq_read,
6124     .write = ftrace_filter_write,
6125     .llseek = tracing_lseek,
6126     .release = ftrace_regex_release,
6127 };
6128 
6129 static const struct file_operations ftrace_notrace_fops = {
6130     .open = ftrace_notrace_open,
6131     .read = seq_read,
6132     .write = ftrace_notrace_write,
6133     .llseek = tracing_lseek,
6134     .release = ftrace_regex_release,
6135 };
6136 
6137 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6138 
6139 static DEFINE_MUTEX(graph_lock);
6140 
6141 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
6142 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
6143 
6144 enum graph_filter_type {
6145     GRAPH_FILTER_NOTRACE    = 0,
6146     GRAPH_FILTER_FUNCTION,
6147 };
6148 
6149 #define FTRACE_GRAPH_EMPTY  ((void *)1)
6150 
6151 struct ftrace_graph_data {
6152     struct ftrace_hash      *hash;
6153     struct ftrace_func_entry    *entry;
6154     int             idx;   /* for hash table iteration */
6155     enum graph_filter_type      type;
6156     struct ftrace_hash      *new_hash;
6157     const struct seq_operations *seq_ops;
6158     struct trace_parser     parser;
6159 };
6160 
6161 static void *
6162 __g_next(struct seq_file *m, loff_t *pos)
6163 {
6164     struct ftrace_graph_data *fgd = m->private;
6165     struct ftrace_func_entry *entry = fgd->entry;
6166     struct hlist_head *head;
6167     int i, idx = fgd->idx;
6168 
6169     if (*pos >= fgd->hash->count)
6170         return NULL;
6171 
6172     if (entry) {
6173         hlist_for_each_entry_continue(entry, hlist) {
6174             fgd->entry = entry;
6175             return entry;
6176         }
6177 
6178         idx++;
6179     }
6180 
6181     for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6182         head = &fgd->hash->buckets[i];
6183         hlist_for_each_entry(entry, head, hlist) {
6184             fgd->entry = entry;
6185             fgd->idx = i;
6186             return entry;
6187         }
6188     }
6189     return NULL;
6190 }
6191 
6192 static void *
6193 g_next(struct seq_file *m, void *v, loff_t *pos)
6194 {
6195     (*pos)++;
6196     return __g_next(m, pos);
6197 }
6198 
6199 static void *g_start(struct seq_file *m, loff_t *pos)
6200 {
6201     struct ftrace_graph_data *fgd = m->private;
6202 
6203     mutex_lock(&graph_lock);
6204 
6205     if (fgd->type == GRAPH_FILTER_FUNCTION)
6206         fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6207                     lockdep_is_held(&graph_lock));
6208     else
6209         fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6210                     lockdep_is_held(&graph_lock));
6211 
6212     /* Nothing set; tell g_show to print that all functions are enabled */
6213     if (ftrace_hash_empty(fgd->hash) && !*pos)
6214         return FTRACE_GRAPH_EMPTY;
6215 
6216     fgd->idx = 0;
6217     fgd->entry = NULL;
6218     return __g_next(m, pos);
6219 }
6220 
6221 static void g_stop(struct seq_file *m, void *p)
6222 {
6223     mutex_unlock(&graph_lock);
6224 }
6225 
6226 static int g_show(struct seq_file *m, void *v)
6227 {
6228     struct ftrace_func_entry *entry = v;
6229 
6230     if (!entry)
6231         return 0;
6232 
6233     if (entry == FTRACE_GRAPH_EMPTY) {
6234         struct ftrace_graph_data *fgd = m->private;
6235 
6236         if (fgd->type == GRAPH_FILTER_FUNCTION)
6237             seq_puts(m, "#### all functions enabled ####\n");
6238         else
6239             seq_puts(m, "#### no functions disabled ####\n");
6240         return 0;
6241     }
6242 
6243     seq_printf(m, "%ps\n", (void *)entry->ip);
6244 
6245     return 0;
6246 }
6247 
6248 static const struct seq_operations ftrace_graph_seq_ops = {
6249     .start = g_start,
6250     .next = g_next,
6251     .stop = g_stop,
6252     .show = g_show,
6253 };
6254 
6255 static int
6256 __ftrace_graph_open(struct inode *inode, struct file *file,
6257             struct ftrace_graph_data *fgd)
6258 {
6259     int ret;
6260     struct ftrace_hash *new_hash = NULL;
6261 
6262     ret = security_locked_down(LOCKDOWN_TRACEFS);
6263     if (ret)
6264         return ret;
6265 
6266     if (file->f_mode & FMODE_WRITE) {
6267         const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6268 
6269         if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6270             return -ENOMEM;
6271 
6272         if (file->f_flags & O_TRUNC)
6273             new_hash = alloc_ftrace_hash(size_bits);
6274         else
6275             new_hash = alloc_and_copy_ftrace_hash(size_bits,
6276                                   fgd->hash);
6277         if (!new_hash) {
6278             ret = -ENOMEM;
6279             goto out;
6280         }
6281     }
6282 
6283     if (file->f_mode & FMODE_READ) {
6284         ret = seq_open(file, &ftrace_graph_seq_ops);
6285         if (!ret) {
6286             struct seq_file *m = file->private_data;
6287             m->private = fgd;
6288         } else {
6289             /* Failed */
6290             free_ftrace_hash(new_hash);
6291             new_hash = NULL;
6292         }
6293     } else
6294         file->private_data = fgd;
6295 
6296 out:
6297     if (ret < 0 && file->f_mode & FMODE_WRITE)
6298         trace_parser_put(&fgd->parser);
6299 
6300     fgd->new_hash = new_hash;
6301 
6302     /*
6303      * All uses of fgd->hash must be taken with the graph_lock
6304      * held. The graph_lock is going to be released, so force
6305      * fgd->hash to be reinitialized when it is taken again.
6306      */
6307     fgd->hash = NULL;
6308 
6309     return ret;
6310 }
6311 
6312 static int
6313 ftrace_graph_open(struct inode *inode, struct file *file)
6314 {
6315     struct ftrace_graph_data *fgd;
6316     int ret;
6317 
6318     if (unlikely(ftrace_disabled))
6319         return -ENODEV;
6320 
6321     fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6322     if (fgd == NULL)
6323         return -ENOMEM;
6324 
6325     mutex_lock(&graph_lock);
6326 
6327     fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6328                     lockdep_is_held(&graph_lock));
6329     fgd->type = GRAPH_FILTER_FUNCTION;
6330     fgd->seq_ops = &ftrace_graph_seq_ops;
6331 
6332     ret = __ftrace_graph_open(inode, file, fgd);
6333     if (ret < 0)
6334         kfree(fgd);
6335 
6336     mutex_unlock(&graph_lock);
6337     return ret;
6338 }
6339 
6340 static int
6341 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6342 {
6343     struct ftrace_graph_data *fgd;
6344     int ret;
6345 
6346     if (unlikely(ftrace_disabled))
6347         return -ENODEV;
6348 
6349     fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6350     if (fgd == NULL)
6351         return -ENOMEM;
6352 
6353     mutex_lock(&graph_lock);
6354 
6355     fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6356                     lockdep_is_held(&graph_lock));
6357     fgd->type = GRAPH_FILTER_NOTRACE;
6358     fgd->seq_ops = &ftrace_graph_seq_ops;
6359 
6360     ret = __ftrace_graph_open(inode, file, fgd);
6361     if (ret < 0)
6362         kfree(fgd);
6363 
6364     mutex_unlock(&graph_lock);
6365     return ret;
6366 }
6367 
6368 static int
6369 ftrace_graph_release(struct inode *inode, struct file *file)
6370 {
6371     struct ftrace_graph_data *fgd;
6372     struct ftrace_hash *old_hash, *new_hash;
6373     struct trace_parser *parser;
6374     int ret = 0;
6375 
6376     if (file->f_mode & FMODE_READ) {
6377         struct seq_file *m = file->private_data;
6378 
6379         fgd = m->private;
6380         seq_release(inode, file);
6381     } else {
6382         fgd = file->private_data;
6383     }
6384 
6385 
6386     if (file->f_mode & FMODE_WRITE) {
6387 
6388         parser = &fgd->parser;
6389 
6390         if (trace_parser_loaded((parser))) {
6391             ret = ftrace_graph_set_hash(fgd->new_hash,
6392                             parser->buffer);
6393         }
6394 
6395         trace_parser_put(parser);
6396 
6397         new_hash = __ftrace_hash_move(fgd->new_hash);
6398         if (!new_hash) {
6399             ret = -ENOMEM;
6400             goto out;
6401         }
6402 
6403         mutex_lock(&graph_lock);
6404 
6405         if (fgd->type == GRAPH_FILTER_FUNCTION) {
6406             old_hash = rcu_dereference_protected(ftrace_graph_hash,
6407                     lockdep_is_held(&graph_lock));
6408             rcu_assign_pointer(ftrace_graph_hash, new_hash);
6409         } else {
6410             old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6411                     lockdep_is_held(&graph_lock));
6412             rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6413         }
6414 
6415         mutex_unlock(&graph_lock);
6416 
6417         /*
6418          * We need to do a hard force of sched synchronization.
6419          * This is because we use preempt_disable() to do RCU, but
6420          * the function tracers can be called where RCU is not watching
6421          * (like before user_exit()). We cannot rely on the RCU
6422          * infrastructure to do the synchronization, thus we must do it
6423          * ourselves.
6424          */
6425         if (old_hash != EMPTY_HASH)
6426             synchronize_rcu_tasks_rude();
6427 
6428         free_ftrace_hash(old_hash);
6429     }
6430 
6431  out:
6432     free_ftrace_hash(fgd->new_hash);
6433     kfree(fgd);
6434 
6435     return ret;
6436 }
6437 
6438 static int
6439 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6440 {
6441     struct ftrace_glob func_g;
6442     struct dyn_ftrace *rec;
6443     struct ftrace_page *pg;
6444     struct ftrace_func_entry *entry;
6445     int fail = 1;
6446     int not;
6447 
6448     /* decode regex */
6449     func_g.type = filter_parse_regex(buffer, strlen(buffer),
6450                      &func_g.search, &not);
6451 
6452     func_g.len = strlen(func_g.search);
6453 
6454     mutex_lock(&ftrace_lock);
6455 
6456     if (unlikely(ftrace_disabled)) {
6457         mutex_unlock(&ftrace_lock);
6458         return -ENODEV;
6459     }
6460 
6461     do_for_each_ftrace_rec(pg, rec) {
6462 
6463         if (rec->flags & FTRACE_FL_DISABLED)
6464             continue;
6465 
6466         if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6467             entry = ftrace_lookup_ip(hash, rec->ip);
6468 
6469             if (!not) {
6470                 fail = 0;
6471 
6472                 if (entry)
6473                     continue;
6474                 if (add_hash_entry(hash, rec->ip) < 0)
6475                     goto out;
6476             } else {
6477                 if (entry) {
6478                     free_hash_entry(hash, entry);
6479                     fail = 0;
6480                 }
6481             }
6482         }
6483     } while_for_each_ftrace_rec();
6484 out:
6485     mutex_unlock(&ftrace_lock);
6486 
6487     if (fail)
6488         return -EINVAL;
6489 
6490     return 0;
6491 }
6492 
6493 static ssize_t
6494 ftrace_graph_write(struct file *file, const char __user *ubuf,
6495            size_t cnt, loff_t *ppos)
6496 {
6497     ssize_t read, ret = 0;
6498     struct ftrace_graph_data *fgd = file->private_data;
6499     struct trace_parser *parser;
6500 
6501     if (!cnt)
6502         return 0;
6503 
6504     /* Read mode uses seq functions */
6505     if (file->f_mode & FMODE_READ) {
6506         struct seq_file *m = file->private_data;
6507         fgd = m->private;
6508     }
6509 
6510     parser = &fgd->parser;
6511 
6512     read = trace_get_user(parser, ubuf, cnt, ppos);
6513 
6514     if (read >= 0 && trace_parser_loaded(parser) &&
6515         !trace_parser_cont(parser)) {
6516 
6517         ret = ftrace_graph_set_hash(fgd->new_hash,
6518                         parser->buffer);
6519         trace_parser_clear(parser);
6520     }
6521 
6522     if (!ret)
6523         ret = read;
6524 
6525     return ret;
6526 }
6527 
6528 static const struct file_operations ftrace_graph_fops = {
6529     .open       = ftrace_graph_open,
6530     .read       = seq_read,
6531     .write      = ftrace_graph_write,
6532     .llseek     = tracing_lseek,
6533     .release    = ftrace_graph_release,
6534 };
6535 
6536 static const struct file_operations ftrace_graph_notrace_fops = {
6537     .open       = ftrace_graph_notrace_open,
6538     .read       = seq_read,
6539     .write      = ftrace_graph_write,
6540     .llseek     = tracing_lseek,
6541     .release    = ftrace_graph_release,
6542 };
6543 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6544 
6545 void ftrace_create_filter_files(struct ftrace_ops *ops,
6546                 struct dentry *parent)
6547 {
6548 
6549     trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
6550               ops, &ftrace_filter_fops);
6551 
6552     trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
6553               ops, &ftrace_notrace_fops);
6554 }
6555 
6556 /*
6557  * The name "destroy_filter_files" is really a misnomer. In the
6558  * future it may actually delete the files, but for now it is
6559  * really intended to make sure the ops passed in are disabled
6560  * and that, when this function returns, the caller is free to
6561  * free the ops.
6562  *
6563  * The "destroy" name is only to match the "create" name that this
6564  * should be paired with.
6565  */
6566 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6567 {
6568     mutex_lock(&ftrace_lock);
6569     if (ops->flags & FTRACE_OPS_FL_ENABLED)
6570         ftrace_shutdown(ops, 0);
6571     ops->flags |= FTRACE_OPS_FL_DELETED;
6572     ftrace_free_filter(ops);
6573     mutex_unlock(&ftrace_lock);
6574 }
6575 
6576 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6577 {
6578 
6579     trace_create_file("available_filter_functions", TRACE_MODE_READ,
6580             d_tracer, NULL, &ftrace_avail_fops);
6581 
6582     trace_create_file("enabled_functions", TRACE_MODE_READ,
6583             d_tracer, NULL, &ftrace_enabled_fops);
6584 
6585     ftrace_create_filter_files(&global_ops, d_tracer);
6586 
6587 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6588     trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
6589                     NULL,
6590                     &ftrace_graph_fops);
6591     trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
6592                     NULL,
6593                     &ftrace_graph_notrace_fops);
6594 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6595 
6596     return 0;
6597 }
6598 
6599 static int ftrace_cmp_ips(const void *a, const void *b)
6600 {
6601     const unsigned long *ipa = a;
6602     const unsigned long *ipb = b;
6603 
6604     if (*ipa > *ipb)
6605         return 1;
6606     if (*ipa < *ipb)
6607         return -1;
6608     return 0;
6609 }
6610 
6611 #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
6612 static void test_is_sorted(unsigned long *start, unsigned long count)
6613 {
6614     int i;
6615 
6616     for (i = 1; i < count; i++) {
6617         if (WARN(start[i - 1] > start[i],
6618              "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
6619              (void *)start[i - 1], start[i - 1],
6620              (void *)start[i], start[i]))
6621             break;
6622     }
6623     if (i == count)
6624         pr_info("ftrace section at %px sorted properly\n", start);
6625 }
6626 #else
6627 static void test_is_sorted(unsigned long *start, unsigned long count)
6628 {
6629 }
6630 #endif
6631 
6632 static int ftrace_process_locs(struct module *mod,
6633                    unsigned long *start,
6634                    unsigned long *end)
6635 {
6636     struct ftrace_page *start_pg;
6637     struct ftrace_page *pg;
6638     struct dyn_ftrace *rec;
6639     unsigned long count;
6640     unsigned long *p;
6641     unsigned long addr;
6642     unsigned long flags = 0; /* Shut up gcc */
6643     int ret = -ENOMEM;
6644 
6645     count = end - start;
6646 
6647     if (!count)
6648         return 0;
6649 
6650     /*
6651      * Sorting mcount in vmlinux at build time depends on
6652      * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount loc in
6653      * modules cannot be sorted at build time.
6654      */
6655     if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
6656         sort(start, count, sizeof(*start),
6657              ftrace_cmp_ips, NULL);
6658     } else {
6659         test_is_sorted(start, count);
6660     }
6661 
6662     start_pg = ftrace_allocate_pages(count);
6663     if (!start_pg)
6664         return -ENOMEM;
6665 
6666     mutex_lock(&ftrace_lock);
6667 
6668     /*
6669      * The core kernel and each module need their own pages, as
6670      * modules will free them when they are removed.
6671      * Force a new page to be allocated for modules.
6672      */
6673     if (!mod) {
6674         WARN_ON(ftrace_pages || ftrace_pages_start);
6675         /* First initialization */
6676         ftrace_pages = ftrace_pages_start = start_pg;
6677     } else {
6678         if (!ftrace_pages)
6679             goto out;
6680 
6681         if (WARN_ON(ftrace_pages->next)) {
6682             /* Hmm, we have free pages? */
6683             while (ftrace_pages->next)
6684                 ftrace_pages = ftrace_pages->next;
6685         }
6686 
6687         ftrace_pages->next = start_pg;
6688     }
6689 
6690     p = start;
6691     pg = start_pg;
6692     while (p < end) {
6693         unsigned long end_offset;
6694         addr = ftrace_call_adjust(*p++);
6695         /*
6696          * Some architecture linkers will pad between
6697          * the different mcount_loc sections of different
6698          * object files to satisfy alignments.
6699          * Skip any NULL pointers.
6700          */
6701         if (!addr)
6702             continue;
6703 
6704         end_offset = (pg->index+1) * sizeof(pg->records[0]);
6705         if (end_offset > PAGE_SIZE << pg->order) {
6706             /* We should have allocated enough */
6707             if (WARN_ON(!pg->next))
6708                 break;
6709             pg = pg->next;
6710         }
6711 
6712         rec = &pg->records[pg->index++];
6713         rec->ip = addr;
6714     }
6715 
6716     /* We should have used all pages */
6717     WARN_ON(pg->next);
6718 
6719     /* Assign the last page to ftrace_pages */
6720     ftrace_pages = pg;
6721 
6722     /*
6723      * We only need to disable interrupts on start up
6724      * because we are modifying code that an interrupt
6725      * may execute, and the modification is not atomic.
6726      * But for modules, nothing runs the code we modify
6727      * until we are finished with it, and there's no
6728      * reason to cause large interrupt latencies while we do it.
6729      */
6730     if (!mod)
6731         local_irq_save(flags);
6732     ftrace_update_code(mod, start_pg);
6733     if (!mod)
6734         local_irq_restore(flags);
6735     ret = 0;
6736  out:
6737     mutex_unlock(&ftrace_lock);
6738 
6739     return ret;
6740 }
6741 
6742 struct ftrace_mod_func {
6743     struct list_head    list;
6744     char            *name;
6745     unsigned long       ip;
6746     unsigned int        size;
6747 };
6748 
6749 struct ftrace_mod_map {
6750     struct rcu_head     rcu;
6751     struct list_head    list;
6752     struct module       *mod;
6753     unsigned long       start_addr;
6754     unsigned long       end_addr;
6755     struct list_head    funcs;
6756     unsigned int        num_funcs;
6757 };
6758 
6759 static int ftrace_get_trampoline_kallsym(unsigned int symnum,
6760                      unsigned long *value, char *type,
6761                      char *name, char *module_name,
6762                      int *exported)
6763 {
6764     struct ftrace_ops *op;
6765 
6766     list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
6767         if (!op->trampoline || symnum--)
6768             continue;
6769         *value = op->trampoline;
6770         *type = 't';
6771         strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
6772         strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6773         *exported = 0;
6774         return 0;
6775     }
6776 
6777     return -ERANGE;
6778 }
6779 
6780 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
6781 /*
6782  * Check if the current ops references the given ip.
6783  *
6784  * If the ops traces all functions, then it was already accounted for.
6785  * If the ops does not trace the current record function, skip it.
6786  * If the ops ignores the function via notrace filter, skip it.
6787  */
6788 static bool
6789 ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
6790 {
6791     /* If ops isn't enabled, ignore it */
6792     if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6793         return false;
6794 
6795     /* If ops traces all then it includes this function */
6796     if (ops_traces_mod(ops))
6797         return true;
6798 
6799     /* The function must be in the filter */
6800     if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
6801         !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
6802         return false;
6803 
6804     /* If in notrace hash, we ignore it too */
6805     if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
6806         return false;
6807 
6808     return true;
6809 }
6810 #endif
6811 
6812 #ifdef CONFIG_MODULES
6813 
6814 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6815 
6816 static LIST_HEAD(ftrace_mod_maps);
6817 
6818 static int referenced_filters(struct dyn_ftrace *rec)
6819 {
6820     struct ftrace_ops *ops;
6821     int cnt = 0;
6822 
6823     for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6824         if (ops_references_ip(ops, rec->ip)) {
6825             if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
6826                 continue;
6827             if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
6828                 continue;
6829             cnt++;
6830             if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
6831                 rec->flags |= FTRACE_FL_REGS;
6832             if (cnt == 1 && ops->trampoline)
6833                 rec->flags |= FTRACE_FL_TRAMP;
6834             else
6835                 rec->flags &= ~FTRACE_FL_TRAMP;
6836         }
6837     }
6838 
6839     return cnt;
6840 }
6841 
6842 static void
6843 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6844 {
6845     struct ftrace_func_entry *entry;
6846     struct dyn_ftrace *rec;
6847     int i;
6848 
6849     if (ftrace_hash_empty(hash))
6850         return;
6851 
6852     for (i = 0; i < pg->index; i++) {
6853         rec = &pg->records[i];
6854         entry = __ftrace_lookup_ip(hash, rec->ip);
6855         /*
6856          * Do not allow this rec to match again.
6857          * Yeah, it may waste some memory, but will be removed
6858          * if/when the hash is modified again.
6859          */
6860         if (entry)
6861             entry->ip = 0;
6862     }
6863 }
6864 
6865 /* Clear any records from hashes */
6866 static void clear_mod_from_hashes(struct ftrace_page *pg)
6867 {
6868     struct trace_array *tr;
6869 
6870     mutex_lock(&trace_types_lock);
6871     list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6872         if (!tr->ops || !tr->ops->func_hash)
6873             continue;
6874         mutex_lock(&tr->ops->func_hash->regex_lock);
6875         clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6876         clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6877         mutex_unlock(&tr->ops->func_hash->regex_lock);
6878     }
6879     mutex_unlock(&trace_types_lock);
6880 }
6881 
6882 static void ftrace_free_mod_map(struct rcu_head *rcu)
6883 {
6884     struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6885     struct ftrace_mod_func *mod_func;
6886     struct ftrace_mod_func *n;
6887 
6888     /* All the contents of mod_map are no longer visible to readers */
6889     list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6890         kfree(mod_func->name);
6891         list_del(&mod_func->list);
6892         kfree(mod_func);
6893     }
6894 
6895     kfree(mod_map);
6896 }
6897 
6898 void ftrace_release_mod(struct module *mod)
6899 {
6900     struct ftrace_mod_map *mod_map;
6901     struct ftrace_mod_map *n;
6902     struct dyn_ftrace *rec;
6903     struct ftrace_page **last_pg;
6904     struct ftrace_page *tmp_page = NULL;
6905     struct ftrace_page *pg;
6906 
6907     mutex_lock(&ftrace_lock);
6908 
6909     if (ftrace_disabled)
6910         goto out_unlock;
6911 
6912     list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6913         if (mod_map->mod == mod) {
6914             list_del_rcu(&mod_map->list);
6915             call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6916             break;
6917         }
6918     }
6919 
6920     /*
6921      * Each module has its own ftrace_pages; remove
6922      * them from the list.
6923      */
6924     last_pg = &ftrace_pages_start;
6925     for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6926         rec = &pg->records[0];
6927         if (within_module_core(rec->ip, mod) ||
6928             within_module_init(rec->ip, mod)) {
6929             /*
6930              * As core pages are first, the first
6931              * page should never be a module page.
6932              */
6933             if (WARN_ON(pg == ftrace_pages_start))
6934                 goto out_unlock;
6935 
6936             /* Check if we are deleting the last page */
6937             if (pg == ftrace_pages)
6938                 ftrace_pages = next_to_ftrace_page(last_pg);
6939 
6940             ftrace_update_tot_cnt -= pg->index;
6941             *last_pg = pg->next;
6942 
6943             pg->next = tmp_page;
6944             tmp_page = pg;
6945         } else
6946             last_pg = &pg->next;
6947     }
6948  out_unlock:
6949     mutex_unlock(&ftrace_lock);
6950 
6951     for (pg = tmp_page; pg; pg = tmp_page) {
6952 
6953         /* Needs to be called outside of ftrace_lock */
6954         clear_mod_from_hashes(pg);
6955 
6956         if (pg->records) {
6957             free_pages((unsigned long)pg->records, pg->order);
6958             ftrace_number_of_pages -= 1 << pg->order;
6959         }
6960         tmp_page = pg->next;
6961         kfree(pg);
6962         ftrace_number_of_groups--;
6963     }
6964 }
6965 
6966 void ftrace_module_enable(struct module *mod)
6967 {
6968     struct dyn_ftrace *rec;
6969     struct ftrace_page *pg;
6970 
6971     mutex_lock(&ftrace_lock);
6972 
6973     if (ftrace_disabled)
6974         goto out_unlock;
6975 
6976     /*
6977      * If the tracing is enabled, go ahead and enable the record.
6978      *
6979      * The reason not to enable the record immediately is the
6980      * inherent check of ftrace_make_nop/ftrace_make_call for
6981      * correct previous instructions.  Doing the NOP conversion
6982      * first puts the module into the correct state, thus
6983      * passing the ftrace_make_call check.
6984      *
6985      * We also delay this until after the module code has set the
6986      * text to read-only, as we now need to set it back to read-write
6987      * so that we can modify the text.
6988      */
6989     if (ftrace_start_up)
6990         ftrace_arch_code_modify_prepare();
6991 
6992     do_for_each_ftrace_rec(pg, rec) {
6993         int cnt;
6994         /*
6995          * do_for_each_ftrace_rec() is a double loop.
6996          * module text shares the pg. If a record is
6997          * not part of this module, then skip this pg,
6998          * which the "break" will do.
6999          */
7000         if (!within_module_core(rec->ip, mod) &&
7001             !within_module_init(rec->ip, mod))
7002             break;
7003 
7004         /* Weak functions should still be ignored */
7005         if (!test_for_valid_rec(rec)) {
7006             /* Clear all other flags. Should not be enabled anyway */
7007             rec->flags = FTRACE_FL_DISABLED;
7008             continue;
7009         }
7010 
7011         cnt = 0;
7012 
7013         /*
7014          * When adding a module, we need to check if tracers are
7015          * currently enabled, and if they are and can trace this record,
7016          * we need to enable the module functions as well as update the
7017          * reference counts for those function records.
7018          */
7019         if (ftrace_start_up)
7020             cnt += referenced_filters(rec);
7021 
7022         rec->flags &= ~FTRACE_FL_DISABLED;
7023         rec->flags += cnt;
7024 
7025         if (ftrace_start_up && cnt) {
7026             int failed = __ftrace_replace_code(rec, 1);
7027             if (failed) {
7028                 ftrace_bug(failed, rec);
7029                 goto out_loop;
7030             }
7031         }
7032 
7033     } while_for_each_ftrace_rec();
7034 
7035  out_loop:
7036     if (ftrace_start_up)
7037         ftrace_arch_code_modify_post_process();
7038 
7039  out_unlock:
7040     mutex_unlock(&ftrace_lock);
7041 
7042     process_cached_mods(mod->name);
7043 }
7044 
7045 void ftrace_module_init(struct module *mod)
7046 {
7047     int ret;
7048 
7049     if (ftrace_disabled || !mod->num_ftrace_callsites)
7050         return;
7051 
7052     ret = ftrace_process_locs(mod, mod->ftrace_callsites,
7053                   mod->ftrace_callsites + mod->num_ftrace_callsites);
7054     if (ret)
7055         pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
7056             mod->name);
7057 }
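
/*
 * A rough sketch of where the callsites come from (assuming the usual
 * build-time wiring): mod->ftrace_callsites points at the module's own
 * __mcount_loc array of mcount/fentry call-site addresses, the per-module
 * analogue of the built-in __start_mcount_loc..__stop_mcount_loc range
 * that ftrace_init() feeds to ftrace_process_locs() later in this file.
 */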
7058 
7059 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7060                 struct dyn_ftrace *rec)
7061 {
7062     struct ftrace_mod_func *mod_func;
7063     unsigned long symsize;
7064     unsigned long offset;
7065     char str[KSYM_SYMBOL_LEN];
7066     char *modname;
7067     const char *ret;
7068 
7069     ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
7070     if (!ret)
7071         return;
7072 
7073     mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
7074     if (!mod_func)
7075         return;
7076 
7077     mod_func->name = kstrdup(str, GFP_KERNEL);
7078     if (!mod_func->name) {
7079         kfree(mod_func);
7080         return;
7081     }
7082 
7083     mod_func->ip = rec->ip - offset;
7084     mod_func->size = symsize;
7085 
7086     mod_map->num_funcs++;
7087 
7088     list_add_rcu(&mod_func->list, &mod_map->funcs);
7089 }
7090 
7091 static struct ftrace_mod_map *
7092 allocate_ftrace_mod_map(struct module *mod,
7093             unsigned long start, unsigned long end)
7094 {
7095     struct ftrace_mod_map *mod_map;
7096 
7097     mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
7098     if (!mod_map)
7099         return NULL;
7100 
7101     mod_map->mod = mod;
7102     mod_map->start_addr = start;
7103     mod_map->end_addr = end;
7104     mod_map->num_funcs = 0;
7105 
7106     INIT_LIST_HEAD_RCU(&mod_map->funcs);
7107 
7108     list_add_rcu(&mod_map->list, &ftrace_mod_maps);
7109 
7110     return mod_map;
7111 }
7112 
7113 static const char *
7114 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
7115                unsigned long addr, unsigned long *size,
7116                unsigned long *off, char *sym)
7117 {
7118     struct ftrace_mod_func *found_func =  NULL;
7119     struct ftrace_mod_func *mod_func;
7120 
7121     list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7122         if (addr >= mod_func->ip &&
7123             addr < mod_func->ip + mod_func->size) {
7124             found_func = mod_func;
7125             break;
7126         }
7127     }
7128 
7129     if (found_func) {
7130         if (size)
7131             *size = found_func->size;
7132         if (off)
7133             *off = addr - found_func->ip;
7134         if (sym)
7135             strlcpy(sym, found_func->name, KSYM_NAME_LEN);
7136 
7137         return found_func->name;
7138     }
7139 
7140     return NULL;
7141 }
7142 
7143 const char *
7144 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7145            unsigned long *off, char **modname, char *sym)
7146 {
7147     struct ftrace_mod_map *mod_map;
7148     const char *ret = NULL;
7149 
7150     /* mod_map is freed via call_rcu() */
7151     preempt_disable();
7152     list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7153         ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7154         if (ret) {
7155             if (modname)
7156                 *modname = mod_map->mod->name;
7157             break;
7158         }
7159     }
7160     preempt_enable();
7161 
7162     return ret;
7163 }
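
/*
 * A minimal usage sketch (hypothetical caller), resolving an address
 * that belonged to already-freed module init text:
 *
 *	unsigned long size, off;
 *	char *modname;
 *	char sym[KSYM_NAME_LEN];
 *	const char *name;
 *
 *	name = ftrace_mod_address_lookup(addr, &size, &off, &modname, sym);
 *	if (name)
 *		pr_info("%s+0x%lx/0x%lx [%s]\n", name, off, size, modname);
 */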
7164 
7165 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7166                char *type, char *name,
7167                char *module_name, int *exported)
7168 {
7169     struct ftrace_mod_map *mod_map;
7170     struct ftrace_mod_func *mod_func;
7171     int ret;
7172 
7173     preempt_disable();
7174     list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7175 
7176         if (symnum >= mod_map->num_funcs) {
7177             symnum -= mod_map->num_funcs;
7178             continue;
7179         }
7180 
7181         list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7182             if (symnum > 1) {
7183                 symnum--;
7184                 continue;
7185             }
7186 
7187             *value = mod_func->ip;
7188             *type = 'T';
7189             strlcpy(name, mod_func->name, KSYM_NAME_LEN);
7190             strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7191             *exported = 1;
7192             preempt_enable();
7193             return 0;
7194         }
7195         WARN_ON(1);
7196         break;
7197     }
7198     ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7199                         module_name, exported);
7200     preempt_enable();
7201     return ret;
7202 }
7203 
7204 #else
7205 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7206                 struct dyn_ftrace *rec) { }
7207 static inline struct ftrace_mod_map *
7208 allocate_ftrace_mod_map(struct module *mod,
7209             unsigned long start, unsigned long end)
7210 {
7211     return NULL;
7212 }
7213 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7214                char *type, char *name, char *module_name,
7215                int *exported)
7216 {
7217     int ret;
7218 
7219     preempt_disable();
7220     ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7221                         module_name, exported);
7222     preempt_enable();
7223     return ret;
7224 }
7225 #endif /* CONFIG_MODULES */
7226 
7227 struct ftrace_init_func {
7228     struct list_head list;
7229     unsigned long ip;
7230 };
7231 
7232 /* Clear any init ips from hashes */
7233 static void
7234 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
7235 {
7236     struct ftrace_func_entry *entry;
7237 
7238     entry = ftrace_lookup_ip(hash, func->ip);
7239     /*
7240      * Do not allow this rec to match again.
7241      * Yeah, it may waste some memory, but will be removed
7242      * if/when the hash is modified again.
7243      */
7244     if (entry)
7245         entry->ip = 0;
7246 }
7247 
7248 static void
7249 clear_func_from_hashes(struct ftrace_init_func *func)
7250 {
7251     struct trace_array *tr;
7252 
7253     mutex_lock(&trace_types_lock);
7254     list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7255         if (!tr->ops || !tr->ops->func_hash)
7256             continue;
7257         mutex_lock(&tr->ops->func_hash->regex_lock);
7258         clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7259         clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7260         mutex_unlock(&tr->ops->func_hash->regex_lock);
7261     }
7262     mutex_unlock(&trace_types_lock);
7263 }
7264 
7265 static void add_to_clear_hash_list(struct list_head *clear_list,
7266                    struct dyn_ftrace *rec)
7267 {
7268     struct ftrace_init_func *func;
7269 
7270     func = kmalloc(sizeof(*func), GFP_KERNEL);
7271     if (!func) {
7272         MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7273         return;
7274     }
7275 
7276     func->ip = rec->ip;
7277     list_add(&func->list, clear_list);
7278 }
7279 
7280 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
7281 {
7282     unsigned long start = (unsigned long)(start_ptr);
7283     unsigned long end = (unsigned long)(end_ptr);
7284     struct ftrace_page **last_pg = &ftrace_pages_start;
7285     struct ftrace_page *pg;
7286     struct dyn_ftrace *rec;
7287     struct dyn_ftrace key;
7288     struct ftrace_mod_map *mod_map = NULL;
7289     struct ftrace_init_func *func, *func_next;
7290     struct list_head clear_hash;
7291 
7292     INIT_LIST_HEAD(&clear_hash);
7293 
7294     key.ip = start;
7295     key.flags = end;    /* overload flags, as it is unsigned long */
7296 
7297     mutex_lock(&ftrace_lock);
7298 
7299     /*
7300      * If we are freeing module init memory, then check if any
7301      * tracer is active. If so, save a mapping from the freed
7302      * addresses to the module function names (see save_ftrace_mod_rec()).
7303      */
7304     if (mod && ftrace_ops_list != &ftrace_list_end)
7305         mod_map = allocate_ftrace_mod_map(mod, start, end);
7306 
7307     for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7308         if (end < pg->records[0].ip ||
7309             start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7310             continue;
7311  again:
7312         rec = bsearch(&key, pg->records, pg->index,
7313                   sizeof(struct dyn_ftrace),
7314                   ftrace_cmp_recs);
7315         if (!rec)
7316             continue;
7317 
7318         /* rec will be cleared from hashes after ftrace_lock unlock */
7319         add_to_clear_hash_list(&clear_hash, rec);
7320 
7321         if (mod_map)
7322             save_ftrace_mod_rec(mod_map, rec);
7323 
7324         pg->index--;
7325         ftrace_update_tot_cnt--;
7326         if (!pg->index) {
7327             *last_pg = pg->next;
7328             if (pg->records) {
7329                 free_pages((unsigned long)pg->records, pg->order);
7330                 ftrace_number_of_pages -= 1 << pg->order;
7331             }
7332             ftrace_number_of_groups--;
7333             kfree(pg);
7334             pg = container_of(last_pg, struct ftrace_page, next);
7335             if (!(*last_pg))
7336                 ftrace_pages = pg;
7337             continue;
7338         }
7339         memmove(rec, rec + 1,
7340             (pg->index - (rec - pg->records)) * sizeof(*rec));
7341         /* More than one function may be in this block */
7342         goto again;
7343     }
7344     mutex_unlock(&ftrace_lock);
7345 
7346     list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7347         clear_func_from_hashes(func);
7348         kfree(func);
7349     }
7350 }
7351 
7352 void __init ftrace_free_init_mem(void)
7353 {
7354     void *start = (void *)(&__init_begin);
7355     void *end = (void *)(&__init_end);
7356 
7357     ftrace_boot_snapshot();
7358 
7359     ftrace_free_mem(NULL, start, end);
7360 }
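
/*
 * A note on context (the exact call site lives outside this file, in the
 * init-memory freeing path): this is expected to run once, late in boot,
 * when the kernel's __init sections are discarded, so that any mcount
 * records pointing into that memory are dropped before the addresses can
 * be reused.
 */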
7361 
7362 int __init __weak ftrace_dyn_arch_init(void)
7363 {
7364     return 0;
7365 }
7366 
7367 void __init ftrace_init(void)
7368 {
7369     extern unsigned long __start_mcount_loc[];
7370     extern unsigned long __stop_mcount_loc[];
7371     unsigned long count, flags;
7372     int ret;
7373 
7374     local_irq_save(flags);
7375     ret = ftrace_dyn_arch_init();
7376     local_irq_restore(flags);
7377     if (ret)
7378         goto failed;
7379 
7380     count = __stop_mcount_loc - __start_mcount_loc;
7381     if (!count) {
7382         pr_info("ftrace: No functions to be traced?\n");
7383         goto failed;
7384     }
7385 
7386     pr_info("ftrace: allocating %ld entries in %ld pages\n",
7387         count, count / ENTRIES_PER_PAGE + 1);
7388 
7389     ret = ftrace_process_locs(NULL,
7390                   __start_mcount_loc,
7391                   __stop_mcount_loc);
7392     if (ret) {
7393         pr_warn("ftrace: failed to allocate entries for functions\n");
7394         goto failed;
7395     }
7396 
7397     pr_info("ftrace: allocated %ld pages with %ld groups\n",
7398         ftrace_number_of_pages, ftrace_number_of_groups);
7399 
7400     last_ftrace_enabled = ftrace_enabled = 1;
7401 
7402     set_ftrace_early_filters();
7403 
7404     return;
7405  failed:
7406     ftrace_disabled = 1;
7407 }
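
/*
 * A rough picture of the boot ordering (assuming the usual start_kernel()
 * wiring): ftrace_init() runs early, turns every __start_mcount_loc entry
 * into a dyn_ftrace record via ftrace_process_locs(), and only then flips
 * ftrace_enabled on so that later registrations can patch the call sites.
 */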
7408 
7409 /* Do nothing if arch does not support this */
7410 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7411 {
7412 }
7413 
7414 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7415 {
7416     unsigned long trampoline = ops->trampoline;
7417 
7418     arch_ftrace_update_trampoline(ops);
7419     if (ops->trampoline && ops->trampoline != trampoline &&
7420         (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
7421         /* Add to kallsyms before the perf events */
7422         ftrace_add_trampoline_to_kallsyms(ops);
7423         perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7424                    ops->trampoline, ops->trampoline_size, false,
7425                    FTRACE_TRAMPOLINE_SYM);
7426         /*
7427          * Record the perf text poke event after the ksymbol register
7428          * event.
7429          */
7430         perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7431                      (void *)ops->trampoline,
7432                      ops->trampoline_size);
7433     }
7434 }
7435 
7436 void ftrace_init_trace_array(struct trace_array *tr)
7437 {
7438     INIT_LIST_HEAD(&tr->func_probes);
7439     INIT_LIST_HEAD(&tr->mod_trace);
7440     INIT_LIST_HEAD(&tr->mod_notrace);
7441 }
7442 #else
7443 
7444 struct ftrace_ops global_ops = {
7445     .func           = ftrace_stub,
7446     .flags          = FTRACE_OPS_FL_INITIALIZED |
7447                   FTRACE_OPS_FL_PID,
7448 };
7449 
7450 static int __init ftrace_nodyn_init(void)
7451 {
7452     ftrace_enabled = 1;
7453     return 0;
7454 }
7455 core_initcall(ftrace_nodyn_init);
7456 
7457 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
7458 static inline void ftrace_startup_all(int command) { }
7459 
7460 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7461 {
7462 }
7463 
7464 #endif /* CONFIG_DYNAMIC_FTRACE */
7465 
7466 __init void ftrace_init_global_array_ops(struct trace_array *tr)
7467 {
7468     tr->ops = &global_ops;
7469     tr->ops->private = tr;
7470     ftrace_init_trace_array(tr);
7471 }
7472 
7473 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7474 {
7475     /* The global array's ops->func should still be the stub here */
7476     if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7477         if (WARN_ON(tr->ops->func != ftrace_stub))
7478             printk("ftrace ops had %pS for function\n",
7479                    tr->ops->func);
7480     }
7481     tr->ops->func = func;
7482     tr->ops->private = tr;
7483 }
7484 
7485 void ftrace_reset_array_ops(struct trace_array *tr)
7486 {
7487     tr->ops->func = ftrace_stub;
7488 }
7489 
7490 static nokprobe_inline void
7491 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7492                struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7493 {
7494     struct pt_regs *regs = ftrace_get_regs(fregs);
7495     struct ftrace_ops *op;
7496     int bit;
7497 
7498     /*
7499      * The ftrace_test_and_set_recursion() will disable preemption,
7500      * which is required since some of the ops may be dynamically
7501      * allocated, they must be freed after a synchronize_rcu().
7502      */
7503     bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7504     if (bit < 0)
7505         return;
7506 
7507     do_for_each_ftrace_op(op, ftrace_ops_list) {
7508         /* Stub functions don't need to be called nor tested */
7509         if (op->flags & FTRACE_OPS_FL_STUB)
7510             continue;
7511         /*
7512          * Check the following for each ops before calling their func:
7513          *  if the RCU flag is set, then rcu_is_watching() must be true
7514          *  otherwise test if the ip matches the ops filter
7515          *
7516          * If either of the above fails then op->func() is not executed.
7517          */
7520         if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7521             ftrace_ops_test(op, ip, regs)) {
7522             if (FTRACE_WARN_ON(!op->func)) {
7523                 pr_warn("op=%p %pS\n", op, op);
7524                 goto out;
7525             }
7526             op->func(ip, parent_ip, op, fregs);
7527         }
7528     } while_for_each_ftrace_op(op);
7529 out:
7530     trace_clear_recursion(bit);
7531 }
7532 
7533 /*
7534  * Some archs only support passing ip and parent_ip. Even though
7535  * the list function ignores the op parameter, we do not want any
7536  * C side effects, where a function is called without the caller
7537  * sending a third parameter.
7538  * Archs are expected to support regs and ftrace_ops at the same time:
7539  * if an arch supports ftrace_ops, it is assumed to support regs too.
7540  * If callbacks want to use regs, they must either check for regs
7541  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
7542  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
7543  * An architecture can pass partial regs with ftrace_ops and still
7544  * set the ARCH_SUPPORTS_FTRACE_OPS.
7545  *
7546  * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
7547  * arch_ftrace_ops_list_func.
7548  */
7549 #if ARCH_SUPPORTS_FTRACE_OPS
7550 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7551                    struct ftrace_ops *op, struct ftrace_regs *fregs)
7552 {
7553     __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7554 }
7555 #else
7556 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
7557 {
7558     __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7559 }
7560 #endif
7561 NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
7562 
7563 /*
7564  * If there's only one function registered but it does not support
7565  * recursion or needs RCU protection, then this function will be
7566  * called by the mcount trampoline instead of op->func directly.
7567  */
7568 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7569                    struct ftrace_ops *op, struct ftrace_regs *fregs)
7570 {
7571     int bit;
7572 
7573     bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7574     if (bit < 0)
7575         return;
7576 
7577     if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7578         op->func(ip, parent_ip, op, fregs);
7579 
7580     trace_clear_recursion(bit);
7581 }
7582 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7583 
7584 /**
7585  * ftrace_ops_get_func - get the function a trampoline should call
7586  * @ops: the ops to get the function for
7587  *
7588  * Normally the mcount trampoline will call the ops->func, but there
7589  * are times that it should not. For example, if the ops does not
7590  * have its own recursion protection, then it should call the
7591  * ftrace_ops_assist_func() instead.
7592  *
7593  * Returns the function that the trampoline should call for @ops.
7594  */
7595 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7596 {
7597     /*
7598      * If the function does not handle recursion or needs to be RCU safe,
7599      * then we need to call the assist handler.
7600      */
7601     if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7602               FTRACE_OPS_FL_RCU))
7603         return ftrace_ops_assist_func;
7604 
7605     return ops->func;
7606 }
7607 
7608 static void
7609 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7610                      struct task_struct *prev,
7611                      struct task_struct *next,
7612                      unsigned int prev_state)
7613 {
7614     struct trace_array *tr = data;
7615     struct trace_pid_list *pid_list;
7616     struct trace_pid_list *no_pid_list;
7617 
7618     pid_list = rcu_dereference_sched(tr->function_pids);
7619     no_pid_list = rcu_dereference_sched(tr->function_no_pids);
7620 
7621     if (trace_ignore_this_task(pid_list, no_pid_list, next))
7622         this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7623                    FTRACE_PID_IGNORE);
7624     else
7625         this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7626                    next->pid);
7627 }
7628 
7629 static void
7630 ftrace_pid_follow_sched_process_fork(void *data,
7631                      struct task_struct *self,
7632                      struct task_struct *task)
7633 {
7634     struct trace_pid_list *pid_list;
7635     struct trace_array *tr = data;
7636 
7637     pid_list = rcu_dereference_sched(tr->function_pids);
7638     trace_filter_add_remove_task(pid_list, self, task);
7639 
7640     pid_list = rcu_dereference_sched(tr->function_no_pids);
7641     trace_filter_add_remove_task(pid_list, self, task);
7642 }
7643 
7644 static void
7645 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
7646 {
7647     struct trace_pid_list *pid_list;
7648     struct trace_array *tr = data;
7649 
7650     pid_list = rcu_dereference_sched(tr->function_pids);
7651     trace_filter_add_remove_task(pid_list, NULL, task);
7652 
7653     pid_list = rcu_dereference_sched(tr->function_no_pids);
7654     trace_filter_add_remove_task(pid_list, NULL, task);
7655 }
7656 
7657 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
7658 {
7659     if (enable) {
7660         register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7661                           tr);
7662         register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7663                           tr);
7664     } else {
7665         unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7666                             tr);
7667         unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7668                             tr);
7669     }
7670 }
7671 
7672 static void clear_ftrace_pids(struct trace_array *tr, int type)
7673 {
7674     struct trace_pid_list *pid_list;
7675     struct trace_pid_list *no_pid_list;
7676     int cpu;
7677 
7678     pid_list = rcu_dereference_protected(tr->function_pids,
7679                          lockdep_is_held(&ftrace_lock));
7680     no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7681                         lockdep_is_held(&ftrace_lock));
7682 
7683     /* Make sure there's something to do */
7684     if (!pid_type_enabled(type, pid_list, no_pid_list))
7685         return;
7686 
7687     /* See if the pids still need to be checked after this */
7688     if (!still_need_pid_events(type, pid_list, no_pid_list)) {
7689         unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7690         for_each_possible_cpu(cpu)
7691             per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
7692     }
7693 
7694     if (type & TRACE_PIDS)
7695         rcu_assign_pointer(tr->function_pids, NULL);
7696 
7697     if (type & TRACE_NO_PIDS)
7698         rcu_assign_pointer(tr->function_no_pids, NULL);
7699 
7700     /* Wait till all users are no longer using pid filtering */
7701     synchronize_rcu();
7702 
7703     if ((type & TRACE_PIDS) && pid_list)
7704         trace_pid_list_free(pid_list);
7705 
7706     if ((type & TRACE_NO_PIDS) && no_pid_list)
7707         trace_pid_list_free(no_pid_list);
7708 }
7709 
7710 void ftrace_clear_pids(struct trace_array *tr)
7711 {
7712     mutex_lock(&ftrace_lock);
7713 
7714     clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
7715 
7716     mutex_unlock(&ftrace_lock);
7717 }
7718 
7719 static void ftrace_pid_reset(struct trace_array *tr, int type)
7720 {
7721     mutex_lock(&ftrace_lock);
7722     clear_ftrace_pids(tr, type);
7723 
7724     ftrace_update_pid_func();
7725     ftrace_startup_all(0);
7726 
7727     mutex_unlock(&ftrace_lock);
7728 }
7729 
7730 /* Greater than any max PID */
7731 #define FTRACE_NO_PIDS      (void *)(PID_MAX_LIMIT + 1)
7732 
7733 static void *fpid_start(struct seq_file *m, loff_t *pos)
7734     __acquires(RCU)
7735 {
7736     struct trace_pid_list *pid_list;
7737     struct trace_array *tr = m->private;
7738 
7739     mutex_lock(&ftrace_lock);
7740     rcu_read_lock_sched();
7741 
7742     pid_list = rcu_dereference_sched(tr->function_pids);
7743 
7744     if (!pid_list)
7745         return !(*pos) ? FTRACE_NO_PIDS : NULL;
7746 
7747     return trace_pid_start(pid_list, pos);
7748 }
7749 
7750 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7751 {
7752     struct trace_array *tr = m->private;
7753     struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7754 
7755     if (v == FTRACE_NO_PIDS) {
7756         (*pos)++;
7757         return NULL;
7758     }
7759     return trace_pid_next(pid_list, v, pos);
7760 }
7761 
7762 static void fpid_stop(struct seq_file *m, void *p)
7763     __releases(RCU)
7764 {
7765     rcu_read_unlock_sched();
7766     mutex_unlock(&ftrace_lock);
7767 }
7768 
7769 static int fpid_show(struct seq_file *m, void *v)
7770 {
7771     if (v == FTRACE_NO_PIDS) {
7772         seq_puts(m, "no pid\n");
7773         return 0;
7774     }
7775 
7776     return trace_pid_show(m, v);
7777 }
7778 
7779 static const struct seq_operations ftrace_pid_sops = {
7780     .start = fpid_start,
7781     .next = fpid_next,
7782     .stop = fpid_stop,
7783     .show = fpid_show,
7784 };
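
/*
 * The FTRACE_NO_PIDS sentinel above is what makes an empty filter visible:
 * when no pid_list is installed, fpid_start() hands the sentinel to
 * fpid_show(), which prints "no pid" instead of walking a list. The same
 * seq_operations shape is reused below for the "no pid" (exclude) variant.
 */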
7785 
7786 static void *fnpid_start(struct seq_file *m, loff_t *pos)
7787     __acquires(RCU)
7788 {
7789     struct trace_pid_list *pid_list;
7790     struct trace_array *tr = m->private;
7791 
7792     mutex_lock(&ftrace_lock);
7793     rcu_read_lock_sched();
7794 
7795     pid_list = rcu_dereference_sched(tr->function_no_pids);
7796 
7797     if (!pid_list)
7798         return !(*pos) ? FTRACE_NO_PIDS : NULL;
7799 
7800     return trace_pid_start(pid_list, pos);
7801 }
7802 
7803 static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
7804 {
7805     struct trace_array *tr = m->private;
7806     struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
7807 
7808     if (v == FTRACE_NO_PIDS) {
7809         (*pos)++;
7810         return NULL;
7811     }
7812     return trace_pid_next(pid_list, v, pos);
7813 }
7814 
7815 static const struct seq_operations ftrace_no_pid_sops = {
7816     .start = fnpid_start,
7817     .next = fnpid_next,
7818     .stop = fpid_stop,
7819     .show = fpid_show,
7820 };
7821 
7822 static int pid_open(struct inode *inode, struct file *file, int type)
7823 {
7824     const struct seq_operations *seq_ops;
7825     struct trace_array *tr = inode->i_private;
7826     struct seq_file *m;
7827     int ret = 0;
7828 
7829     ret = tracing_check_open_get_tr(tr);
7830     if (ret)
7831         return ret;
7832 
7833     if ((file->f_mode & FMODE_WRITE) &&
7834         (file->f_flags & O_TRUNC))
7835         ftrace_pid_reset(tr, type);
7836 
7837     switch (type) {
7838     case TRACE_PIDS:
7839         seq_ops = &ftrace_pid_sops;
7840         break;
7841     case TRACE_NO_PIDS:
7842         seq_ops = &ftrace_no_pid_sops;
7843         break;
7844     default:
7845         trace_array_put(tr);
7846         WARN_ON_ONCE(1);
7847         return -EINVAL;
7848     }
7849 
7850     ret = seq_open(file, seq_ops);
7851     if (ret < 0) {
7852         trace_array_put(tr);
7853     } else {
7854         m = file->private_data;
7855         /* copy tr over to seq ops */
7856         m->private = tr;
7857     }
7858 
7859     return ret;
7860 }
7861 
7862 static int
7863 ftrace_pid_open(struct inode *inode, struct file *file)
7864 {
7865     return pid_open(inode, file, TRACE_PIDS);
7866 }
7867 
7868 static int
7869 ftrace_no_pid_open(struct inode *inode, struct file *file)
7870 {
7871     return pid_open(inode, file, TRACE_NO_PIDS);
7872 }
7873 
7874 static void ignore_task_cpu(void *data)
7875 {
7876     struct trace_array *tr = data;
7877     struct trace_pid_list *pid_list;
7878     struct trace_pid_list *no_pid_list;
7879 
7880     /*
7881      * This function is called by on_each_cpu() while the
7882      * ftrace_lock is held (see pid_write()).
7883      */
7884     pid_list = rcu_dereference_protected(tr->function_pids,
7885                          mutex_is_locked(&ftrace_lock));
7886     no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7887                         mutex_is_locked(&ftrace_lock));
7888 
7889     if (trace_ignore_this_task(pid_list, no_pid_list, current))
7890         this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7891                    FTRACE_PID_IGNORE);
7892     else
7893         this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7894                    current->pid);
7895 }
7896 
7897 static ssize_t
7898 pid_write(struct file *filp, const char __user *ubuf,
7899       size_t cnt, loff_t *ppos, int type)
7900 {
7901     struct seq_file *m = filp->private_data;
7902     struct trace_array *tr = m->private;
7903     struct trace_pid_list *filtered_pids;
7904     struct trace_pid_list *other_pids;
7905     struct trace_pid_list *pid_list;
7906     ssize_t ret;
7907 
7908     if (!cnt)
7909         return 0;
7910 
7911     mutex_lock(&ftrace_lock);
7912 
7913     switch (type) {
7914     case TRACE_PIDS:
7915         filtered_pids = rcu_dereference_protected(tr->function_pids,
7916                          lockdep_is_held(&ftrace_lock));
7917         other_pids = rcu_dereference_protected(tr->function_no_pids,
7918                          lockdep_is_held(&ftrace_lock));
7919         break;
7920     case TRACE_NO_PIDS:
7921         filtered_pids = rcu_dereference_protected(tr->function_no_pids,
7922                          lockdep_is_held(&ftrace_lock));
7923         other_pids = rcu_dereference_protected(tr->function_pids,
7924                          lockdep_is_held(&ftrace_lock));
7925         break;
7926     default:
7927         ret = -EINVAL;
7928         WARN_ON_ONCE(1);
7929         goto out;
7930     }
7931 
7932     ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7933     if (ret < 0)
7934         goto out;
7935 
7936     switch (type) {
7937     case TRACE_PIDS:
7938         rcu_assign_pointer(tr->function_pids, pid_list);
7939         break;
7940     case TRACE_NO_PIDS:
7941         rcu_assign_pointer(tr->function_no_pids, pid_list);
7942         break;
7943     }
7944 
7945 
7946     if (filtered_pids) {
7947         synchronize_rcu();
7948         trace_pid_list_free(filtered_pids);
7949     } else if (pid_list && !other_pids) {
7950         /* Register a probe to set whether to ignore the tracing of a task */
7951         register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7952     }
7953 
7954     /*
7955      * Ignoring of pids is done at task switch. But we have to
7956      * check for those tasks that are currently running.
7957      * Always do this in case a pid was appended or removed.
7958      */
7959     on_each_cpu(ignore_task_cpu, tr, 1);
7960 
7961     ftrace_update_pid_func();
7962     ftrace_startup_all(0);
7963  out:
7964     mutex_unlock(&ftrace_lock);
7965 
7966     if (ret > 0)
7967         *ppos += ret;
7968 
7969     return ret;
7970 }
7971 
7972 static ssize_t
7973 ftrace_pid_write(struct file *filp, const char __user *ubuf,
7974          size_t cnt, loff_t *ppos)
7975 {
7976     return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7977 }
7978 
7979 static ssize_t
7980 ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
7981             size_t cnt, loff_t *ppos)
7982 {
7983     return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7984 }
7985 
7986 static int
7987 ftrace_pid_release(struct inode *inode, struct file *file)
7988 {
7989     struct trace_array *tr = inode->i_private;
7990 
7991     trace_array_put(tr);
7992 
7993     return seq_release(inode, file);
7994 }
7995 
7996 static const struct file_operations ftrace_pid_fops = {
7997     .open       = ftrace_pid_open,
7998     .write      = ftrace_pid_write,
7999     .read       = seq_read,
8000     .llseek     = tracing_lseek,
8001     .release    = ftrace_pid_release,
8002 };
8003 
8004 static const struct file_operations ftrace_no_pid_fops = {
8005     .open       = ftrace_no_pid_open,
8006     .write      = ftrace_no_pid_write,
8007     .read       = seq_read,
8008     .llseek     = tracing_lseek,
8009     .release    = ftrace_pid_release,
8010 };
8011 
8012 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8013 {
8014     trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
8015                 tr, &ftrace_pid_fops);
8016     trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
8017               d_tracer, tr, &ftrace_no_pid_fops);
8018 }
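
/*
 * A minimal usage sketch from user space (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	echo 123  > set_ftrace_pid	# trace only PID 123 (O_TRUNC resets first)
 *	echo 456 >> set_ftrace_pid	# append another PID
 *	echo     > set_ftrace_pid	# clear the filter (reads back "no pid")
 *
 * set_ftrace_notrace_pid works the same way, but excludes the listed PIDs.
 */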
8019 
8020 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
8021                      struct dentry *d_tracer)
8022 {
8023     /* Only the top level directory has the dyn_tracefs and profile */
8024     WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
8025 
8026     ftrace_init_dyn_tracefs(d_tracer);
8027     ftrace_profile_tracefs(d_tracer);
8028 }
8029 
8030 /**
8031  * ftrace_kill - kill ftrace
8032  *
8033  * This function should be used by panic code. It stops ftrace
8034  * but in a not so nice way: no locks are taken and nothing is
8035  * synchronized, which is what makes it safe from that context.
8036  */
8037 void ftrace_kill(void)
8038 {
8039     ftrace_disabled = 1;
8040     ftrace_enabled = 0;
8041     ftrace_trace_function = ftrace_stub;
8042 }
8043 
8044 /**
8045  * ftrace_is_dead - Test if ftrace is dead or not.
8046  *
8047  * Returns 1 if ftrace is "dead", zero otherwise.
8048  */
8049 int ftrace_is_dead(void)
8050 {
8051     return ftrace_disabled;
8052 }
8053 
8054 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
8055 /*
8056  * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
8057  * it doesn't conflict with any direct ftrace_ops. If there is existing
8058  * direct ftrace_ops on a kernel function being patched, call
8059  * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
8060  *
8061  * @ops:     ftrace_ops being registered.
8062  *
8063  * Returns:
8064  *         0 on success;
8065  *         Negative on failure.
8066  */
8067 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8068 {
8069     struct ftrace_func_entry *entry;
8070     struct ftrace_hash *hash;
8071     struct ftrace_ops *op;
8072     int size, i, ret;
8073 
8074     lockdep_assert_held_once(&direct_mutex);
8075 
8076     if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8077         return 0;
8078 
8079     hash = ops->func_hash->filter_hash;
8080     size = 1 << hash->size_bits;
8081     for (i = 0; i < size; i++) {
8082         hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8083             unsigned long ip = entry->ip;
8084             bool found_op = false;
8085 
8086             mutex_lock(&ftrace_lock);
8087             do_for_each_ftrace_op(op, ftrace_ops_list) {
8088                 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8089                     continue;
8090                 if (ops_references_ip(op, ip)) {
8091                     found_op = true;
8092                     break;
8093                 }
8094             } while_for_each_ftrace_op(op);
8095             mutex_unlock(&ftrace_lock);
8096 
8097             if (found_op) {
8098                 if (!op->ops_func)
8099                     return -EBUSY;
8100 
8101                 ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
8102                 if (ret)
8103                     return ret;
8104             }
8105         }
8106     }
8107 
8108     return 0;
8109 }
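
/*
 * For context (a rough sketch, not implied by this file alone): a typical
 * IPMODIFY user is live patching, which changes the traced function's IP,
 * while DIRECT ops such as BPF trampolines attach their own trampoline to
 * the same site. The ENABLE_SHARE_IPMODIFY_PEER command above asks the
 * direct ops, via its ops_func() callback, to switch into a mode that can
 * coexist with the incoming IPMODIFY ops instead of failing with -EBUSY.
 */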
8110 
8111 /*
8112  * Similar to prepare_direct_functions_for_ipmodify, clean up when an
8113  * ops with IPMODIFY is unregistered. The cleanup is optional for most
8114  * DIRECT ops.
8115  */
8116 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8117 {
8118     struct ftrace_func_entry *entry;
8119     struct ftrace_hash *hash;
8120     struct ftrace_ops *op;
8121     int size, i;
8122 
8123     if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8124         return;
8125 
8126     mutex_lock(&direct_mutex);
8127 
8128     hash = ops->func_hash->filter_hash;
8129     size = 1 << hash->size_bits;
8130     for (i = 0; i < size; i++) {
8131         hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8132             unsigned long ip = entry->ip;
8133             bool found_op = false;
8134 
8135             mutex_lock(&ftrace_lock);
8136             do_for_each_ftrace_op(op, ftrace_ops_list) {
8137                 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8138                     continue;
8139                 if (ops_references_ip(op, ip)) {
8140                     found_op = true;
8141                     break;
8142                 }
8143             } while_for_each_ftrace_op(op);
8144             mutex_unlock(&ftrace_lock);
8145 
8146             /* The cleanup is optional, ignore any errors */
8147             if (found_op && op->ops_func)
8148                 op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
8149         }
8150     }
8151     mutex_unlock(&direct_mutex);
8152 }
8153 
8154 #define lock_direct_mutex() mutex_lock(&direct_mutex)
8155 #define unlock_direct_mutex()   mutex_unlock(&direct_mutex)
8156 
8157 #else  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8158 
8159 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8160 {
8161     return 0;
8162 }
8163 
8164 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8165 {
8166 }
8167 
8168 #define lock_direct_mutex() do { } while (0)
8169 #define unlock_direct_mutex()   do { } while (0)
8170 
8171 #endif  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8172 
8173 /*
8174  * Similar to register_ftrace_function, except we don't lock direct_mutex.
8175  */
8176 static int register_ftrace_function_nolock(struct ftrace_ops *ops)
8177 {
8178     int ret;
8179 
8180     ftrace_ops_init(ops);
8181 
8182     mutex_lock(&ftrace_lock);
8183 
8184     ret = ftrace_startup(ops, 0);
8185 
8186     mutex_unlock(&ftrace_lock);
8187 
8188     return ret;
8189 }
8190 
8191 /**
8192  * register_ftrace_function - register a function for profiling
8193  * @ops:    ops structure that holds the function for profiling.
8194  *
8195  * Register a callback to be called from every function that ftrace
8196  * traces in the kernel (subject to @ops filtering).
8197  *
8198  * Note: @ops->func and all the functions it calls must be labeled
8199  *       with "notrace", otherwise it will go into a
8200  *       recursive loop.
8201  */
8202 int register_ftrace_function(struct ftrace_ops *ops)
8203 {
8204     int ret;
8205 
8206     lock_direct_mutex();
8207     ret = prepare_direct_functions_for_ipmodify(ops);
8208     if (ret < 0)
8209         goto out_unlock;
8210 
8211     ret = register_ftrace_function_nolock(ops);
8212 
8213 out_unlock:
8214     unlock_direct_mutex();
8215     return ret;
8216 }
8217 EXPORT_SYMBOL_GPL(register_ftrace_function);
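
/*
 * A minimal usage sketch (hypothetical module code; the names are
 * illustrative only). The callback and everything it calls must be
 * notrace, as the kernel-doc above warns:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct ftrace_regs *fregs)
 *	{
 *		trace_printk("hit %ps called from %ps\n",
 *			     (void *)ip, (void *)parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * Setting FTRACE_OPS_FL_RECURSION makes ftrace_ops_get_func() above wrap
 * the callback with ftrace_ops_assist_func() for recursion protection.
 */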
8218 
8219 /**
8220  * unregister_ftrace_function - unregister a function for profiling.
8221  * @ops:    ops structure that holds the function to unregister
8222  *
8223  * Unregister a function that was added to be called by ftrace profiling.
8224  */
8225 int unregister_ftrace_function(struct ftrace_ops *ops)
8226 {
8227     int ret;
8228 
8229     mutex_lock(&ftrace_lock);
8230     ret = ftrace_shutdown(ops, 0);
8231     mutex_unlock(&ftrace_lock);
8232 
8233     cleanup_direct_functions_after_ipmodify(ops);
8234     return ret;
8235 }
8236 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
8237 
8238 static int symbols_cmp(const void *a, const void *b)
8239 {
8240     const char **str_a = (const char **) a;
8241     const char **str_b = (const char **) b;
8242 
8243     return strcmp(*str_a, *str_b);
8244 }
8245 
8246 struct kallsyms_data {
8247     unsigned long *addrs;
8248     const char **syms;
8249     size_t cnt;
8250     size_t found;
8251 };
8252 
8253 static int kallsyms_callback(void *data, const char *name,
8254                  struct module *mod, unsigned long addr)
8255 {
8256     struct kallsyms_data *args = data;
8257     const char **sym;
8258     int idx;
8259 
8260     sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8261     if (!sym)
8262         return 0;
8263 
8264     idx = sym - args->syms;
8265     if (args->addrs[idx])
8266         return 0;
8267 
8268     addr = ftrace_location(addr);
8269     if (!addr)
8270         return 0;
8271 
8272     args->addrs[idx] = addr;
8273     args->found++;
8274     return args->found == args->cnt ? 1 : 0;
8275 }
8276 
8277 /**
8278  * ftrace_lookup_symbols - Lookup addresses for array of symbols
8279  *
8280  * @sorted_syms: array of symbol name pointers to resolve,
8281  * must be alphabetically sorted
8282  * @cnt: number of symbols/addresses in @sorted_syms/@addrs arrays
8283  * @addrs: array for storing resulting addresses
8284  *
8285  * This function looks up addresses for the array of symbols provided in
8286  * @sorted_syms (must be alphabetically sorted) and stores them in
8287  * @addrs array, which needs to be big enough to store at least @cnt
8288  * addresses.
8289  *
8290  * This function returns 0 if all provided symbols are found,
8291  * -ESRCH otherwise.
8292  */
8293 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
8294 {
8295     struct kallsyms_data args;
8296     int err;
8297 
8298     memset(addrs, 0, sizeof(*addrs) * cnt);
8299     args.addrs = addrs;
8300     args.syms = sorted_syms;
8301     args.cnt = cnt;
8302     args.found = 0;
8303     err = kallsyms_on_each_symbol(kallsyms_callback, &args);
8304     if (err < 0)
8305         return err;
8306     return args.found == args.cnt ? 0 : -ESRCH;
8307 }
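
/*
 * A minimal usage sketch (hypothetical caller); the array has to be in the
 * order symbols_cmp() expects, i.e. plain strcmp() order:
 *
 *	const char *syms[] = { "schedule", "vfs_read" };
 *	unsigned long addrs[ARRAY_SIZE(syms)];
 *
 *	if (!ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs))
 *		pr_info("schedule at %px, vfs_read at %px\n",
 *			(void *)addrs[0], (void *)addrs[1]);
 */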
8308 
8309 #ifdef CONFIG_SYSCTL
8310 
8311 #ifdef CONFIG_DYNAMIC_FTRACE
8312 static void ftrace_startup_sysctl(void)
8313 {
8314     int command;
8315 
8316     if (unlikely(ftrace_disabled))
8317         return;
8318 
8319     /* Force update next time */
8320     saved_ftrace_func = NULL;
8321     /* ftrace_start_up is true if we want ftrace running */
8322     if (ftrace_start_up) {
8323         command = FTRACE_UPDATE_CALLS;
8324         if (ftrace_graph_active)
8325             command |= FTRACE_START_FUNC_RET;
8326         ftrace_startup_enable(command);
8327     }
8328 }
8329 
8330 static void ftrace_shutdown_sysctl(void)
8331 {
8332     int command;
8333 
8334     if (unlikely(ftrace_disabled))
8335         return;
8336 
8337     /* ftrace_start_up is true if ftrace is running */
8338     if (ftrace_start_up) {
8339         command = FTRACE_DISABLE_CALLS;
8340         if (ftrace_graph_active)
8341             command |= FTRACE_STOP_FUNC_RET;
8342         ftrace_run_update_code(command);
8343     }
8344 }
8345 #else
8346 # define ftrace_startup_sysctl()       do { } while (0)
8347 # define ftrace_shutdown_sysctl()      do { } while (0)
8348 #endif /* CONFIG_DYNAMIC_FTRACE */
8349 
8350 static bool is_permanent_ops_registered(void)
8351 {
8352     struct ftrace_ops *op;
8353 
8354     do_for_each_ftrace_op(op, ftrace_ops_list) {
8355         if (op->flags & FTRACE_OPS_FL_PERMANENT)
8356             return true;
8357     } while_for_each_ftrace_op(op);
8358 
8359     return false;
8360 }
8361 
8362 static int
8363 ftrace_enable_sysctl(struct ctl_table *table, int write,
8364              void *buffer, size_t *lenp, loff_t *ppos)
8365 {
8366     int ret = -ENODEV;
8367 
8368     mutex_lock(&ftrace_lock);
8369 
8370     if (unlikely(ftrace_disabled))
8371         goto out;
8372 
8373     ret = proc_dointvec(table, write, buffer, lenp, ppos);
8374 
8375     if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
8376         goto out;
8377 
8378     if (ftrace_enabled) {
8379 
8380         /* we are starting ftrace again */
8381         if (rcu_dereference_protected(ftrace_ops_list,
8382             lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
8383             update_ftrace_function();
8384 
8385         ftrace_startup_sysctl();
8386 
8387     } else {
8388         if (is_permanent_ops_registered()) {
8389             ftrace_enabled = true;
8390             ret = -EBUSY;
8391             goto out;
8392         }
8393 
8394         /* stopping ftrace calls (just send to ftrace_stub) */
8395         ftrace_trace_function = ftrace_stub;
8396 
8397         ftrace_shutdown_sysctl();
8398     }
8399 
8400     last_ftrace_enabled = !!ftrace_enabled;
8401  out:
8402     mutex_unlock(&ftrace_lock);
8403     return ret;
8404 }
8405 
8406 static struct ctl_table ftrace_sysctls[] = {
8407     {
8408         .procname       = "ftrace_enabled",
8409         .data           = &ftrace_enabled,
8410         .maxlen         = sizeof(int),
8411         .mode           = 0644,
8412         .proc_handler   = ftrace_enable_sysctl,
8413     },
8414     {}
8415 };
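
/*
 * A minimal usage sketch (assuming the conventional procfs layout): the
 * table above surfaces as /proc/sys/kernel/ftrace_enabled, so e.g.
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *
 * goes through ftrace_enable_sysctl() and, unless a PERMANENT ops is
 * registered, points ftrace_trace_function back at ftrace_stub.
 */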
8416 
8417 static int __init ftrace_sysctl_init(void)
8418 {
8419     register_sysctl_init("kernel", ftrace_sysctls);
8420     return 0;
8421 }
8422 late_initcall(ftrace_sysctl_init);
8423 #endif