0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Kprobes-based tracing events
0004  *
0005  * Created by Masami Hiramatsu <mhiramat@redhat.com>
0006  *
0007  */
0008 #define pr_fmt(fmt) "trace_kprobe: " fmt
0009 
0010 #include <linux/bpf-cgroup.h>
0011 #include <linux/security.h>
0012 #include <linux/module.h>
0013 #include <linux/uaccess.h>
0014 #include <linux/rculist.h>
0015 #include <linux/error-injection.h>
0016 
0017 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
0018 
0019 #include "trace_dynevent.h"
0020 #include "trace_kprobe_selftest.h"
0021 #include "trace_probe.h"
0022 #include "trace_probe_tmpl.h"
0023 
0024 #define KPROBE_EVENT_SYSTEM "kprobes"
0025 #define KRETPROBE_MAXACTIVE_MAX 4096
0026 
0027 /* Kprobe early definition from command line */
0028 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
0029 
0030 static int __init set_kprobe_boot_events(char *str)
0031 {
0032     strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
0033     disable_tracing_selftest("running kprobe events");
0034 
0035     return 1;
0036 }
0037 __setup("kprobe_event=", set_kprobe_boot_events);
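
/*
 * For illustration (a sketch based on the kernel-parameters documentation,
 * not on code in this file): boot-time probe definitions use commas in
 * place of spaces, and semicolons separate multiple events, e.g.
 *
 *      kprobe_event=p,vfs_read,$arg1,$arg2
 */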
0038 
0039 static int trace_kprobe_create(const char *raw_command);
0040 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
0041 static int trace_kprobe_release(struct dyn_event *ev);
0042 static bool trace_kprobe_is_busy(struct dyn_event *ev);
0043 static bool trace_kprobe_match(const char *system, const char *event,
0044             int argc, const char **argv, struct dyn_event *ev);
0045 
0046 static struct dyn_event_operations trace_kprobe_ops = {
0047     .create = trace_kprobe_create,
0048     .show = trace_kprobe_show,
0049     .is_busy = trace_kprobe_is_busy,
0050     .free = trace_kprobe_release,
0051     .match = trace_kprobe_match,
0052 };
0053 
0054 /*
0055  * Kprobe event core functions
0056  */
0057 struct trace_kprobe {
0058     struct dyn_event    devent;
0059     struct kretprobe    rp; /* Use rp.kp for kprobe use */
0060     unsigned long __percpu *nhit;
0061     const char      *symbol;    /* symbol name */
0062     struct trace_probe  tp;
0063 };
0064 
0065 static bool is_trace_kprobe(struct dyn_event *ev)
0066 {
0067     return ev->ops == &trace_kprobe_ops;
0068 }
0069 
0070 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
0071 {
0072     return container_of(ev, struct trace_kprobe, devent);
0073 }
0074 
0075 /**
0076  * for_each_trace_kprobe - iterate over the trace_kprobe list
0077  * @pos:    the struct trace_kprobe * for each entry
0078  * @dpos:   the struct dyn_event * to use as a loop cursor
0079  */
0080 #define for_each_trace_kprobe(pos, dpos)    \
0081     for_each_dyn_event(dpos)        \
0082         if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
0083 
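/*
 * A minimal usage sketch of the iterator above (it mirrors
 * find_trace_kprobe() below; callers are assumed to hold event_mutex, as
 * the users in this file do):
 *
 *      struct dyn_event *dpos;
 *      struct trace_kprobe *pos;
 *
 *      for_each_trace_kprobe(pos, dpos)
 *              pr_info("%s\n", trace_probe_name(&pos->tp));
 */
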
0084 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
0085 {
0086     return tk->rp.handler != NULL;
0087 }
0088 
0089 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
0090 {
0091     return tk->symbol ? tk->symbol : "unknown";
0092 }
0093 
0094 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
0095 {
0096     return tk->rp.kp.offset;
0097 }
0098 
0099 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
0100 {
0101     return kprobe_gone(&tk->rp.kp);
0102 }
0103 
0104 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
0105                          struct module *mod)
0106 {
0107     int len = strlen(module_name(mod));
0108     const char *name = trace_kprobe_symbol(tk);
0109 
0110     return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
0111 }
0112 
0113 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
0114 {
0115     char *p;
0116     bool ret;
0117 
0118     if (!tk->symbol)
0119         return false;
0120     p = strchr(tk->symbol, ':');
0121     if (!p)
0122         return true;
0123     *p = '\0';
0124     rcu_read_lock_sched();
0125     ret = !!find_module(tk->symbol);
0126     rcu_read_unlock_sched();
0127     *p = ':';
0128 
0129     return ret;
0130 }
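
/*
 * e.g. given tk->symbol == "btrfs:btrfs_sync_file" (an illustrative value),
 * the helper above temporarily terminates the string at the ':' and checks
 * whether the "btrfs" module is currently loaded.
 */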
0131 
0132 static bool trace_kprobe_is_busy(struct dyn_event *ev)
0133 {
0134     struct trace_kprobe *tk = to_trace_kprobe(ev);
0135 
0136     return trace_probe_is_enabled(&tk->tp);
0137 }
0138 
0139 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
0140                         int argc, const char **argv)
0141 {
0142     char buf[MAX_ARGSTR_LEN + 1];
0143 
0144     if (!argc)
0145         return true;
0146 
0147     if (!tk->symbol)
0148         snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
0149     else if (tk->rp.kp.offset)
0150         snprintf(buf, sizeof(buf), "%s+%u",
0151              trace_kprobe_symbol(tk), tk->rp.kp.offset);
0152     else
0153         snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
0154     if (strcmp(buf, argv[0]))
0155         return false;
0156     argc--; argv++;
0157 
0158     return trace_probe_match_command_args(&tk->tp, argc, argv);
0159 }
0160 
0161 static bool trace_kprobe_match(const char *system, const char *event,
0162             int argc, const char **argv, struct dyn_event *ev)
0163 {
0164     struct trace_kprobe *tk = to_trace_kprobe(ev);
0165 
0166     return (event[0] == '\0' ||
0167         strcmp(trace_probe_name(&tk->tp), event) == 0) &&
0168         (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
0169         trace_kprobe_match_command_head(tk, argc, argv);
0170 }
0171 
0172 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
0173 {
0174     unsigned long nhit = 0;
0175     int cpu;
0176 
0177     for_each_possible_cpu(cpu)
0178         nhit += *per_cpu_ptr(tk->nhit, cpu);
0179 
0180     return nhit;
0181 }
0182 
0183 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
0184 {
0185     return !(list_empty(&tk->rp.kp.list) &&
0186          hlist_unhashed(&tk->rp.kp.hlist));
0187 }
0188 
0189 /* Return 0 if it fails to find the symbol address */
0190 static nokprobe_inline
0191 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
0192 {
0193     unsigned long addr;
0194 
0195     if (tk->symbol) {
0196         addr = (unsigned long)
0197             kallsyms_lookup_name(trace_kprobe_symbol(tk));
0198         if (addr)
0199             addr += tk->rp.kp.offset;
0200     } else {
0201         addr = (unsigned long)tk->rp.kp.addr;
0202     }
0203     return addr;
0204 }
0205 
0206 static nokprobe_inline struct trace_kprobe *
0207 trace_kprobe_primary_from_call(struct trace_event_call *call)
0208 {
0209     struct trace_probe *tp;
0210 
0211     tp = trace_probe_primary_from_call(call);
0212     if (WARN_ON_ONCE(!tp))
0213         return NULL;
0214 
0215     return container_of(tp, struct trace_kprobe, tp);
0216 }
0217 
0218 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
0219 {
0220     struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
0221 
0222     return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
0223             tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
0224             tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
0225 }
0226 
0227 bool trace_kprobe_error_injectable(struct trace_event_call *call)
0228 {
0229     struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
0230 
0231     return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
0232            false;
0233 }
0234 
0235 static int register_kprobe_event(struct trace_kprobe *tk);
0236 static int unregister_kprobe_event(struct trace_kprobe *tk);
0237 
0238 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
0239 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
0240                 struct pt_regs *regs);
0241 
0242 static void free_trace_kprobe(struct trace_kprobe *tk)
0243 {
0244     if (tk) {
0245         trace_probe_cleanup(&tk->tp);
0246         kfree(tk->symbol);
0247         free_percpu(tk->nhit);
0248         kfree(tk);
0249     }
0250 }
0251 
0252 /*
0253  * Allocate new trace_probe and initialize it (including kprobes).
0254  */
0255 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
0256                          const char *event,
0257                          void *addr,
0258                          const char *symbol,
0259                          unsigned long offs,
0260                          int maxactive,
0261                          int nargs, bool is_return)
0262 {
0263     struct trace_kprobe *tk;
0264     int ret = -ENOMEM;
0265 
0266     tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
0267     if (!tk)
0268         return ERR_PTR(ret);
0269 
0270     tk->nhit = alloc_percpu(unsigned long);
0271     if (!tk->nhit)
0272         goto error;
0273 
0274     if (symbol) {
0275         tk->symbol = kstrdup(symbol, GFP_KERNEL);
0276         if (!tk->symbol)
0277             goto error;
0278         tk->rp.kp.symbol_name = tk->symbol;
0279         tk->rp.kp.offset = offs;
0280     } else
0281         tk->rp.kp.addr = addr;
0282 
0283     if (is_return)
0284         tk->rp.handler = kretprobe_dispatcher;
0285     else
0286         tk->rp.kp.pre_handler = kprobe_dispatcher;
0287 
0288     tk->rp.maxactive = maxactive;
0289     INIT_HLIST_NODE(&tk->rp.kp.hlist);
0290     INIT_LIST_HEAD(&tk->rp.kp.list);
0291 
0292     ret = trace_probe_init(&tk->tp, event, group, false);
0293     if (ret < 0)
0294         goto error;
0295 
0296     dyn_event_init(&tk->devent, &trace_kprobe_ops);
0297     return tk;
0298 error:
0299     free_trace_kprobe(tk);
0300     return ERR_PTR(ret);
0301 }
0302 
0303 static struct trace_kprobe *find_trace_kprobe(const char *event,
0304                           const char *group)
0305 {
0306     struct dyn_event *pos;
0307     struct trace_kprobe *tk;
0308 
0309     for_each_trace_kprobe(tk, pos)
0310         if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
0311             strcmp(trace_probe_group_name(&tk->tp), group) == 0)
0312             return tk;
0313     return NULL;
0314 }
0315 
0316 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
0317 {
0318     int ret = 0;
0319 
0320     if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
0321         if (trace_kprobe_is_return(tk))
0322             ret = enable_kretprobe(&tk->rp);
0323         else
0324             ret = enable_kprobe(&tk->rp.kp);
0325     }
0326 
0327     return ret;
0328 }
0329 
0330 static void __disable_trace_kprobe(struct trace_probe *tp)
0331 {
0332     struct trace_kprobe *tk;
0333 
0334     list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
0335         if (!trace_kprobe_is_registered(tk))
0336             continue;
0337         if (trace_kprobe_is_return(tk))
0338             disable_kretprobe(&tk->rp);
0339         else
0340             disable_kprobe(&tk->rp.kp);
0341     }
0342 }
0343 
0344 /*
0345  * Enable trace_probe
0346  * If the @file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
0347  */
0348 static int enable_trace_kprobe(struct trace_event_call *call,
0349                 struct trace_event_file *file)
0350 {
0351     struct trace_probe *tp;
0352     struct trace_kprobe *tk;
0353     bool enabled;
0354     int ret = 0;
0355 
0356     tp = trace_probe_primary_from_call(call);
0357     if (WARN_ON_ONCE(!tp))
0358         return -ENODEV;
0359     enabled = trace_probe_is_enabled(tp);
0360 
0361     /* This also changes "enabled" state */
0362     if (file) {
0363         ret = trace_probe_add_file(tp, file);
0364         if (ret)
0365             return ret;
0366     } else
0367         trace_probe_set_flag(tp, TP_FLAG_PROFILE);
0368 
0369     if (enabled)
0370         return 0;
0371 
0372     list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
0373         if (trace_kprobe_has_gone(tk))
0374             continue;
0375         ret = __enable_trace_kprobe(tk);
0376         if (ret)
0377             break;
0378         enabled = true;
0379     }
0380 
0381     if (ret) {
0382         /* Failed to enable one of them. Roll back all */
0383         if (enabled)
0384             __disable_trace_kprobe(tp);
0385         if (file)
0386             trace_probe_remove_file(tp, file);
0387         else
0388             trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
0389     }
0390 
0391     return ret;
0392 }
0393 
0394 /*
0395  * Disable trace_probe
0396  * If the @file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
0397  */
0398 static int disable_trace_kprobe(struct trace_event_call *call,
0399                 struct trace_event_file *file)
0400 {
0401     struct trace_probe *tp;
0402 
0403     tp = trace_probe_primary_from_call(call);
0404     if (WARN_ON_ONCE(!tp))
0405         return -ENODEV;
0406 
0407     if (file) {
0408         if (!trace_probe_get_file_link(tp, file))
0409             return -ENOENT;
0410         if (!trace_probe_has_single_file(tp))
0411             goto out;
0412         trace_probe_clear_flag(tp, TP_FLAG_TRACE);
0413     } else
0414         trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
0415 
0416     if (!trace_probe_is_enabled(tp))
0417         __disable_trace_kprobe(tp);
0418 
0419  out:
0420     if (file)
0421         /*
0422          * Synchronization is done in the function below. For perf events,
0423          * file == NULL and perf_trace_event_unreg() calls
0424          * tracepoint_synchronize_unregister() to synchronize the event,
0425          * so we don't need to handle that here.
0426          */
0427         trace_probe_remove_file(tp, file);
0428 
0429     return 0;
0430 }
0431 
0432 #if defined(CONFIG_DYNAMIC_FTRACE) && \
0433     !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
0434 static bool __within_notrace_func(unsigned long addr)
0435 {
0436     unsigned long offset, size;
0437 
0438     if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
0439         return false;
0440 
0441     /* Get the entry address of the target function */
0442     addr -= offset;
0443 
0444     /*
0445      * Since ftrace_location_range() does inclusive range check, we need
0446      * to subtract 1 byte from the end address.
0447      */
0448     return !ftrace_location_range(addr, addr + size - 1);
0449 }
0450 
0451 static bool within_notrace_func(struct trace_kprobe *tk)
0452 {
0453     unsigned long addr = trace_kprobe_address(tk);
0454     char symname[KSYM_NAME_LEN], *p;
0455 
0456     if (!__within_notrace_func(addr))
0457         return false;
0458 
0459     /* Check if the address is on a suffixed symbol (e.g. "foo.constprop.0") */
0460     if (!lookup_symbol_name(addr, symname)) {
0461         p = strchr(symname, '.');
0462         if (!p)
0463             return true;
0464         *p = '\0';
0465         addr = (unsigned long)kprobe_lookup_name(symname, 0);
0466         if (addr)
0467             return __within_notrace_func(addr);
0468     }
0469 
0470     return true;
0471 }
0472 #else
0473 #define within_notrace_func(tk) (false)
0474 #endif
0475 
0476 /* Internal register function - just handle k*probes and flags */
0477 static int __register_trace_kprobe(struct trace_kprobe *tk)
0478 {
0479     int i, ret;
0480 
0481     ret = security_locked_down(LOCKDOWN_KPROBES);
0482     if (ret)
0483         return ret;
0484 
0485     if (trace_kprobe_is_registered(tk))
0486         return -EINVAL;
0487 
0488     if (within_notrace_func(tk)) {
0489         pr_warn("Could not probe notrace function %s\n",
0490             trace_kprobe_symbol(tk));
0491         return -EINVAL;
0492     }
0493 
0494     for (i = 0; i < tk->tp.nr_args; i++) {
0495         ret = traceprobe_update_arg(&tk->tp.args[i]);
0496         if (ret)
0497             return ret;
0498     }
0499 
0500     /* Set/clear disabled flag according to tp->flag */
0501     if (trace_probe_is_enabled(&tk->tp))
0502         tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
0503     else
0504         tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
0505 
0506     if (trace_kprobe_is_return(tk))
0507         ret = register_kretprobe(&tk->rp);
0508     else
0509         ret = register_kprobe(&tk->rp.kp);
0510 
0511     return ret;
0512 }
0513 
0514 /* Internal unregister function - just handle k*probes and flags */
0515 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
0516 {
0517     if (trace_kprobe_is_registered(tk)) {
0518         if (trace_kprobe_is_return(tk))
0519             unregister_kretprobe(&tk->rp);
0520         else
0521             unregister_kprobe(&tk->rp.kp);
0522         /* Cleanup kprobe for reuse and mark it unregistered */
0523         INIT_HLIST_NODE(&tk->rp.kp.hlist);
0524         INIT_LIST_HEAD(&tk->rp.kp.list);
0525         if (tk->rp.kp.symbol_name)
0526             tk->rp.kp.addr = NULL;
0527     }
0528 }
0529 
0530 /* Unregister a trace_probe and probe_event */
0531 static int unregister_trace_kprobe(struct trace_kprobe *tk)
0532 {
0533     /* If other probes are on the event, just unregister the kprobe */
0534     if (trace_probe_has_sibling(&tk->tp))
0535         goto unreg;
0536 
0537     /* An enabled event cannot be unregistered */
0538     if (trace_probe_is_enabled(&tk->tp))
0539         return -EBUSY;
0540 
0541     /* If there's a reference to the dynamic event */
0542     if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
0543         return -EBUSY;
0544 
0545     /* Will fail if probe is being used by ftrace or perf */
0546     if (unregister_kprobe_event(tk))
0547         return -EBUSY;
0548 
0549 unreg:
0550     __unregister_trace_kprobe(tk);
0551     dyn_event_remove(&tk->devent);
0552     trace_probe_unlink(&tk->tp);
0553 
0554     return 0;
0555 }
0556 
0557 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
0558                      struct trace_kprobe *comp)
0559 {
0560     struct trace_probe_event *tpe = orig->tp.event;
0561     int i;
0562 
0563     list_for_each_entry(orig, &tpe->probes, tp.list) {
0564         if (strcmp(trace_kprobe_symbol(orig),
0565                trace_kprobe_symbol(comp)) ||
0566             trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
0567             continue;
0568 
0569         /*
0570          * trace_probe_compare_arg_type() ensured that nr_args and each
0571          * argument name and type are the same. Let's compare each comm too.
0572          */
0573         for (i = 0; i < orig->tp.nr_args; i++) {
0574             if (strcmp(orig->tp.args[i].comm,
0575                    comp->tp.args[i].comm))
0576                 break;
0577         }
0578 
0579         if (i == orig->tp.nr_args)
0580             return true;
0581     }
0582 
0583     return false;
0584 }
0585 
0586 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
0587 {
0588     int ret;
0589 
0590     ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
0591     if (ret) {
0592         /* Note that arguments start at index 2 */
0593         trace_probe_log_set_index(ret + 1);
0594         trace_probe_log_err(0, DIFF_ARG_TYPE);
0595         return -EEXIST;
0596     }
0597     if (trace_kprobe_has_same_kprobe(to, tk)) {
0598         trace_probe_log_set_index(0);
0599         trace_probe_log_err(0, SAME_PROBE);
0600         return -EEXIST;
0601     }
0602 
0603     /* Append to existing event */
0604     ret = trace_probe_append(&tk->tp, &to->tp);
0605     if (ret)
0606         return ret;
0607 
0608     /* Register k*probe */
0609     ret = __register_trace_kprobe(tk);
0610     if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
0611         pr_warn("This probe might be able to register after the target module is loaded. Continue.\n");
0612         ret = 0;
0613     }
0614 
0615     if (ret)
0616         trace_probe_unlink(&tk->tp);
0617     else
0618         dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
0619 
0620     return ret;
0621 }
0622 
0623 /* Register a trace_probe and probe_event */
0624 static int register_trace_kprobe(struct trace_kprobe *tk)
0625 {
0626     struct trace_kprobe *old_tk;
0627     int ret;
0628 
0629     mutex_lock(&event_mutex);
0630 
0631     old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
0632                    trace_probe_group_name(&tk->tp));
0633     if (old_tk) {
0634         if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
0635             trace_probe_log_set_index(0);
0636             trace_probe_log_err(0, DIFF_PROBE_TYPE);
0637             ret = -EEXIST;
0638         } else {
0639             ret = append_trace_kprobe(tk, old_tk);
0640         }
0641         goto end;
0642     }
0643 
0644     /* Register new event */
0645     ret = register_kprobe_event(tk);
0646     if (ret) {
0647         if (ret == -EEXIST) {
0648             trace_probe_log_set_index(0);
0649             trace_probe_log_err(0, EVENT_EXIST);
0650         } else
0651             pr_warn("Failed to register probe event(%d)\n", ret);
0652         goto end;
0653     }
0654 
0655     /* Register k*probe */
0656     ret = __register_trace_kprobe(tk);
0657     if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
0658         pr_warn("This probe might be able to register after the target module is loaded. Continue.\n");
0659         ret = 0;
0660     }
0661 
0662     if (ret < 0)
0663         unregister_kprobe_event(tk);
0664     else
0665         dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
0666 
0667 end:
0668     mutex_unlock(&event_mutex);
0669     return ret;
0670 }
0671 
0672 /* Module notifier call back, checking event on the module */
0673 static int trace_kprobe_module_callback(struct notifier_block *nb,
0674                        unsigned long val, void *data)
0675 {
0676     struct module *mod = data;
0677     struct dyn_event *pos;
0678     struct trace_kprobe *tk;
0679     int ret;
0680 
0681     if (val != MODULE_STATE_COMING)
0682         return NOTIFY_DONE;
0683 
0684     /* Update probes on coming module */
0685     mutex_lock(&event_mutex);
0686     for_each_trace_kprobe(tk, pos) {
0687         if (trace_kprobe_within_module(tk, mod)) {
0688             /* No need to check busy - this probe should have gone. */
0689             __unregister_trace_kprobe(tk);
0690             ret = __register_trace_kprobe(tk);
0691             if (ret)
0692                 pr_warn("Failed to re-register probe %s on %s: %d\n",
0693                     trace_probe_name(&tk->tp),
0694                     module_name(mod), ret);
0695         }
0696     }
0697     mutex_unlock(&event_mutex);
0698 
0699     return NOTIFY_DONE;
0700 }
0701 
0702 static struct notifier_block trace_kprobe_module_nb = {
0703     .notifier_call = trace_kprobe_module_callback,
0704     .priority = 1   /* Invoked after kprobe module callback */
0705 };
0706 
0707 static int __trace_kprobe_create(int argc, const char *argv[])
0708 {
0709     /*
0710      * Argument syntax:
0711      *  - Add kprobe:
0712      *      p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
0713      *  - Add kretprobe:
0714      *      r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
0715      *    Or
0716      *      p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
0717      *
0718      * Fetch args:
0719      *  $retval : fetch return value
0720      *  $stack  : fetch stack address
0721      *  $stackN : fetch Nth entry of stack (N >= 0)
0722      *  $comm       : fetch current task comm
0723      *  @ADDR   : fetch memory at ADDR (ADDR must be a kernel address)
0724      *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
0725      *  %REG    : fetch register REG
0726      * Dereferencing memory fetch:
0727      *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
0728      * Alias name of args:
0729      *  NAME=FETCHARG : set NAME as alias of FETCHARG.
0730      * Type of args:
0731      *  FETCHARG:TYPE : use TYPE instead of unsigned long.
0732      */
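    /*
     * For illustration (a sketch; Documentation/trace/kprobetrace.rst has
     * the authoritative examples), the strings parsed here typically arrive
     * via tracefs writes such as:
     *
     *      echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' \
     *              >> /sys/kernel/tracing/kprobe_events
     *      echo 'r:myretprobe do_sys_open ret=$retval' \
     *              >> /sys/kernel/tracing/kprobe_events
     */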
0733     struct trace_kprobe *tk = NULL;
0734     int i, len, ret = 0;
0735     bool is_return = false;
0736     char *symbol = NULL, *tmp = NULL;
0737     const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
0738     enum probe_print_type ptype;
0739     int maxactive = 0;
0740     long offset = 0;
0741     void *addr = NULL;
0742     char buf[MAX_EVENT_NAME_LEN];
0743     char gbuf[MAX_EVENT_NAME_LEN];
0744     unsigned int flags = TPARG_FL_KERNEL;
0745 
0746     switch (argv[0][0]) {
0747     case 'r':
0748         is_return = true;
0749         break;
0750     case 'p':
0751         break;
0752     default:
0753         return -ECANCELED;
0754     }
0755     if (argc < 2)
0756         return -ECANCELED;
0757 
0758     trace_probe_log_init("trace_kprobe", argc, argv);
0759 
0760     event = strchr(&argv[0][1], ':');
0761     if (event)
0762         event++;
0763 
0764     if (isdigit(argv[0][1])) {
0765         if (!is_return) {
0766             trace_probe_log_err(1, MAXACT_NO_KPROBE);
0767             goto parse_error;
0768         }
0769         if (event)
0770             len = event - &argv[0][1] - 1;
0771         else
0772             len = strlen(&argv[0][1]);
0773         if (len > MAX_EVENT_NAME_LEN - 1) {
0774             trace_probe_log_err(1, BAD_MAXACT);
0775             goto parse_error;
0776         }
0777         memcpy(buf, &argv[0][1], len);
0778         buf[len] = '\0';
0779         ret = kstrtouint(buf, 0, &maxactive);
0780         if (ret || !maxactive) {
0781             trace_probe_log_err(1, BAD_MAXACT);
0782             goto parse_error;
0783         }
0784         /* kretprobe instances are iterated over via a list. The
0785          * maximum should stay reasonable.
0786          */
0787         if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
0788             trace_probe_log_err(1, MAXACT_TOO_BIG);
0789             goto parse_error;
0790         }
0791     }
0792 
0793     /* Try to parse an address. If that fails, try to read the
0794      * input as a symbol. */
0795     if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
0796         trace_probe_log_set_index(1);
0797         /* Check whether a uprobe event was specified */
0798         if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
0799             ret = -ECANCELED;
0800             goto error;
0801         }
0802         /* A symbol was specified */
0803         symbol = kstrdup(argv[1], GFP_KERNEL);
0804         if (!symbol)
0805             return -ENOMEM;
0806 
0807         tmp = strchr(symbol, '%');
0808         if (tmp) {
0809             if (!strcmp(tmp, "%return")) {
0810                 *tmp = '\0';
0811                 is_return = true;
0812             } else {
0813                 trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
0814                 goto parse_error;
0815             }
0816         }
0817 
0818         /* TODO: support .init module functions */
0819         ret = traceprobe_split_symbol_offset(symbol, &offset);
0820         if (ret || offset < 0 || offset > UINT_MAX) {
0821             trace_probe_log_err(0, BAD_PROBE_ADDR);
0822             goto parse_error;
0823         }
0824         if (is_return)
0825             flags |= TPARG_FL_RETURN;
0826         ret = kprobe_on_func_entry(NULL, symbol, offset);
0827         if (ret == 0)
0828             flags |= TPARG_FL_FENTRY;
0829         /* Defer the ENOENT case until registering the kprobe */
0830         if (ret == -EINVAL && is_return) {
0831             trace_probe_log_err(0, BAD_RETPROBE);
0832             goto parse_error;
0833         }
0834     }
0835 
0836     trace_probe_log_set_index(0);
0837     if (event) {
0838         ret = traceprobe_parse_event_name(&event, &group, gbuf,
0839                           event - argv[0]);
0840         if (ret)
0841             goto parse_error;
0842     }
0843 
0844     if (!event) {
0845         /* Make a new event name */
0846         if (symbol)
0847             snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
0848                  is_return ? 'r' : 'p', symbol, offset);
0849         else
0850             snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
0851                  is_return ? 'r' : 'p', addr);
0852         sanitize_event_name(buf);
0853         event = buf;
0854     }
0855 
0856     /* Set up a probe */
0857     tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
0858                    argc - 2, is_return);
0859     if (IS_ERR(tk)) {
0860         ret = PTR_ERR(tk);
0861         /* This must return -ENOMEM, else there is a bug */
0862         WARN_ON_ONCE(ret != -ENOMEM);
0863         goto out;   /* We know tk is not allocated */
0864     }
0865     argc -= 2; argv += 2;
0866 
0867     /* parse arguments */
0868     for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
0869         trace_probe_log_set_index(i + 2);
0870         ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], flags);
0871         if (ret)
0872             goto error; /* This can be -ENOMEM */
0873     }
0874 
0875     ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
0876     ret = traceprobe_set_print_fmt(&tk->tp, ptype);
0877     if (ret < 0)
0878         goto error;
0879 
0880     ret = register_trace_kprobe(tk);
0881     if (ret) {
0882         trace_probe_log_set_index(1);
0883         if (ret == -EILSEQ)
0884             trace_probe_log_err(0, BAD_INSN_BNDRY);
0885         else if (ret == -ENOENT)
0886             trace_probe_log_err(0, BAD_PROBE_ADDR);
0887         else if (ret != -ENOMEM && ret != -EEXIST)
0888             trace_probe_log_err(0, FAIL_REG_PROBE);
0889         goto error;
0890     }
0891 
0892 out:
0893     trace_probe_log_clear();
0894     kfree(symbol);
0895     return ret;
0896 
0897 parse_error:
0898     ret = -EINVAL;
0899 error:
0900     free_trace_kprobe(tk);
0901     goto out;
0902 }
0903 
0904 static int trace_kprobe_create(const char *raw_command)
0905 {
0906     return trace_probe_create(raw_command, __trace_kprobe_create);
0907 }
0908 
0909 static int create_or_delete_trace_kprobe(const char *raw_command)
0910 {
0911     int ret;
0912 
0913     if (raw_command[0] == '-')
0914         return dyn_event_release(raw_command, &trace_kprobe_ops);
0915 
0916     ret = trace_kprobe_create(raw_command);
0917     return ret == -ECANCELED ? -EINVAL : ret;
0918 }
0919 
0920 static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
0921 {
0922     return create_or_delete_trace_kprobe(cmd->seq.buffer);
0923 }
0924 
0925 /**
0926  * kprobe_event_cmd_init - Initialize a kprobe event command object
0927  * @cmd: A pointer to the dynevent_cmd struct representing the new event
0928  * @buf: A pointer to the buffer used to build the command
0929  * @maxlen: The length of the buffer passed in @buf
0930  *
0931  * Initialize a kprobe event command object.  Use this before
0932  * calling any of the other kprobe_event functions.
0933  */
0934 void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
0935 {
0936     dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
0937               trace_kprobe_run_command);
0938 }
0939 EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
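
/*
 * A minimal in-kernel usage sketch (it assumes the
 * kprobe_event_gen_cmd_start()/kprobe_event_gen_cmd_end() wrappers from
 * <linux/trace_events.h> and is modeled on the in-tree generation test,
 * not on code in this file):
 *
 *      struct dynevent_cmd cmd;
 *      char *buf;
 *      int ret;
 *
 *      buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *      ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open",
 *                                       "dfd=%ax", "filename=%dx");
 *      if (!ret)
 *              ret = kprobe_event_gen_cmd_end(&cmd);
 */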
0940 
0941 /**
0942  * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
0943  * @cmd: A pointer to the dynevent_cmd struct representing the new event
0944  * @name: The name of the kprobe event
0945  * @loc: The location of the kprobe event
0946  * @kretprobe: Is this a return probe?
0947  * @args: Variable number of arg (pairs), one pair for each field
0948  *
0949  * NOTE: Users normally won't want to call this function directly, but
0950  * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
0951  * adds a NULL to the end of the arg list.  If this function is used
0952  * directly, make sure the last arg in the variable arg list is NULL.
0953  *
0954  * Generate a kprobe event command to be executed by
0955  * kprobe_event_gen_cmd_end().  This function can be used to generate the
0956  * complete command or only the first part of it; in the latter case,
0957  * kprobe_event_add_fields() can be used to add more fields following this.
0958  *
0959  * Unlike synth_event_gen_cmd_start(), @loc must be specified; this
0960  * function returns -EINVAL if @loc == NULL.
0961  *
0962  * Return: 0 if successful, error otherwise.
0963  */
0964 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
0965                  const char *name, const char *loc, ...)
0966 {
0967     char buf[MAX_EVENT_NAME_LEN];
0968     struct dynevent_arg arg;
0969     va_list args;
0970     int ret;
0971 
0972     if (cmd->type != DYNEVENT_TYPE_KPROBE)
0973         return -EINVAL;
0974 
0975     if (!loc)
0976         return -EINVAL;
0977 
0978     if (kretprobe)
0979         snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
0980     else
0981         snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
0982 
0983     ret = dynevent_str_add(cmd, buf);
0984     if (ret)
0985         return ret;
0986 
0987     dynevent_arg_init(&arg, 0);
0988     arg.str = loc;
0989     ret = dynevent_arg_add(cmd, &arg, NULL);
0990     if (ret)
0991         return ret;
0992 
0993     va_start(args, loc);
0994     for (;;) {
0995         const char *field;
0996 
0997         field = va_arg(args, const char *);
0998         if (!field)
0999             break;
1000 
1001         if (++cmd->n_fields > MAX_TRACE_ARGS) {
1002             ret = -EINVAL;
1003             break;
1004         }
1005 
1006         arg.str = field;
1007         ret = dynevent_arg_add(cmd, &arg, NULL);
1008         if (ret)
1009             break;
1010     }
1011     va_end(args);
1012 
1013     return ret;
1014 }
1015 EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
1016 
1017 /**
1018  * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1019  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1020  * @args: Variable number of arg (pairs), one pair for each field
1021  *
1022  * NOTE: Users normally won't want to call this function directly, but
1023  * rather use the kprobe_event_add_fields() wrapper, which
1024  * automatically adds a NULL to the end of the arg list.  If this
1025  * function is used directly, make sure the last arg in the variable
1026  * arg list is NULL.
1027  *
1028  * Add probe fields to an existing kprobe command using a variable
1029  * list of args.  Fields are added in the same order they're listed.
1030  *
1031  * Return: 0 if successful, error otherwise.
1032  */
1033 int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1034 {
1035     struct dynevent_arg arg;
1036     va_list args;
1037     int ret = 0;
1038 
1039     if (cmd->type != DYNEVENT_TYPE_KPROBE)
1040         return -EINVAL;
1041 
1042     dynevent_arg_init(&arg, 0);
1043 
1044     va_start(args, cmd);
1045     for (;;) {
1046         const char *field;
1047 
1048         field = va_arg(args, const char *);
1049         if (!field)
1050             break;
1051 
1052         if (++cmd->n_fields > MAX_TRACE_ARGS) {
1053             ret = -EINVAL;
1054             break;
1055         }
1056 
1057         arg.str = field;
1058         ret = dynevent_arg_add(cmd, &arg, NULL);
1059         if (ret)
1060             break;
1061     }
1062     va_end(args);
1063 
1064     return ret;
1065 }
1066 EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
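
/*
 * A sketch of the two-step form described above (illustrative names;
 * kprobe_event_add_fields() is the NULL-terminating wrapper):
 *
 *      ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open");
 *      if (!ret)
 *              ret = kprobe_event_add_fields(&cmd, "dfd=%ax", "filename=%dx");
 *      if (!ret)
 *              ret = kprobe_event_gen_cmd_end(&cmd);
 */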
1067 
1068 /**
1069  * kprobe_event_delete - Delete a kprobe event
1070  * @name: The name of the kprobe event to delete
1071  *
1072  * Delete a kprobe event with the given @name from kernel code rather
1073  * than directly from the command line.
1074  *
1075  * Return: 0 if successful, error otherwise.
1076  */
1077 int kprobe_event_delete(const char *name)
1078 {
1079     char buf[MAX_EVENT_NAME_LEN];
1080 
1081     snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1082 
1083     return create_or_delete_trace_kprobe(buf);
1084 }
1085 EXPORT_SYMBOL_GPL(kprobe_event_delete);
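
/*
 * e.g. kprobe_event_delete("myprobe") builds and runs "-:myprobe" (an
 * illustrative name; deletion fails with -EBUSY while the event is still
 * enabled or otherwise in use).
 */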
1086 
1087 static int trace_kprobe_release(struct dyn_event *ev)
1088 {
1089     struct trace_kprobe *tk = to_trace_kprobe(ev);
1090     int ret = unregister_trace_kprobe(tk);
1091 
1092     if (!ret)
1093         free_trace_kprobe(tk);
1094     return ret;
1095 }
1096 
1097 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1098 {
1099     struct trace_kprobe *tk = to_trace_kprobe(ev);
1100     int i;
1101 
1102     seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1103     if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1104         seq_printf(m, "%d", tk->rp.maxactive);
1105     seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1106                 trace_probe_name(&tk->tp));
1107 
1108     if (!tk->symbol)
1109         seq_printf(m, " 0x%p", tk->rp.kp.addr);
1110     else if (tk->rp.kp.offset)
1111         seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1112                tk->rp.kp.offset);
1113     else
1114         seq_printf(m, " %s", trace_kprobe_symbol(tk));
1115 
1116     for (i = 0; i < tk->tp.nr_args; i++)
1117         seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1118     seq_putc(m, '\n');
1119 
1120     return 0;
1121 }
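
/*
 * e.g. a probe added as "p:myprobe do_sys_open dfd=%ax" (an illustrative
 * definition) reads back from the kprobe_events file in the same form:
 *
 *      p:kprobes/myprobe do_sys_open dfd=%ax
 */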
1122 
1123 static int probes_seq_show(struct seq_file *m, void *v)
1124 {
1125     struct dyn_event *ev = v;
1126 
1127     if (!is_trace_kprobe(ev))
1128         return 0;
1129 
1130     return trace_kprobe_show(m, ev);
1131 }
1132 
1133 static const struct seq_operations probes_seq_op = {
1134     .start  = dyn_event_seq_start,
1135     .next   = dyn_event_seq_next,
1136     .stop   = dyn_event_seq_stop,
1137     .show   = probes_seq_show
1138 };
1139 
1140 static int probes_open(struct inode *inode, struct file *file)
1141 {
1142     int ret;
1143 
1144     ret = security_locked_down(LOCKDOWN_TRACEFS);
1145     if (ret)
1146         return ret;
1147 
1148     if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1149         ret = dyn_events_release_all(&trace_kprobe_ops);
1150         if (ret < 0)
1151             return ret;
1152     }
1153 
1154     return seq_open(file, &probes_seq_op);
1155 }
1156 
1157 static ssize_t probes_write(struct file *file, const char __user *buffer,
1158                 size_t count, loff_t *ppos)
1159 {
1160     return trace_parse_run_command(file, buffer, count, ppos,
1161                        create_or_delete_trace_kprobe);
1162 }
1163 
1164 static const struct file_operations kprobe_events_ops = {
1165     .owner          = THIS_MODULE,
1166     .open           = probes_open,
1167     .read           = seq_read,
1168     .llseek         = seq_lseek,
1169     .release        = seq_release,
1170     .write      = probes_write,
1171 };
1172 
1173 /* Probes profiling interfaces */
1174 static int probes_profile_seq_show(struct seq_file *m, void *v)
1175 {
1176     struct dyn_event *ev = v;
1177     struct trace_kprobe *tk;
1178     unsigned long nmissed;
1179 
1180     if (!is_trace_kprobe(ev))
1181         return 0;
1182 
1183     tk = to_trace_kprobe(ev);
1184     nmissed = trace_kprobe_is_return(tk) ?
1185         tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
1186     seq_printf(m, "  %-44s %15lu %15lu\n",
1187            trace_probe_name(&tk->tp),
1188            trace_kprobe_nhit(tk),
1189            nmissed);
1190 
1191     return 0;
1192 }
1193 
1194 static const struct seq_operations profile_seq_op = {
1195     .start  = dyn_event_seq_start,
1196     .next   = dyn_event_seq_next,
1197     .stop   = dyn_event_seq_stop,
1198     .show   = probes_profile_seq_show
1199 };
1200 
1201 static int profile_open(struct inode *inode, struct file *file)
1202 {
1203     int ret;
1204 
1205     ret = security_locked_down(LOCKDOWN_TRACEFS);
1206     if (ret)
1207         return ret;
1208 
1209     return seq_open(file, &profile_seq_op);
1210 }
1211 
1212 static const struct file_operations kprobe_profile_ops = {
1213     .owner          = THIS_MODULE,
1214     .open           = profile_open,
1215     .read           = seq_read,
1216     .llseek         = seq_lseek,
1217     .release        = seq_release,
1218 };
1219 
1220 /* Kprobe specific fetch functions */
1221 
1222 /* Return the length of the string, including the terminating NUL byte */
1223 static nokprobe_inline int
1224 fetch_store_strlen_user(unsigned long addr)
1225 {
1226     const void __user *uaddr =  (__force const void __user *)addr;
1227 
1228     return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
1229 }
1230 
1231 /* Return the length of the string, including the terminating NUL byte */
1232 static nokprobe_inline int
1233 fetch_store_strlen(unsigned long addr)
1234 {
1235     int ret, len = 0;
1236     u8 c;
1237 
1238 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1239     if (addr < TASK_SIZE)
1240         return fetch_store_strlen_user(addr);
1241 #endif
1242 
1243     do {
1244         ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
1245         len++;
1246     } while (c && ret == 0 && len < MAX_STRING_SIZE);
1247 
1248     return (ret < 0) ? ret : len;
1249 }
1250 
1251 /*
1252  * Fetch a null-terminated string from user space. The caller MUST set
1253  * *(u32 *)dest to the maximum length and the relative data location.
1254  */
1255 static nokprobe_inline int
1256 fetch_store_string_user(unsigned long addr, void *dest, void *base)
1257 {
1258     const void __user *uaddr =  (__force const void __user *)addr;
1259     int maxlen = get_loc_len(*(u32 *)dest);
1260     void *__dest;
1261     long ret;
1262 
1263     if (unlikely(!maxlen))
1264         return -ENOMEM;
1265 
1266     __dest = get_loc_data(dest, base);
1267 
1268     ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
1269     if (ret >= 0)
1270         *(u32 *)dest = make_data_loc(ret, __dest - base);
1271 
1272     return ret;
1273 }
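
/*
 * For reference, a sketch of the data-location encoding assumed above (as
 * defined by make_data_loc()/get_loc_len() in trace_probe.h): the u32 at
 * *dest packs the string length into the upper 16 bits and the offset of
 * the string data relative to @base into the lower 16 bits:
 *
 *      *(u32 *)dest = make_data_loc(ret, __dest - base);
 *                   = ((u32)ret << 16) | ((__dest - base) & 0xffff);
 */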
1274 
1275 /*
1276  * Fetch a null-terminated string. The caller MUST set *(u32 *)dest to
1277  * the maximum length and the relative data location.
1278  */
1279 static nokprobe_inline int
1280 fetch_store_string(unsigned long addr, void *dest, void *base)
1281 {
1282     int maxlen = get_loc_len(*(u32 *)dest);
1283     void *__dest;
1284     long ret;
1285 
1286 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1287     if ((unsigned long)addr < TASK_SIZE)
1288         return fetch_store_string_user(addr, dest, base);
1289 #endif
1290 
1291     if (unlikely(!maxlen))
1292         return -ENOMEM;
1293 
1294     __dest = get_loc_data(dest, base);
1295 
1296     /*
1297      * Try to get string again, since the string can be changed while
1298      * probing.
1299      */
1300     ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
1301     if (ret >= 0)
1302         *(u32 *)dest = make_data_loc(ret, __dest - base);
1303 
1304     return ret;
1305 }
1306 
1307 static nokprobe_inline int
1308 probe_mem_read_user(void *dest, void *src, size_t size)
1309 {
1310     const void __user *uaddr =  (__force const void __user *)src;
1311 
1312     return copy_from_user_nofault(dest, uaddr, size);
1313 }
1314 
1315 static nokprobe_inline int
1316 probe_mem_read(void *dest, void *src, size_t size)
1317 {
1318 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1319     if ((unsigned long)src < TASK_SIZE)
1320         return probe_mem_read_user(dest, src, size);
1321 #endif
1322     return copy_from_kernel_nofault(dest, src, size);
1323 }
1324 
1325 /* Note that we don't verify it, since the fetch code does not come from user space */
1326 static int
1327 process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
1328            void *base)
1329 {
1330     struct pt_regs *regs = rec;
1331     unsigned long val;
1332 
1333 retry:
1334     /* 1st stage: get value from context */
1335     switch (code->op) {
1336     case FETCH_OP_REG:
1337         val = regs_get_register(regs, code->param);
1338         break;
1339     case FETCH_OP_STACK:
1340         val = regs_get_kernel_stack_nth(regs, code->param);
1341         break;
1342     case FETCH_OP_STACKP:
1343         val = kernel_stack_pointer(regs);
1344         break;
1345     case FETCH_OP_RETVAL:
1346         val = regs_return_value(regs);
1347         break;
1348     case FETCH_OP_IMM:
1349         val = code->immediate;
1350         break;
1351     case FETCH_OP_COMM:
1352         val = (unsigned long)current->comm;
1353         break;
1354     case FETCH_OP_DATA:
1355         val = (unsigned long)code->data;
1356         break;
1357 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1358     case FETCH_OP_ARG:
1359         val = regs_get_kernel_argument(regs, code->param);
1360         break;
1361 #endif
1362     case FETCH_NOP_SYMBOL:  /* Ignore a placeholder */
1363         code++;
1364         goto retry;
1365     default:
1366         return -EILSEQ;
1367     }
1368     code++;
1369 
1370     return process_fetch_insn_bottom(code, val, dest, base);
1371 }
1372 NOKPROBE_SYMBOL(process_fetch_insn)
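
/*
 * For example (a sketch of how one probe argument maps onto the two stages
 * above): a spec such as "+0($stack2):u32" is compiled so that the first
 * stage yields the value via FETCH_OP_STACK with code->param == 2, and the
 * dereference and the u32 store are handled by process_fetch_insn_bottom().
 */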
1373 
1374 /* Kprobe handler */
1375 static nokprobe_inline void
1376 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1377             struct trace_event_file *trace_file)
1378 {
1379     struct kprobe_trace_entry_head *entry;
1380     struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1381     struct trace_event_buffer fbuffer;
1382     int dsize;
1383 
1384     WARN_ON(call != trace_file->event_call);
1385 
1386     if (trace_trigger_soft_disabled(trace_file))
1387         return;
1388 
1389     dsize = __get_data_size(&tk->tp, regs);
1390 
1391     entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1392                        sizeof(*entry) + tk->tp.size + dsize);
1393     if (!entry)
1394         return;
1395 
1396     fbuffer.regs = regs;
1397     entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1398     entry->ip = (unsigned long)tk->rp.kp.addr;
1399     store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1400 
1401     trace_event_buffer_commit(&fbuffer);
1402 }
1403 
1404 static void
1405 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1406 {
1407     struct event_file_link *link;
1408 
1409     trace_probe_for_each_link_rcu(link, &tk->tp)
1410         __kprobe_trace_func(tk, regs, link->file);
1411 }
1412 NOKPROBE_SYMBOL(kprobe_trace_func);
1413 
1414 /* Kretprobe handler */
1415 static nokprobe_inline void
1416 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1417                struct pt_regs *regs,
1418                struct trace_event_file *trace_file)
1419 {
1420     struct kretprobe_trace_entry_head *entry;
1421     struct trace_event_buffer fbuffer;
1422     struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1423     int dsize;
1424 
1425     WARN_ON(call != trace_file->event_call);
1426 
1427     if (trace_trigger_soft_disabled(trace_file))
1428         return;
1429 
1430     dsize = __get_data_size(&tk->tp, regs);
1431 
1432     entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1433                        sizeof(*entry) + tk->tp.size + dsize);
1434     if (!entry)
1435         return;
1436 
1437     fbuffer.regs = regs;
1438     entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1439     entry->func = (unsigned long)tk->rp.kp.addr;
1440     entry->ret_ip = get_kretprobe_retaddr(ri);
1441     store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1442 
1443     trace_event_buffer_commit(&fbuffer);
1444 }
1445 
1446 static void
1447 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1448              struct pt_regs *regs)
1449 {
1450     struct event_file_link *link;
1451 
1452     trace_probe_for_each_link_rcu(link, &tk->tp)
1453         __kretprobe_trace_func(tk, ri, regs, link->file);
1454 }
1455 NOKPROBE_SYMBOL(kretprobe_trace_func);
1456 
1457 /* Event entry printers */
1458 static enum print_line_t
1459 print_kprobe_event(struct trace_iterator *iter, int flags,
1460            struct trace_event *event)
1461 {
1462     struct kprobe_trace_entry_head *field;
1463     struct trace_seq *s = &iter->seq;
1464     struct trace_probe *tp;
1465 
1466     field = (struct kprobe_trace_entry_head *)iter->ent;
1467     tp = trace_probe_primary_from_call(
1468         container_of(event, struct trace_event_call, event));
1469     if (WARN_ON_ONCE(!tp))
1470         goto out;
1471 
1472     trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1473 
1474     if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1475         goto out;
1476 
1477     trace_seq_putc(s, ')');
1478 
1479     if (print_probe_args(s, tp->args, tp->nr_args,
1480                  (u8 *)&field[1], field) < 0)
1481         goto out;
1482 
1483     trace_seq_putc(s, '\n');
1484  out:
1485     return trace_handle_return(s);
1486 }
1487 
1488 static enum print_line_t
1489 print_kretprobe_event(struct trace_iterator *iter, int flags,
1490               struct trace_event *event)
1491 {
1492     struct kretprobe_trace_entry_head *field;
1493     struct trace_seq *s = &iter->seq;
1494     struct trace_probe *tp;
1495 
1496     field = (struct kretprobe_trace_entry_head *)iter->ent;
1497     tp = trace_probe_primary_from_call(
1498         container_of(event, struct trace_event_call, event));
1499     if (WARN_ON_ONCE(!tp))
1500         goto out;
1501 
1502     trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1503 
1504     if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1505         goto out;
1506 
1507     trace_seq_puts(s, " <- ");
1508 
1509     if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1510         goto out;
1511 
1512     trace_seq_putc(s, ')');
1513 
1514     if (print_probe_args(s, tp->args, tp->nr_args,
1515                  (u8 *)&field[1], field) < 0)
1516         goto out;
1517 
1518     trace_seq_putc(s, '\n');
1519 
1520  out:
1521     return trace_handle_return(s);
1522 }
1523 
1524 
1525 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1526 {
1527     int ret;
1528     struct kprobe_trace_entry_head field;
1529     struct trace_probe *tp;
1530 
1531     tp = trace_probe_primary_from_call(event_call);
1532     if (WARN_ON_ONCE(!tp))
1533         return -ENOENT;
1534 
1535     DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1536 
1537     return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1538 }
1539 
1540 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1541 {
1542     int ret;
1543     struct kretprobe_trace_entry_head field;
1544     struct trace_probe *tp;
1545 
1546     tp = trace_probe_primary_from_call(event_call);
1547     if (WARN_ON_ONCE(!tp))
1548         return -ENOENT;
1549 
1550     DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1551     DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1552 
1553     return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1554 }
1555 
1556 #ifdef CONFIG_PERF_EVENTS
1557 
1558 /* Kprobe profile handler */
1559 static int
1560 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1561 {
1562     struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1563     struct kprobe_trace_entry_head *entry;
1564     struct hlist_head *head;
1565     int size, __size, dsize;
1566     int rctx;
1567 
1568     if (bpf_prog_array_valid(call)) {
1569         unsigned long orig_ip = instruction_pointer(regs);
1570         int ret;
1571 
1572         ret = trace_call_bpf(call, regs);
1573 
1574         /*
1575          * We need to check and see if we modified the pc of the
1576          * pt_regs, and if so return 1 so that we don't do the
1577          * single stepping.
1578          */
1579         if (orig_ip != instruction_pointer(regs))
1580             return 1;
1581         if (!ret)
1582             return 0;
1583     }
1584 
1585     head = this_cpu_ptr(call->perf_events);
1586     if (hlist_empty(head))
1587         return 0;
1588 
1589     dsize = __get_data_size(&tk->tp, regs);
1590     __size = sizeof(*entry) + tk->tp.size + dsize;
1591     size = ALIGN(__size + sizeof(u32), sizeof(u64));
1592     size -= sizeof(u32);
1593 
1594     entry = perf_trace_buf_alloc(size, NULL, &rctx);
1595     if (!entry)
1596         return 0;
1597 
1598     entry->ip = (unsigned long)tk->rp.kp.addr;
1599     memset(&entry[1], 0, dsize);
1600     store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1601     perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1602                   head, NULL);
1603     return 0;
1604 }
1605 NOKPROBE_SYMBOL(kprobe_perf_func);
1606 
1607 /* Kretprobe profile handler */
1608 static void
1609 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1610             struct pt_regs *regs)
1611 {
1612     struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1613     struct kretprobe_trace_entry_head *entry;
1614     struct hlist_head *head;
1615     int size, __size, dsize;
1616     int rctx;
1617 
1618     if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1619         return;
1620 
1621     head = this_cpu_ptr(call->perf_events);
1622     if (hlist_empty(head))
1623         return;
1624 
1625     dsize = __get_data_size(&tk->tp, regs);
1626     __size = sizeof(*entry) + tk->tp.size + dsize;
1627     size = ALIGN(__size + sizeof(u32), sizeof(u64));
1628     size -= sizeof(u32);
1629 
1630     entry = perf_trace_buf_alloc(size, NULL, &rctx);
1631     if (!entry)
1632         return;
1633 
1634     entry->func = (unsigned long)tk->rp.kp.addr;
1635     entry->ret_ip = get_kretprobe_retaddr(ri);
1636     store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1637     perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1638                   head, NULL);
1639 }
1640 NOKPROBE_SYMBOL(kretprobe_perf_func);
1641 
1642 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1643             const char **symbol, u64 *probe_offset,
1644             u64 *probe_addr, bool perf_type_tracepoint)
1645 {
1646     const char *pevent = trace_event_name(event->tp_event);
1647     const char *group = event->tp_event->class->system;
1648     struct trace_kprobe *tk;
1649 
1650     if (perf_type_tracepoint)
1651         tk = find_trace_kprobe(pevent, group);
1652     else
1653         tk = trace_kprobe_primary_from_call(event->tp_event);
1654     if (!tk)
1655         return -EINVAL;
1656 
1657     *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1658                           : BPF_FD_TYPE_KPROBE;
1659     if (tk->symbol) {
1660         *symbol = tk->symbol;
1661         *probe_offset = tk->rp.kp.offset;
1662         *probe_addr = 0;
1663     } else {
1664         *symbol = NULL;
1665         *probe_offset = 0;
1666         *probe_addr = (unsigned long)tk->rp.kp.addr;
1667     }
1668     return 0;
1669 }
1670 #endif  /* CONFIG_PERF_EVENTS */
1671 
1672 /*
1673  * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1674  *
1675  * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1676  * locklessly, but we can't race with this __init function.
1677  */
1678 static int kprobe_register(struct trace_event_call *event,
1679                enum trace_reg type, void *data)
1680 {
1681     struct trace_event_file *file = data;
1682 
1683     switch (type) {
1684     case TRACE_REG_REGISTER:
1685         return enable_trace_kprobe(event, file);
1686     case TRACE_REG_UNREGISTER:
1687         return disable_trace_kprobe(event, file);
1688 
1689 #ifdef CONFIG_PERF_EVENTS
1690     case TRACE_REG_PERF_REGISTER:
1691         return enable_trace_kprobe(event, NULL);
1692     case TRACE_REG_PERF_UNREGISTER:
1693         return disable_trace_kprobe(event, NULL);
1694     case TRACE_REG_PERF_OPEN:
1695     case TRACE_REG_PERF_CLOSE:
1696     case TRACE_REG_PERF_ADD:
1697     case TRACE_REG_PERF_DEL:
1698         return 0;
1699 #endif
1700     }
1701     return 0;
1702 }
1703 
1704 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1705 {
1706     struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1707     int ret = 0;
1708 
1709     raw_cpu_inc(*tk->nhit);
1710 
1711     if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1712         kprobe_trace_func(tk, regs);
1713 #ifdef CONFIG_PERF_EVENTS
1714     if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1715         ret = kprobe_perf_func(tk, regs);
1716 #endif
1717     return ret;
1718 }
1719 NOKPROBE_SYMBOL(kprobe_dispatcher);
1720 
1721 static int
1722 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1723 {
1724     struct kretprobe *rp = get_kretprobe(ri);
1725     struct trace_kprobe *tk;
1726 
1727     /*
1728      * There is a small chance that get_kretprobe(ri) returns NULL when
1729  * the kretprobe is unregistered on another CPU between the kretprobe's
1730      * trampoline_handler and this function.
1731      */
1732     if (unlikely(!rp))
1733         return 0;
1734 
1735     tk = container_of(rp, struct trace_kprobe, rp);
1736     raw_cpu_inc(*tk->nhit);
1737 
1738     if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1739         kretprobe_trace_func(tk, ri, regs);
1740 #ifdef CONFIG_PERF_EVENTS
1741     if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1742         kretprobe_perf_func(tk, ri, regs);
1743 #endif
1744     return 0;   /* We don't tweak the kernel, so just return 0 */
1745 }
1746 NOKPROBE_SYMBOL(kretprobe_dispatcher);
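
/*
 * Editor's note: TP_FLAG_TRACE and TP_FLAG_PROFILE are independent, so one
 * hit can take both paths when a probe is enabled via tracefs and attached
 * to by perf at the same time, e.g. (illustrative commands):
 *
 *	# echo 1 > events/kprobes/myprobe/enable	(sets TP_FLAG_TRACE)
 *	# perf record -e kprobes:myprobe -a sleep 1	(sets TP_FLAG_PROFILE)
 */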
1747 
1748 static struct trace_event_functions kretprobe_funcs = {
1749     .trace      = print_kretprobe_event
1750 };
1751 
1752 static struct trace_event_functions kprobe_funcs = {
1753     .trace      = print_kprobe_event
1754 };
1755 
1756 static struct trace_event_fields kretprobe_fields_array[] = {
1757     { .type = TRACE_FUNCTION_TYPE,
1758       .define_fields = kretprobe_event_define_fields },
1759     {}
1760 };
1761 
1762 static struct trace_event_fields kprobe_fields_array[] = {
1763     { .type = TRACE_FUNCTION_TYPE,
1764       .define_fields = kprobe_event_define_fields },
1765     {}
1766 };
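
/*
 * Editor's note: TRACE_FUNCTION_TYPE means the event's fields are defined
 * at runtime by the define_fields callback, since every kprobe event has
 * its own argument list. An illustrative events/kprobes/<event>/format
 * fragment produced this way (field names and offsets depend on the probe
 * definition):
 *
 *	field:unsigned long __probe_ip;	offset:8;	size:8;	signed:0;
 *	field:u64 dfd;	offset:16;	size:8;	signed:0;
 */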
1767 
1768 static inline void init_trace_event_call(struct trace_kprobe *tk)
1769 {
1770     struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1771 
1772     if (trace_kprobe_is_return(tk)) {
1773         call->event.funcs = &kretprobe_funcs;
1774         call->class->fields_array = kretprobe_fields_array;
1775     } else {
1776         call->event.funcs = &kprobe_funcs;
1777         call->class->fields_array = kprobe_fields_array;
1778     }
1779 
1780     call->flags = TRACE_EVENT_FL_KPROBE;
1781     call->class->reg = kprobe_register;
1782 }
1783 
1784 static int register_kprobe_event(struct trace_kprobe *tk)
1785 {
1786     init_trace_event_call(tk);
1787 
1788     return trace_probe_register_event_call(&tk->tp);
1789 }
1790 
1791 static int unregister_kprobe_event(struct trace_kprobe *tk)
1792 {
1793     return trace_probe_unregister_event_call(&tk->tp);
1794 }
1795 
1796 #ifdef CONFIG_PERF_EVENTS
1797 /* create a trace_kprobe, but don't add it to global lists */
1798 struct trace_event_call *
1799 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1800               bool is_return)
1801 {
1802     enum probe_print_type ptype;
1803     struct trace_kprobe *tk;
1804     int ret;
1805     char *event;
1806 
1807     /*
1808      * Local trace_kprobes are not added to dyn_event, so they are never
1809      * found by find_trace_kprobe(). Therefore, there is no concern about
1810      * duplicate names here.
1811      */
1812     event = func ? func : "DUMMY_EVENT";
1813 
1814     tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1815                 offs, 0 /* maxactive */, 0 /* nargs */,
1816                 is_return);
1817 
1818     if (IS_ERR(tk)) {
1819         pr_info("Failed to allocate trace_probe (%d)\n",
1820             (int)PTR_ERR(tk));
1821         return ERR_CAST(tk);
1822     }
1823 
1824     init_trace_event_call(tk);
1825 
1826     ptype = trace_kprobe_is_return(tk) ?
1827         PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1828     if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
1829         ret = -ENOMEM;
1830         goto error;
1831     }
1832 
1833     ret = __register_trace_kprobe(tk);
1834     if (ret < 0)
1835         goto error;
1836 
1837     return trace_probe_event_call(&tk->tp);
1838 error:
1839     free_trace_kprobe(tk);
1840     return ERR_PTR(ret);
1841 }
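
/*
 * Editor's sketch of the expected lifecycle (a caller like perf's
 * perf_kprobe_init(); error handling abbreviated, the probed function and
 * surrounding code are illustrative):
 *
 *	struct trace_event_call *call;
 *
 *	call = create_local_trace_kprobe("vfs_read", NULL, 0, false);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *	...bind a perf event to @call and use it...
 *	destroy_local_trace_kprobe(call);	(see below)
 */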
1842 
1843 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1844 {
1845     struct trace_kprobe *tk;
1846 
1847     tk = trace_kprobe_primary_from_call(event_call);
1848     if (unlikely(!tk))
1849         return;
1850 
1851     if (trace_probe_is_enabled(&tk->tp)) {
1852         WARN_ON(1);
1853         return;
1854     }
1855 
1856     __unregister_trace_kprobe(tk);
1857 
1858     free_trace_kprobe(tk);
1859 }
1860 #endif /* CONFIG_PERF_EVENTS */
1861 
1862 static __init void enable_boot_kprobe_events(void)
1863 {
1864     struct trace_array *tr = top_trace_array();
1865     struct trace_event_file *file;
1866     struct trace_kprobe *tk;
1867     struct dyn_event *pos;
1868 
1869     mutex_lock(&event_mutex);
1870     for_each_trace_kprobe(tk, pos) {
1871         list_for_each_entry(file, &tr->events, list)
1872             if (file->event_call == trace_probe_event_call(&tk->tp))
1873                 trace_event_enable_disable(file, 1, 0);
1874     }
1875     mutex_unlock(&event_mutex);
1876 }
1877 
1878 static __init void setup_boot_kprobe_events(void)
1879 {
1880     char *p, *cmd = kprobe_boot_events_buf;
1881     int ret;
1882 
1883     strreplace(kprobe_boot_events_buf, ',', ' ');
1884 
1885     while (cmd && *cmd != '\0') {
1886         p = strchr(cmd, ';');
1887         if (p)
1888             *p++ = '\0';
1889 
1890         ret = create_or_delete_trace_kprobe(cmd);
1891         if (ret)
1892             pr_warn("Failed to add event (%d): %s\n", ret, cmd);
1893 
1894         cmd = p;
1895     }
1896 
1897     enable_boot_kprobe_events();
1898 }
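
/*
 * Editor's example of the boot-time syntax parsed above (the documented
 * "kprobe_event=" format): commas separate probe arguments and are
 * rewritten to spaces by strreplace(), semicolons separate definitions:
 *
 *	kprobe_event=p,vfs_read,$arg1,$arg2;r,vfs_read,$retval
 *
 * is handed to create_or_delete_trace_kprobe() as the two commands
 *
 *	p vfs_read $arg1 $arg2
 *	r vfs_read $retval
 */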
1899 
1900 /*
1901  * Register dynevent ops at core_initcall. This allows the kernel to set up
1902  * kprobe events in postcore_initcall without tracefs.
1903  */
1904 static __init int init_kprobe_trace_early(void)
1905 {
1906     int ret;
1907 
1908     ret = dyn_event_register(&trace_kprobe_ops);
1909     if (ret)
1910         return ret;
1911 
1912     if (register_module_notifier(&trace_kprobe_module_nb))
1913         return -EINVAL;
1914 
1915     return 0;
1916 }
1917 core_initcall(init_kprobe_trace_early);
1918 
1919 /* Make a tracefs interface for controlling probe points */
1920 static __init int init_kprobe_trace(void)
1921 {
1922     int ret;
1923 
1924     ret = tracing_init_dentry();
1925     if (ret)
1926         return 0;
1927 
1928     /* Event list interface */
1929     trace_create_file("kprobe_events", TRACE_MODE_WRITE,
1930               NULL, NULL, &kprobe_events_ops);
1931 
1932     /* Profile interface */
1933     trace_create_file("kprobe_profile", TRACE_MODE_READ,
1934               NULL, NULL, &kprobe_profile_ops);
1935 
1936     setup_boot_kprobe_events();
1937 
1938     return 0;
1939 }
1940 fs_initcall(init_kprobe_trace);
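
/*
 * Editor's usage example for the two files created above (the standard
 * interface from Documentation/trace/kprobetrace.rst; paths assume tracefs
 * is mounted at /sys/kernel/tracing):
 *
 *	# echo 'p:myprobe vfs_read $arg1 $arg2' >> /sys/kernel/tracing/kprobe_events
 *	# echo 1 > /sys/kernel/tracing/events/kprobes/myprobe/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 *	# cat /sys/kernel/tracing/kprobe_profile	(per-probe hit/miss counts)
 */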
1941 
1942 
1943 #ifdef CONFIG_FTRACE_STARTUP_TEST
1944 static __init struct trace_event_file *
1945 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1946 {
1947     struct trace_event_file *file;
1948 
1949     list_for_each_entry(file, &tr->events, list)
1950         if (file->event_call == trace_probe_event_call(&tk->tp))
1951             return file;
1952 
1953     return NULL;
1954 }
1955 
1956 /*
1957  * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1958  * stage, so we can do this locklessly.
1959  */
1960 static __init int kprobe_trace_self_tests_init(void)
1961 {
1962     int ret, warn = 0;
1963     int (*target)(int, int, int, int, int, int);
1964     struct trace_kprobe *tk;
1965     struct trace_event_file *file;
1966 
1967     if (tracing_is_disabled())
1968         return -ENODEV;
1969 
1970     if (tracing_selftest_disabled)
1971         return 0;
1972 
1973     target = kprobe_trace_selftest_target;
1974 
1975     pr_info("Testing kprobe tracing: ");
1976 
1977     ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
1978     if (WARN_ON_ONCE(ret)) {
1979         pr_warn("error on probing function entry.\n");
1980         warn++;
1981     } else {
1982         /* Enable trace point */
1983         tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1984         if (WARN_ON_ONCE(tk == NULL)) {
1985             pr_warn("error on getting new probe.\n");
1986             warn++;
1987         } else {
1988             file = find_trace_probe_file(tk, top_trace_array());
1989             if (WARN_ON_ONCE(file == NULL)) {
1990                 pr_warn("error on getting probe file.\n");
1991                 warn++;
1992             } else
1993                 enable_trace_kprobe(
1994                     trace_probe_event_call(&tk->tp), file);
1995         }
1996     }
1997 
1998     ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
1999     if (WARN_ON_ONCE(ret)) {
2000         pr_warn("error on probing function return.\n");
2001         warn++;
2002     } else {
2003         /* Enable trace point */
2004         tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2005         if (WARN_ON_ONCE(tk == NULL)) {
2006             pr_warn("error on getting 2nd new probe.\n");
2007             warn++;
2008         } else {
2009             file = find_trace_probe_file(tk, top_trace_array());
2010             if (WARN_ON_ONCE(file == NULL)) {
2011                 pr_warn("error on getting probe file.\n");
2012                 warn++;
2013             } else
2014                 enable_trace_kprobe(
2015                     trace_probe_event_call(&tk->tp), file);
2016         }
2017     }
2018 
2019     if (warn)
2020         goto end;
2021 
2022     ret = target(1, 2, 3, 4, 5, 6);
2023 
2024     /*
2025      * No error is expected here; the check only stops the optimizer from
2026      * removing the otherwise side-effect-free call to target(). The
2027      * selftest target sums its six arguments, hence 1+2+3+4+5+6 == 21.
2028      */
2029     if (ret != 21)
2030         warn++;
2031 
2032     /* Disable the trace points before removing them */
2033     tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2034     if (WARN_ON_ONCE(tk == NULL)) {
2035         pr_warn("error on getting test probe.\n");
2036         warn++;
2037     } else {
2038         if (trace_kprobe_nhit(tk) != 1) {
2039             pr_warn("incorrect number of testprobe hits\n");
2040             warn++;
2041         }
2042 
2043         file = find_trace_probe_file(tk, top_trace_array());
2044         if (WARN_ON_ONCE(file == NULL)) {
2045             pr_warn("error on getting probe file.\n");
2046             warn++;
2047         } else
2048             disable_trace_kprobe(
2049                 trace_probe_event_call(&tk->tp), file);
2050     }
2051 
2052     tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2053     if (WARN_ON_ONCE(tk == NULL)) {
2054         pr_warn("error on getting 2nd test probe.\n");
2055         warn++;
2056     } else {
2057         if (trace_kprobe_nhit(tk) != 1) {
2058             pr_warn("incorrect number of testprobe2 hits\n");
2059             warn++;
2060         }
2061 
2062         file = find_trace_probe_file(tk, top_trace_array());
2063         if (WARN_ON_ONCE(file == NULL)) {
2064             pr_warn("error on getting probe file.\n");
2065             warn++;
2066         } else
2067             disable_trace_kprobe(
2068                 trace_probe_event_call(&tk->tp), file);
2069     }
2070 
2071     ret = create_or_delete_trace_kprobe("-:testprobe");
2072     if (WARN_ON_ONCE(ret)) {
2073         pr_warn("error on deleting a probe.\n");
2074         warn++;
2075     }
2076 
2077     ret = create_or_delete_trace_kprobe("-:testprobe2");
2078     if (WARN_ON_ONCE(ret)) {
2079         pr_warn("error on deleting a probe.\n");
2080         warn++;
2081     }
2082 
2083 end:
2084     ret = dyn_events_release_all(&trace_kprobe_ops);
2085     if (WARN_ON_ONCE(ret)) {
2086         pr_warn("error on cleaning up probes.\n");
2087         warn++;
2088     }
2089     /*
2090      * Wait for the optimizer work to finish. Otherwise it might fiddle
2091      * with probes in already freed __init text.
2092      */
2093     wait_for_kprobe_optimizer();
2094     if (warn)
2095         pr_cont("NG: Some tests failed. Please check them.\n");
2096     else
2097         pr_cont("OK\n");
2098     return 0;
2099 }
2100 
2101 late_initcall(kprobe_trace_self_tests_init);
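
/*
 * Editor's note: on success the selftest contributes one line to the boot
 * log, assembled from the pr_info()/pr_cont() calls above and prefixed by
 * the pr_fmt defined at the top of this file:
 *
 *	trace_kprobe: Testing kprobe tracing: OK
 */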
2102 
2103 #endif