Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Copyright (C) 2008-2014 Mathieu Desnoyers
0004  */
0005 #include <linux/module.h>
0006 #include <linux/mutex.h>
0007 #include <linux/types.h>
0008 #include <linux/jhash.h>
0009 #include <linux/list.h>
0010 #include <linux/rcupdate.h>
0011 #include <linux/tracepoint.h>
0012 #include <linux/err.h>
0013 #include <linux/slab.h>
0014 #include <linux/sched/signal.h>
0015 #include <linux/sched/task.h>
0016 #include <linux/static_key.h>
0017 
/*
 * Number of probe functions attached to a tracepoint, saturating at
 * TP_FUNC_N for three or more (see nr_func_state()).
 */
enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};
0024 
/* Linker-provided bounds of the builtin tracepoint pointer section. */
extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

/* SRCU domain protecting tracepoint probe arrays (see release_probes()). */
DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);
0030 
/*
 * Identifies which probe-count transition sequence a grace-period
 * snapshot guards: 1->0->1 or N->...->2->1 (see the users in
 * tracepoint_add_func()/tracepoint_remove_func()).
 */
enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,
};

/* Grace-period cookies captured by tp_rcu_get_state(), consumed by tp_rcu_cond_sync(). */
struct tp_transition_snapshot {
	unsigned long rcu;	/* cookie from get_state_synchronize_rcu() */
	unsigned long srcu;	/* cookie from start_poll_synchronize_srcu() */
	bool ongoing;		/* true while a snapshot is pending */
};

/* Protected by tracepoints_mutex */
static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];
0046 
/* Record the current RCU and SRCU grace-period state for transition @sync. */
static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = true;
}
0056 
/*
 * Wait, only if still needed, for the RCU and SRCU grace periods
 * recorded by the last tp_rcu_get_state(@sync). No-op when no snapshot
 * is pending.
 */
static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
		synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = false;
}
0068 
/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/* Probe arrays queued for freeing before SRCU init, chained via rcu_head.next. */
static struct rcu_head *early_probes;
/* Set once at postcore_initcall time, when SRCU may be used for freeing. */
static bool ok_to_free_tracepoints;
0090 
/*
 * Note about RCU :
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;		/* for deferred kfree via call_rcu()/call_srcu() */
	struct tracepoint_func probes[];	/* NULL-func-terminated probe array */
};
0100 
/* Called in removal of a func but failed to allocate a new tp_funcs */
static void tp_stub_func(void)
{
}
0106 
0107 static inline void *allocate_probes(int count)
0108 {
0109     struct tp_probes *p  = kmalloc(struct_size(p, probes, count),
0110                        GFP_KERNEL);
0111     return p == NULL ? NULL : p->probes;
0112 }
0113 
/* Final SRCU callback: actually free the old probes array. */
static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

/* Sched-RCU callback: chain into an SRCU callback so both grace periods elapse. */
static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}
0123 
0124 static __init int release_early_probes(void)
0125 {
0126     struct rcu_head *tmp;
0127 
0128     ok_to_free_tracepoints = true;
0129 
0130     while (early_probes) {
0131         tmp = early_probes;
0132         early_probes = tmp->next;
0133         call_rcu(tmp, rcu_free_old_probes);
0134     }
0135 
0136     return 0;
0137 }
0138 
0139 /* SRCU is initialized at core_initcall */
0140 postcore_initcall(release_early_probes);
0141 
/*
 * Defer freeing of an old probes array until both the sched RCU and the
 * SRCU grace periods have elapsed. If SRCU is not initialized yet, the
 * array is queued on early_probes instead (freed by release_early_probes()).
 */
static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU,
		 * by calling the SRCU callback in the sched RCU callback we
		 * cover both cases. So let us chain the SRCU and sched RCU
		 * callbacks to wait for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}
0167 
0168 static void debug_print_probes(struct tracepoint_func *funcs)
0169 {
0170     int i;
0171 
0172     if (!tracepoint_debug || !funcs)
0173         return;
0174 
0175     for (i = 0; funcs[i].func; i++)
0176         printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
0177 }
0178 
/*
 * Add @tp_func to the probe array @*funcs, keeping entries ordered by
 * descending priority (inserted before the first entry of strictly
 * lower priority; stable among equal priorities). Stub entries left
 * behind by failed removals are dropped while copying.
 *
 * On success, *funcs points to a newly allocated NULL-terminated array
 * and the previous array is returned for the caller to release.
 * Returns ERR_PTR(-EINVAL) for a NULL probe function,
 * ERR_PTR(-EEXIST) when the same (func, data) pair is already present,
 * or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterate over old probe array. */
	int nr_probes = 0;	/* Counter for probes */
	int pos = -1;		/* Insertion position into new array */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub functions. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Insert before probes of lower priority */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now points to the end of the new array */
	} else {
		pos = 0;
		nr_probes = 1; /* must point at end of array */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
0231 
/*
 * Remove @tp_func (or, when its func is NULL, every entry) from the
 * probe array @*funcs; stale tp_stub_func entries are removed as well.
 *
 * On success, *funcs points to a new array (or NULL if it became empty)
 * and the old array is returned for the caller to release. If the
 * replacement allocation fails, the matching entries are overwritten in
 * place with tp_stub_func and the old array is both kept in *funcs and
 * returned. Returns ERR_PTR(-ENOENT) when there is no array at all.
 */
static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}
0293 
0294 /*
0295  * Count the number of functions (enum tp_func_state) in a tp_funcs array.
0296  */
0297 static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
0298 {
0299     if (!tp_funcs)
0300         return TP_FUNC_0;
0301     if (!tp_funcs[1].func)
0302         return TP_FUNC_1;
0303     if (!tp_funcs[2].func)
0304         return TP_FUNC_2;
0305     return TP_FUNC_N;   /* 3 or more */
0306 }
0307 
/*
 * Point the tracepoint's static call directly at the lone probe when
 * exactly one function is attached, otherwise at the generic iterator.
 */
static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
0319 
/*
 * Add the probe function to a tracepoint.
 *
 * Calls tp->regfunc() for the first probe added, inserts @func by
 * @prio via func_add(), publishes the new array with RCU and updates
 * the static call/key. When @warn is false, an -EEXIST result does not
 * trigger a warning (only non-ENOMEM errors warn).
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has an smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it.  This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_key_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>1) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(old);
	return 0;
}
0389 
/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This
 * is ensured by preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence. If the first
		 * element's data has changed, then force the synchronization
		 * to prevent current readers that have loaded the old data
		 * from calling the new function.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(old);
	return 0;
}
0461 
0462 /**
0463  * tracepoint_probe_register_prio_may_exist -  Connect a probe to a tracepoint with priority
0464  * @tp: tracepoint
0465  * @probe: probe handler
0466  * @data: tracepoint data
0467  * @prio: priority of this function over other registered functions
0468  *
0469  * Same as tracepoint_probe_register_prio() except that it will not warn
0470  * if the tracepoint is already registered.
0471  */
0472 int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
0473                          void *data, int prio)
0474 {
0475     struct tracepoint_func tp_func;
0476     int ret;
0477 
0478     mutex_lock(&tracepoints_mutex);
0479     tp_func.func = probe;
0480     tp_func.data = data;
0481     tp_func.prio = prio;
0482     ret = tracepoint_add_func(tp, &tp_func, prio, false);
0483     mutex_unlock(&tracepoints_mutex);
0484     return ret;
0485 }
0486 EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
0487 
0488 /**
0489  * tracepoint_probe_register_prio -  Connect a probe to a tracepoint with priority
0490  * @tp: tracepoint
0491  * @probe: probe handler
0492  * @data: tracepoint data
0493  * @prio: priority of this function over other registered functions
0494  *
0495  * Returns 0 if ok, error value on error.
0496  * Note: if @tp is within a module, the caller is responsible for
0497  * unregistering the probe before the module is gone. This can be
0498  * performed either with a tracepoint module going notifier, or from
0499  * within module exit functions.
0500  */
0501 int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
0502                    void *data, int prio)
0503 {
0504     struct tracepoint_func tp_func;
0505     int ret;
0506 
0507     mutex_lock(&tracepoints_mutex);
0508     tp_func.func = probe;
0509     tp_func.data = data;
0510     tp_func.prio = prio;
0511     ret = tracepoint_add_func(tp, &tp_func, prio, true);
0512     mutex_unlock(&tracepoints_mutex);
0513     return ret;
0514 }
0515 EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);
0516 
/**
 * tracepoint_probe_register -  Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	/* Plain registration uses the default probe priority. */
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
0534 
0535 /**
0536  * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
0537  * @tp: tracepoint
0538  * @probe: probe function pointer
0539  * @data: tracepoint data
0540  *
0541  * Returns 0 if ok, error value on error.
0542  */
0543 int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
0544 {
0545     struct tracepoint_func tp_func;
0546     int ret;
0547 
0548     mutex_lock(&tracepoints_mutex);
0549     tp_func.func = probe;
0550     tp_func.data = data;
0551     ret = tracepoint_remove_func(tp, &tp_func);
0552     mutex_unlock(&tracepoints_mutex);
0553     return ret;
0554 }
0555 EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
0556 
0557 static void for_each_tracepoint_range(
0558         tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
0559         void (*fct)(struct tracepoint *tp, void *priv),
0560         void *priv)
0561 {
0562     tracepoint_ptr_t *iter;
0563 
0564     if (!begin)
0565         return;
0566     for (iter = begin; iter < end; iter++)
0567         fct(tracepoint_ptr_deref(iter), priv);
0568 }
0569 
0570 #ifdef CONFIG_MODULES
0571 bool trace_module_has_bad_taint(struct module *mod)
0572 {
0573     return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
0574                    (1 << TAINT_UNSIGNED_MODULE) |
0575                    (1 << TAINT_TEST));
0576 }
0577 
/* Notifier chain invoked on tracepoint module coming/going events. */
static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
0579 
0580 /**
0581  * register_tracepoint_module_notifier - register tracepoint coming/going notifier
0582  * @nb: notifier block
0583  *
0584  * Notifiers registered with this function are called on module
0585  * coming/going with the tracepoint_module_list_mutex held.
0586  * The notifier block callback should expect a "struct tp_module" data
0587  * pointer.
0588  */
0589 int register_tracepoint_module_notifier(struct notifier_block *nb)
0590 {
0591     struct tp_module *tp_mod;
0592     int ret;
0593 
0594     mutex_lock(&tracepoint_module_list_mutex);
0595     ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
0596     if (ret)
0597         goto end;
0598     list_for_each_entry(tp_mod, &tracepoint_module_list, list)
0599         (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
0600 end:
0601     mutex_unlock(&tracepoint_module_list_mutex);
0602     return ret;
0603 }
0604 EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
0605 
0606 /**
0607  * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
0608  * @nb: notifier block
0609  *
0610  * The notifier block callback should expect a "struct tp_module" data
0611  * pointer.
0612  */
0613 int unregister_tracepoint_module_notifier(struct notifier_block *nb)
0614 {
0615     struct tp_module *tp_mod;
0616     int ret;
0617 
0618     mutex_lock(&tracepoint_module_list_mutex);
0619     ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
0620     if (ret)
0621         goto end;
0622     list_for_each_entry(tp_mod, &tracepoint_module_list, list)
0623         (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
0624 end:
0625     mutex_unlock(&tracepoint_module_list_mutex);
0626     return ret;
0627 
0628 }
0629 EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
0630 
/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	/* A non-NULL funcs here means a probe was left registered. */
	WARN_ON_ONCE(tp->funcs);
}
0639 
0640 static int tracepoint_module_coming(struct module *mod)
0641 {
0642     struct tp_module *tp_mod;
0643     int ret = 0;
0644 
0645     if (!mod->num_tracepoints)
0646         return 0;
0647 
0648     /*
0649      * We skip modules that taint the kernel, especially those with different
0650      * module headers (for forced load), to make sure we don't cause a crash.
0651      * Staging, out-of-tree, unsigned GPL, and test modules are fine.
0652      */
0653     if (trace_module_has_bad_taint(mod))
0654         return 0;
0655     mutex_lock(&tracepoint_module_list_mutex);
0656     tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
0657     if (!tp_mod) {
0658         ret = -ENOMEM;
0659         goto end;
0660     }
0661     tp_mod->mod = mod;
0662     list_add_tail(&tp_mod->list, &tracepoint_module_list);
0663     blocking_notifier_call_chain(&tracepoint_notify_list,
0664             MODULE_STATE_COMING, tp_mod);
0665 end:
0666     mutex_unlock(&tracepoint_module_list_mutex);
0667     return ret;
0668 }
0669 
/*
 * Stop tracking a tracepoint-bearing module on unload: notify the
 * registered listeners, drop it from the local list, and warn if any
 * probe was left registered on one of its tracepoints.
 */
static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}
0702 
0703 static int tracepoint_module_notify(struct notifier_block *self,
0704         unsigned long val, void *data)
0705 {
0706     struct module *mod = data;
0707     int ret = 0;
0708 
0709     switch (val) {
0710     case MODULE_STATE_COMING:
0711         ret = tracepoint_module_coming(mod);
0712         break;
0713     case MODULE_STATE_LIVE:
0714         break;
0715     case MODULE_STATE_GOING:
0716         tracepoint_module_going(mod);
0717         break;
0718     case MODULE_STATE_UNFORMED:
0719         break;
0720     }
0721     return notifier_from_errno(ret);
0722 }
0723 
/* Module notifier hook feeding tracepoint_module_notify(). */
static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};
0728 
0729 static __init int init_tracepoints(void)
0730 {
0731     int ret;
0732 
0733     ret = register_module_notifier(&tracepoint_module_nb);
0734     if (ret)
0735         pr_warn("Failed to register tracepoint module enter notifier\n");
0736 
0737     return ret;
0738 }
0739 __initcall(init_tracepoints);
0740 #endif /* CONFIG_MODULES */
0741 
/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 *
 * Walks the builtin tracepoint section only; module tracepoints are not
 * visited here.
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
0754 
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

/*
 * Called when a syscall tracepoint gains its first probe: on the
 * 0->1 refcount transition, flag every existing task so it takes the
 * syscall tracing slow path. Always returns 0.
 */
int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}
0775 
/*
 * Called when a syscall tracepoint loses a probe: on the 1->0 refcount
 * transition, clear the per-task flag again so syscalls return to the
 * fast path.
 */
void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif