Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * Ftrace header.  For implementation details beyond the random comments
0004  * scattered below, see: Documentation/trace/ftrace-design.rst
0005  */
0006 
0007 #ifndef _LINUX_FTRACE_H
0008 #define _LINUX_FTRACE_H
0009 
0010 #include <linux/trace_recursion.h>
0011 #include <linux/trace_clock.h>
0012 #include <linux/jump_label.h>
0013 #include <linux/kallsyms.h>
0014 #include <linux/linkage.h>
0015 #include <linux/bitops.h>
0016 #include <linux/ptrace.h>
0017 #include <linux/ktime.h>
0018 #include <linux/sched.h>
0019 #include <linux/types.h>
0020 #include <linux/init.h>
0021 #include <linux/fs.h>
0022 
0023 #include <asm/ftrace.h>
0024 
0025 /*
0026  * If the arch supports passing the variable contents of
0027  * function_trace_op as the third parameter back from the
0028  * mcount call, then the arch should define this as 1.
0029  */
0030 #ifndef ARCH_SUPPORTS_FTRACE_OPS
0031 #define ARCH_SUPPORTS_FTRACE_OPS 0
0032 #endif
0033 
0034 #ifdef CONFIG_TRACING
0035 extern void ftrace_boot_snapshot(void);
0036 #else
0037 static inline void ftrace_boot_snapshot(void) { }
0038 #endif
0039 
0040 #ifdef CONFIG_FUNCTION_TRACER
0041 struct ftrace_ops;
0042 struct ftrace_regs;
0043 /*
0044  * If the arch's mcount caller does not support all of ftrace's
0045  * features, then it must call an indirect function that
0046  * does. Or at least does enough to prevent any unwelcome side effects.
0047  *
0048  * Also define the function prototype that these architectures use
0049  * to call the ftrace_ops_list_func().
0050  */
0051 #if !ARCH_SUPPORTS_FTRACE_OPS
0052 # define FTRACE_FORCE_LIST_FUNC 1
0053 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
0054 #else
0055 # define FTRACE_FORCE_LIST_FUNC 0
0056 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
0057                    struct ftrace_ops *op, struct ftrace_regs *fregs);
0058 #endif
0059 #endif /* CONFIG_FUNCTION_TRACER */
0060 
0061 /* Main tracing buffer and events set up */
0062 #ifdef CONFIG_TRACING
0063 void trace_init(void);
0064 void early_trace_init(void);
0065 #else
0066 static inline void trace_init(void) { }
0067 static inline void early_trace_init(void) { }
0068 #endif
0069 
0070 struct module;
0071 struct ftrace_hash;
0072 struct ftrace_direct_func;
0073 
0074 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
0075     defined(CONFIG_DYNAMIC_FTRACE)
0076 const char *
0077 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
0078            unsigned long *off, char **modname, char *sym);
0079 #else
0080 static inline const char *
0081 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
0082            unsigned long *off, char **modname, char *sym)
0083 {
0084     return NULL;
0085 }
0086 #endif
0087 
0088 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
0089 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
0090                char *type, char *name,
0091                char *module_name, int *exported);
0092 #else
0093 static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
0094                      char *type, char *name,
0095                      char *module_name, int *exported)
0096 {
0097     return -1;
0098 }
0099 #endif
0100 
#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

/*
 * Fallback ftrace_regs for architectures that do not define their own
 * (CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS unset): simply wrap a full pt_regs.
 */
struct ftrace_regs {
	struct pt_regs		regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)

/*
 * ftrace_instruction_pointer_set() is to be defined by the architecture
 * if it allows setting of the instruction pointer from the ftrace_regs
 * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
 * live kernel patching.  Without that support it is a no-op.
 */
#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
0120 
0121 static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
0122 {
0123     if (!fregs)
0124         return NULL;
0125 
0126     return arch_ftrace_get_regs(fregs);
0127 }
0128 
0129 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
0130                   struct ftrace_ops *op, struct ftrace_regs *fregs);
0131 
0132 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
0133 
0134 /*
0135  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
0136  * set in the flags member.
0137  * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
0138  * IPMODIFY are a kind of attribute flags which can be set only before
0139  * registering the ftrace_ops, and can not be modified while registered.
0140  * Changing those attribute flags after registering ftrace_ops will
0141  * cause unexpected results.
0142  *
0143  * ENABLED - set/unset when ftrace_ops is registered/unregistered
0144  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
0145  *           allocated ftrace_ops which need special care
0146  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
0147  *            and passed to the callback. If this flag is set, but the
0148  *            architecture does not support passing regs
0149  *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
0150  *            ftrace_ops will fail to register, unless the next flag
0151  *            is set.
0152  * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
0153  *            handler can handle an arch that does not save regs
0154  *            (the handler tests if regs == NULL), then it can set
0155  *            this flag instead. It will not fail registering the ftrace_ops
0156  *            but, the regs field will be NULL if the arch does not support
0157  *            passing regs to the handler.
0158  *            Note, if this flag is set, the SAVE_REGS flag will automatically
0159  *            get set upon registering the ftrace_ops, if the arch supports it.
0160  * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
0161  *            that the call back needs recursion protection. If it does
0162  *            not set this, then the ftrace infrastructure will assume
0163  *            that the callback can handle recursion on its own.
0164  * STUB   - The ftrace_ops is just a place holder.
0165  * INITIALIZED - The ftrace_ops has already been initialized (first use time
0166  *            register_ftrace_function() is called, it will initialized the ops)
0167  * DELETED - The ops are being deleted, do not let them be registered again.
0168  * ADDING  - The ops is in the process of being added.
0169  * REMOVING - The ops is in the process of being removed.
0170  * MODIFYING - The ops is in the process of changing its filter functions.
0171  * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
0172  *            The arch specific code sets this flag when it allocated a
0173  *            trampoline. This lets the arch know that it can update the
0174  *            trampoline in case the callback function changes.
0175  *            The ftrace_ops trampoline can be set by the ftrace users, and
0176  *            in such cases the arch must not modify it. Only the arch ftrace
0177  *            core code should set this flag.
0178  * IPMODIFY - The ops can modify the IP register. This can only be set with
0179  *            SAVE_REGS. If another ops with this flag set is already registered
0180  *            for any of the functions that this ops will be registered for, then
0181  *            this ops will fail to register or set_filter_ip.
0182  * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
0183  * RCU     - Set when the ops can only be called when RCU is watching.
0184  * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
0185  * PERMANENT - Set when the ops is permanent and should not be affected by
0186  *             ftrace_enabled.
0187  * DIRECT - Used by the direct ftrace_ops helper for direct functions
0188  *            (internal ftrace only, should not be used by others)
0189  */
/* See the comment block above for the meaning of each flag. */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};
0210 
0211 /*
0212  * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
0213  * to a ftrace_ops. Note, the requests may fail.
0214  *
0215  * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
0216  *                              function as an ops with IPMODIFY. Called
0217  *                              when the DIRECT ops is being registered.
0218  *                              This is called with both direct_mutex and
0219  *                              ftrace_lock are locked.
0220  *
0221  * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
0222  *                              function as an ops with IPMODIFY. Called
0223  *                              when the other ops (the one with IPMODIFY)
0224  *                              is being registered.
0225  *                              This is called with direct_mutex locked.
0226  *
0227  * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops to work on the same
0228  *                               function as an ops with IPMODIFY. Called
0229  *                               when the other ops (the one with IPMODIFY)
0230  *                               is being unregistered.
0231  *                               This is called with direct_mutex locked.
0232  */
/*
 * Requests passed to an ftrace_ops' ops_func callback; locking context
 * for each command is described in the comment block above.
 */
enum ftrace_ops_cmd {
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
	FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
};
0238 
0239 /*
0240  * For most ftrace_ops_cmd,
0241  * Returns:
0242  *        0 - Success.
0243  *        Negative on failure. The return value is dependent on the
0244  *        callback.
0245  */
0246 typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
0247 
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;	/* functions excluded from tracing */
	struct ftrace_hash __rcu	*filter_hash;	/* functions selected for tracing */
	struct mutex			regex_lock;	/* serializes updates to the hashes */
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
/*
 * Without dynamic ftrace there is no mcount record memory to free,
 * but the boot snapshot is still triggered at this point in boot.
 */
static inline void ftrace_free_init_mem(void)
{
	ftrace_boot_snapshot();
}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif
0265 
0266 /*
0267  * Note, ftrace_ops can be referenced outside of RCU protection, unless
0268  * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
0269  * core data, the unregistering of it will perform a scheduling on all CPUs
0270  * to make sure that there are no more users. Depending on the load of the
0271  * system that may take a bit of time.
0272  *
0273  * Any private data added must also take care not to be freed and if private
0274  * data is added to a ftrace_ops that is in core code, the user of the
0275  * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
0276  */
struct ftrace_ops {
	ftrace_func_t			func;		/* callback invoked at traced sites */
	struct ftrace_ops __rcu		*next;		/* ftrace_ops_list linkage */
	unsigned long			flags;		/* FTRACE_OPS_FL_* bits */
	void				*private;	/* owner data (see FL_TRACE_ARRAY) */
	ftrace_func_t			saved_func;	/* NOTE(review): saved copy of func; exact use not visible in this header */
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;	/* this ops' own filter/notrace hashes */
	struct ftrace_ops_hash		*func_hash;	/* presumably the hash set currently in effect — verify in core */
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;	/* arch trampoline address (see FL_ALLOC_TRAMP) */
	unsigned long			trampoline_size;
	struct list_head		list;
	ftrace_ops_func_t		ops_func;	/* handles FTRACE_OPS_CMD_* requests */
#endif
};
0293 
0294 extern struct ftrace_ops __rcu *ftrace_ops_list;
0295 extern struct ftrace_ops ftrace_list_end;
0296 
0297 /*
0298  * Traverse the ftrace_ops_list, invoking all entries.  The reason that we
0299  * can use rcu_dereference_raw_check() is that elements removed from this list
0300  * are simply leaked, so there is no need to interact with a grace-period
0301  * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
0302  * concurrent insertions into the ftrace_ops_list.
0303  *
0304  * Silly Alpha and silly pointer-speculation compiler optimizations!
0305  */
/* Opens a do/while loop over @list; pair with while_for_each_ftrace_op(). */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 * Note: the assignment inside while() is intentional — it advances @op.
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
0316 
0317 /*
0318  * Type of the current tracing.
0319  */
0320 enum ftrace_tracing_type_t {
0321     FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
0322     FTRACE_TYPE_RETURN, /* Hook the return of the function */
0323 };
0324 
0325 /* Current tracing type, default is FTRACE_TYPE_ENTER */
0326 extern enum ftrace_tracing_type_t ftrace_tracing_type;
0327 
0328 /*
0329  * The ftrace_ops must be a static and should also
0330  * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_op or modify the
0332  * next pointer after it has been registered. Even after unregistering
0333  * it, the next pointer may still be used internally.
0334  */
0335 int register_ftrace_function(struct ftrace_ops *ops);
0336 int unregister_ftrace_function(struct ftrace_ops *ops);
0337 
0338 extern void ftrace_stub(unsigned long a0, unsigned long a1,
0339             struct ftrace_ops *op, struct ftrace_regs *fregs);
0340 
0341 
0342 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
0343 #else /* !CONFIG_FUNCTION_TRACER */
0344 /*
0345  * (un)register_ftrace_function must be a macro since the ops parameter
0346  * must not be evaluated.
0347  */
0348 #define register_ftrace_function(ops) ({ 0; })
0349 #define unregister_ftrace_function(ops) ({ 0; })
0350 static inline void ftrace_kill(void) { }
0351 static inline void ftrace_free_init_mem(void) { }
0352 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
0353 static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
0354 {
0355     return -EOPNOTSUPP;
0356 }
0357 #endif /* CONFIG_FUNCTION_TRACER */
0358 
/* A single function entry in an ftrace hash, keyed by its ip. */
struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;	/* address of the function's call site */
	unsigned long direct; /* for direct lookup only */
};
0364 
0365 struct dyn_ftrace;
0366 
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr);

#else
struct ftrace_ops;
# define ftrace_direct_func_count 0
/*
 * Stubs for kernels without direct-call support: the single-ip entry
 * points report -ENOTSUPP, the *_multi/caller variants -ENODEV, and
 * the lookup helpers report "not found" (NULL / 0).
 */
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
static inline int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
0431 
0432 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
0433 /*
0434  * This must be implemented by the architecture.
0435  * It is the way the ftrace direct_ops helper, when called
0436  * via ftrace (because there's other callbacks besides the
0437  * direct call), can inform the architecture's trampoline that this
0438  * routine has a direct caller, and what the caller is.
0439  *
0440  * For example, in x86, it returns the direct caller
0441  * callback function via the regs->orig_ax parameter.
0442  * Then in the ftrace trampoline, if this is set, it makes
0443  * the return from the trampoline jump to the direct caller
0444  * instead of going back to the function it just traced.
0445  */
0446 static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
0447                          unsigned long addr) { }
0448 #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
0449 
0450 #ifdef CONFIG_STACK_TRACER
0451 
0452 extern int stack_tracer_enabled;
0453 
0454 int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
0455                size_t *lenp, loff_t *ppos);
0456 
0457 /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
0458 DECLARE_PER_CPU(int, disable_stack_tracer);
0459 
0460 /**
0461  * stack_tracer_disable - temporarily disable the stack tracer
0462  *
0463  * There's a few locations (namely in RCU) where stack tracing
0464  * cannot be executed. This function is used to disable stack
0465  * tracing during those critical sections.
0466  *
0467  * This function must be called with preemption or interrupts
0468  * disabled and stack_tracer_enable() must be called shortly after
0469  * while preemption or interrupts are still disabled.
0470  */
0471 static inline void stack_tracer_disable(void)
0472 {
0473     /* Preemption or interrupts must be disabled */
0474     if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
0475         WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
0476     this_cpu_inc(disable_stack_tracer);
0477 }
0478 
0479 /**
0480  * stack_tracer_enable - re-enable the stack tracer
0481  *
0482  * After stack_tracer_disable() is called, stack_tracer_enable()
0483  * must be called shortly afterward.
0484  */
static inline void stack_tracer_enable(void)
{
	/* Preemption or interrupts must still be disabled (see stack_tracer_disable()) */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
0491 #else
0492 static inline void stack_tracer_disable(void) { }
0493 static inline void stack_tracer_enable(void) { }
0494 #endif
0495 
0496 #ifdef CONFIG_DYNAMIC_FTRACE
0497 
0498 void ftrace_arch_code_modify_prepare(void);
0499 void ftrace_arch_code_modify_post_process(void);
0500 
0501 enum ftrace_bug_type {
0502     FTRACE_BUG_UNKNOWN,
0503     FTRACE_BUG_INIT,
0504     FTRACE_BUG_NOP,
0505     FTRACE_BUG_CALL,
0506     FTRACE_BUG_UPDATE,
0507 };
0508 extern enum ftrace_bug_type ftrace_bug_type;
0509 
0510 /*
0511  * Archs can set this to point to a variable that holds the value that was
0512  * expected at the call site before calling ftrace_bug().
0513  */
0514 extern const void *ftrace_expected;
0515 
0516 void ftrace_bug(int err, struct dyn_ftrace *rec);
0517 
0518 struct seq_file;
0519 
0520 extern int ftrace_text_reserved(const void *start, const void *end);
0521 
0522 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
0523 
0524 bool is_ftrace_trampoline(unsigned long addr);
0525 
0526 /*
0527  * The dyn_ftrace record's flags field is split into two parts.
0528  * the first part which is '0-FTRACE_REF_MAX' is a counter of
0529  * the number of callbacks that have registered the function that
0530  * the dyn_ftrace descriptor represents.
0531  *
0532  * The second part is a mask:
0533  *  ENABLED - the function is being traced
0534  *  REGS    - the record wants the function to save regs
0535  *  REGS_EN - the function is set up to save regs.
0536  *  IPMODIFY - the record allows for the IP address to be changed.
0537  *  DISABLED - the record is not ready to be touched yet
0538  *  DIRECT   - there is a direct function to call
0539  *
0540  * When a new ftrace_ops is registered and wants a function to save
0541  * pt_regs, the rec->flags REGS is set. When the function has been
0542  * set up to save regs, the REG_EN flag is set. Once a function
0543  * starts saving regs it will do so until all ftrace_ops are removed
0544  * from tracing that function.
0545  */
/* High bits of dyn_ftrace->flags; see the comment block above for meanings. */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

/* Bits 0..22 of dyn_ftrace->flags hold the registered-callback refcount */
#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)
0562 
/* One record per patchable call site, created at boot/module load. */
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;	/* FTRACE_FL_* bits + ref count (see above) */
	struct dyn_arch_ftrace	arch;	/* arch-specific data (declared in <asm/ftrace.h>) */
};
0568 
0569 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
0570              int remove, int reset);
0571 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
0572               unsigned int cnt, int remove, int reset);
0573 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
0574                int len, int reset);
0575 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
0576             int len, int reset);
0577 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
0578 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
0579 void ftrace_free_filter(struct ftrace_ops *ops);
0580 void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
0581 
/*
 * Command bits handed to the code-update paths (see ftrace_modify_all_code()
 * and arch_ftrace_update_code() below, both of which take an "int command").
 */
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};
0590 
0591 /*
0592  * The FTRACE_UPDATE_* enum is used to pass information back
0593  * from the ftrace_update_record() and ftrace_test_record()
0594  * functions. These are called by the code update routines
0595  * to find out what is to be done for a given function.
0596  *
0597  *  IGNORE           - The function is already what we want it to be
0598  *  MAKE_CALL        - Start tracing the function
0599  *  MODIFY_CALL      - Stop saving regs for the function
0600  *  MAKE_NOP         - Stop tracing the function
0601  */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

/*
 * Iterator state flags; NOTE(review): inferred from the names to relate to
 * the set_ftrace_filter/notrace file iteration — confirm against the
 * iterator implementation.
 */
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};
0618 
0619 void arch_ftrace_update_code(int command);
0620 void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
0621 void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
0622 void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
0623 
0624 struct ftrace_rec_iter;
0625 
0626 struct ftrace_rec_iter *ftrace_rec_iter_start(void);
0627 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
0628 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
0629 
/* Walk every dyn_ftrace record via the ftrace_rec_iter API above. */
#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
0634 
0635 
0636 int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
0637 int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
0638 void ftrace_run_stop_machine(int command);
0639 unsigned long ftrace_location(unsigned long ip);
0640 unsigned long ftrace_location_range(unsigned long start, unsigned long end);
0641 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
0642 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
0643 
0644 extern ftrace_func_t ftrace_trace_function;
0645 
0646 int ftrace_regex_open(struct ftrace_ops *ops, int flag,
0647           struct inode *inode, struct file *file);
0648 ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
0649                 size_t cnt, loff_t *ppos);
0650 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
0651                  size_t cnt, loff_t *ppos);
0652 int ftrace_regex_release(struct inode *inode, struct file *file);
0653 
0654 void __init
0655 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
0656 
0657 /* defined in arch */
0658 extern int ftrace_ip_converted(unsigned long ip);
0659 extern int ftrace_dyn_arch_init(void);
0660 extern void ftrace_replace_code(int enable);
0661 extern int ftrace_update_ftrace_func(ftrace_func_t func);
0662 extern void ftrace_caller(void);
0663 extern void ftrace_regs_caller(void);
0664 extern void ftrace_call(void);
0665 extern void ftrace_regs_call(void);
0666 extern void mcount_call(void);
0667 
0668 void ftrace_modify_all_code(int command);
0669 
0670 #ifndef FTRACE_ADDR
0671 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
0672 #endif
0673 
0674 #ifndef FTRACE_GRAPH_ADDR
0675 #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
0676 #endif
0677 
0678 #ifndef FTRACE_REGS_ADDR
0679 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
0680 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
0681 #else
0682 # define FTRACE_REGS_ADDR FTRACE_ADDR
0683 #endif
0684 #endif
0685 
0686 /*
0687  * If an arch would like functions that are only traced
0688  * by the function graph tracer to jump directly to its own
0689  * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
0690  * to be that address to jump to.
0691  */
0692 #ifndef FTRACE_GRAPH_TRAMP_ADDR
0693 #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
0694 #endif
0695 
0696 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0697 extern void ftrace_graph_caller(void);
0698 extern int ftrace_enable_ftrace_graph_caller(void);
0699 extern int ftrace_disable_ftrace_graph_caller(void);
0700 #else
0701 static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
0702 static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
0703 #endif
0704 
0705 /**
0706  * ftrace_make_nop - convert code into nop
0707  * @mod: module structure if called by module load initialization
0708  * @rec: the call site record (e.g. mcount/fentry)
0709  * @addr: the address that the call site should be calling
0710  *
0711  * This is a very sensitive operation and great care needs
0712  * to be taken by the arch.  The operation should carefully
0713  * read the location, check to see if what is read is indeed
0714  * what we expect it to be, and then on success of the compare,
0715  * it should write to the location.
0716  *
0717  * The code segment at @rec->ip should be a caller to @addr
0718  *
0719  * Return must be:
0720  *  0 on success
0721  *  -EFAULT on error reading the location
0722  *  -EINVAL on a failed compare of the contents
0723  *  -EPERM  on error writing to the location
0724  * Any other value will be considered a failure.
0725  */
0726 extern int ftrace_make_nop(struct module *mod,
0727                struct dyn_ftrace *rec, unsigned long addr);
0728 
0729 /**
0730  * ftrace_need_init_nop - return whether nop call sites should be initialized
0731  *
0732  * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
0733  * need to call ftrace_init_nop() if the code is built with that flag.
0734  * Architectures where this is not always the case may define their own
0735  * condition.
0736  *
0737  * Return must be:
0738  *  0       if ftrace_init_nop() should be called
0739  *  Nonzero if ftrace_init_nop() should not be called
0740  */
0741 
0742 #ifndef ftrace_need_init_nop
0743 #define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
0744 #endif
0745 
0746 /**
0747  * ftrace_init_nop - initialize a nop call site
0748  * @mod: module structure if called by module load initialization
0749  * @rec: the call site record (e.g. mcount/fentry)
0750  *
0751  * This is a very sensitive operation and great care needs
0752  * to be taken by the arch.  The operation should carefully
0753  * read the location, check to see if what is read is indeed
0754  * what we expect it to be, and then on success of the compare,
0755  * it should write to the location.
0756  *
0757  * The code segment at @rec->ip should contain the contents created by
0758  * the compiler
0759  *
0760  * Return must be:
0761  *  0 on success
0762  *  -EFAULT on error reading the location
0763  *  -EINVAL on a failed compare of the contents
0764  *  -EPERM  on error writing to the location
0765  * Any other value will be considered a failure.
0766  */
0767 #ifndef ftrace_init_nop
0768 static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
0769 {
0770     return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
0771 }
0772 #endif
0773 
0774 /**
0775  * ftrace_make_call - convert a nop call site into a call to addr
0776  * @rec: the call site record (e.g. mcount/fentry)
0777  * @addr: the address that the call site should call
0778  *
0779  * This is a very sensitive operation and great care needs
0780  * to be taken by the arch.  The operation should carefully
0781  * read the location, check to see if what is read is indeed
0782  * what we expect it to be, and then on success of the compare,
0783  * it should write to the location.
0784  *
0785  * The code segment at @rec->ip should be a nop
0786  *
0787  * Return must be:
0788  *  0 on success
0789  *  -EFAULT on error reading the location
0790  *  -EINVAL on a failed compare of the contents
0791  *  -EPERM  on error writing to the location
0792  * Any other value will be considered a failure.
0793  */
0794 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
0795 
0796 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
0797 /**
0798  * ftrace_modify_call - convert from one addr to another (no nop)
0799  * @rec: the call site record (e.g. mcount/fentry)
0800  * @old_addr: the address expected to be currently called to
0801  * @addr: the address to change to
0802  *
0803  * This is a very sensitive operation and great care needs
0804  * to be taken by the arch.  The operation should carefully
0805  * read the location, check to see if what is read is indeed
0806  * what we expect it to be, and then on success of the compare,
0807  * it should write to the location.
0808  *
0809  * The code segment at @rec->ip should be a caller to @old_addr
0810  *
0811  * Return must be:
0812  *  0 on success
0813  *  -EFAULT on error reading the location
0814  *  -EINVAL on a failed compare of the contents
0815  *  -EPERM  on error writing to the location
0816  * Any other value will be considered a failure.
0817  */
0818 extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
0819                   unsigned long addr);
0820 #else
/*
 * Should never be called: redirecting a live call site from one address
 * to another requires CONFIG_DYNAMIC_FTRACE_WITH_REGS. Without it,
 * reject the request outright.
 */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                     unsigned long addr)
{
    return -EINVAL;
}
0827 #endif
0828 
0829 /* May be defined in arch */
0830 extern int ftrace_arch_read_dyn_info(char *buf, int size);
0831 
0832 extern int skip_trace(unsigned long ip);
0833 extern void ftrace_module_init(struct module *mod);
0834 extern void ftrace_module_enable(struct module *mod);
0835 extern void ftrace_release_mod(struct module *mod);
0836 
0837 extern void ftrace_disable_daemon(void);
0838 extern void ftrace_enable_daemon(void);
0839 #else /* CONFIG_DYNAMIC_FTRACE */
/*
 * Stubs for when CONFIG_DYNAMIC_FTRACE is not set: there are no patched
 * call sites and no ftrace daemon, so these all succeed trivially.
 */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
/* Nothing is patched, so no text range is ever reserved by ftrace. */
static inline int ftrace_text_reserved(const void *start, const void *end)
{
    return 0;
}
/* 0 == @ip is not an ftrace call site (there are none in this config). */
static inline unsigned long ftrace_location(unsigned long ip)
{
    return 0;
}
0854 
0855 /*
0856  * Again users of functions that have ftrace_ops may not
0857  * have them defined when ftrace is not enabled, but these
0858  * functions may still be called. Use a macro instead of inline.
0859  */
0860 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
0861 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
0862 #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
0863 #define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
0864 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
0865 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
0866 #define ftrace_free_filter(ops) do { } while (0)
0867 #define ftrace_ops_set_global_filter(ops) do { } while (0)
0868 
/*
 * The filter/notrace control files do not exist without dynamic ftrace;
 * report -ENODEV if something tries to use them anyway.
 */
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                 size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
0875 
/* Without dynamic ftrace no trampolines are created, so no address is one. */
static inline bool is_ftrace_trampoline(unsigned long addr)
{
    return false;
}
0880 #endif /* CONFIG_DYNAMIC_FTRACE */
0881 
0882 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
0883 #ifndef ftrace_graph_func
0884 #define ftrace_graph_func ftrace_stub
0885 #define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
0886 #else
0887 #define FTRACE_OPS_GRAPH_STUB 0
0888 #endif
0889 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
0890 
0891 /* totally disable ftrace - can not re-enable after this */
0892 void ftrace_kill(void);
0893 
/*
 * Unconditionally turn function tracing off. Unlike
 * __ftrace_enabled_save()/__ftrace_enabled_restore() below, the previous
 * state is not remembered, so this cannot be undone precisely.
 */
static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
    ftrace_enabled = 0;
#endif
}
0900 
0901 /*
0902  * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
0904  * disable/restore.
0905  */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
    /* Remember the current state, then turn tracing off. */
    int saved_ftrace_enabled = ftrace_enabled;
    ftrace_enabled = 0;
    return saved_ftrace_enabled;
#else
    /* No function tracer configured: nothing to save. */
    return 0;
#endif
}
0916 
/* Restore the state previously returned by __ftrace_enabled_save(). */
static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
    ftrace_enabled = enabled;
#endif
}
0923 
0924 /* All archs should have this, but we define it for consistency */
0925 #ifndef ftrace_return_address0
0926 # define ftrace_return_address0 __builtin_return_address(0)
0927 #endif
0928 
0929 /* Archs may use other ways for ADDR1 and beyond */
0930 #ifndef ftrace_return_address
0931 # ifdef CONFIG_FRAME_POINTER
0932 #  define ftrace_return_address(n) __builtin_return_address(n)
0933 # else
0934 #  define ftrace_return_address(n) 0UL
0935 # endif
0936 #endif
0937 
0938 #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
0939 #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
0940 #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
0941 #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
0942 #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
0943 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
0944 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
0945 
0946 static inline unsigned long get_lock_parent_ip(void)
0947 {
0948     unsigned long addr = CALLER_ADDR0;
0949 
0950     if (!in_lock_functions(addr))
0951         return addr;
0952     addr = CALLER_ADDR1;
0953     if (!in_lock_functions(addr))
0954         return addr;
0955     return CALLER_ADDR2;
0956 }
0957 
0958 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
0959   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
0960   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
0961 #else
0962 /*
0963  * Use defines instead of static inlines because some arches will make code out
0964  * of the CALLER_ADDR, when we really want these to be a real nop.
0965  */
0966 # define trace_preempt_on(a0, a1) do { } while (0)
0967 # define trace_preempt_off(a0, a1) do { } while (0)
0968 #endif
0969 
0970 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
0971 extern void ftrace_init(void);
0972 #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
0973 #define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
0974 #else
0975 #define FTRACE_CALLSITE_SECTION "__mcount_loc"
0976 #endif
0977 #else
0978 static inline void ftrace_init(void) { }
0979 #endif
0980 
0981 /*
0982  * Structure that defines an entry function trace.
0983  * It's already packed but the attribute "packed" is needed
0984  * to remove extra padding at the end.
0985  */
struct ftrace_graph_ent {
    unsigned long func; /* Current function */
    int depth;          /* call-graph depth at this entry */
} __packed;
0990 
0991 /*
0992  * Structure that defines a return function trace.
0993  * It's already packed but the attribute "packed" is needed
0994  * to remove extra padding at the end.
0995  */
struct ftrace_graph_ret {
    unsigned long func; /* Current function */
    int depth;          /* call-graph depth at this return */
    /* Number of functions that overran the depth limit for current task */
    unsigned int overrun;
    /* Entry and return timestamps for the function */
    unsigned long long calltime;
    unsigned long long rettime;
} __packed;
1004 
/* Type of the callback handlers for tracing function graph */
1006 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
1007 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
1008 
1009 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
1010 
1011 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1012 
/* Callback pair passed to register_ftrace_graph()/unregister_ftrace_graph() */
struct fgraph_ops {
    trace_func_graph_ent_t      entryfunc;  /* called at function entry */
    trace_func_graph_ret_t      retfunc;    /* called at function return */
};
1017 
1018 /*
1019  * Stack of return addresses for functions
1020  * of a thread.
1021  * Used in struct thread_info
1022  */
struct ftrace_ret_stack {
    unsigned long ret;          /* saved original return address */
    unsigned long func;         /* the traced function */
    unsigned long long calltime;    /* timestamp at function entry */
#ifdef CONFIG_FUNCTION_PROFILER
    /* profiler-only accounting (presumably time in sub-calls — confirm
     * against the function profiler implementation) */
    unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
    unsigned long fp;           /* frame pointer, for the FP sanity test */
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
    unsigned long *retp;        /* location where @ret was stored */
#endif
};
1037 
1038 /*
1039  * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
1041  * Defined in entry_32/64.S
1042  */
1043 extern void return_to_handler(void);
1044 
1045 extern int
1046 function_graph_enter(unsigned long ret, unsigned long func,
1047              unsigned long frame_pointer, unsigned long *retp);
1048 
1049 struct ftrace_ret_stack *
1050 ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
1051 
1052 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
1053                     unsigned long ret, unsigned long *retp);
1054 
1055 /*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
1059  */
1060 #define __notrace_funcgraph     notrace
1061 
1062 #define FTRACE_RETFUNC_DEPTH 50
1063 #define FTRACE_RETSTACK_ALLOC_SIZE 32
1064 
1065 extern int register_ftrace_graph(struct fgraph_ops *ops);
1066 extern void unregister_ftrace_graph(struct fgraph_ops *ops);
1067 
1068 /**
1069  * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
1070  *
1071  * ftrace_graph_stop() is called when a severe error is detected in
1072  * the function graph tracing. This function is called by the critical
1073  * paths of function graph to keep those paths from doing any more harm.
1074  */
1075 DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
1076 
static inline bool ftrace_graph_is_dead(void)
{
    /* Static branch: near-zero cost until ftrace_graph_stop() flips it. */
    return static_branch_unlikely(&kill_ftrace_graph);
}
1081 
1082 extern void ftrace_graph_stop(void);
1083 
1084 /* The current handlers in use */
1085 extern trace_func_graph_ret_t ftrace_graph_return;
1086 extern trace_func_graph_ent_t ftrace_graph_entry;
1087 
1088 extern void ftrace_graph_init_task(struct task_struct *t);
1089 extern void ftrace_graph_exit_task(struct task_struct *t);
1090 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
1091 
/* Pause graph tracing for the current task; nests via an atomic counter. */
static inline void pause_graph_tracing(void)
{
    atomic_inc(&current->tracing_graph_pause);
}
1096 
/* Undo one pause_graph_tracing() for the current task. */
static inline void unpause_graph_tracing(void)
{
    atomic_dec(&current->tracing_graph_pause);
}
1101 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
1102 
1103 #define __notrace_funcgraph
1104 
/* No-op stubs: there is no per-task graph state without the graph tracer. */
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
1108 
1109 /* Define as macros as fgraph_ops may not be defined */
1110 #define register_ftrace_graph(ops) ({ -1; })
1111 #define unregister_ftrace_graph(ops) do { } while (0)
1112 
/* Without the graph tracer, return addresses are never replaced, so the
 * address passed in is already the real one. */
static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
              unsigned long *retp)
{
    return ret;
}
1119 
/* Nothing to pause when the graph tracer is not configured. */
static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
1122 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1123 
1124 #ifdef CONFIG_TRACING
1125 
1126 /* flags for current->trace */
enum {
    TSK_TRACE_FL_TRACE_BIT  = 0,    /* bit number, for set_bit()/clear_bit() */
    TSK_TRACE_FL_GRAPH_BIT  = 1,    /* bit number, for set_bit()/clear_bit() */
};
enum {
    /* mask forms of the bits above, for testing tsk->trace directly */
    TSK_TRACE_FL_TRACE  = 1 << TSK_TRACE_FL_TRACE_BIT,
    TSK_TRACE_FL_GRAPH  = 1 << TSK_TRACE_FL_GRAPH_BIT,
};
1135 
/*
 * Accessors for the per-task trace flags in tsk->trace. The set/clear
 * helpers use atomic bit operations; the test helpers read the mask
 * non-atomically (returning the raw masked value, not 0/1).
 */
static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
    set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
    clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
    return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
    set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
    clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
    return tsk->trace & TSK_TRACE_FL_GRAPH;
}
1165 
1166 enum ftrace_dump_mode;
1167 
1168 extern enum ftrace_dump_mode ftrace_dump_on_oops;
1169 extern int tracepoint_printk;
1170 
1171 extern void disable_trace_on_warning(void);
1172 extern int __disable_trace_on_warning;
1173 
1174 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
1175                  void *buffer, size_t *lenp, loff_t *ppos);
1176 
1177 #else /* CONFIG_TRACING */
1178 static inline void  disable_trace_on_warning(void) { }
1179 #endif /* CONFIG_TRACING */
1180 
1181 #ifdef CONFIG_FTRACE_SYSCALLS
1182 
1183 unsigned long arch_syscall_addr(int nr);
1184 
1185 #endif /* CONFIG_FTRACE_SYSCALLS */
1186 
1187 #endif /* _LINUX_FTRACE_H */