Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 
0003 #ifndef _LINUX_TRACE_EVENT_H
0004 #define _LINUX_TRACE_EVENT_H
0005 
0006 #include <linux/ring_buffer.h>
0007 #include <linux/trace_seq.h>
0008 #include <linux/percpu.h>
0009 #include <linux/hardirq.h>
0010 #include <linux/perf_event.h>
0011 #include <linux/tracepoint.h>
0012 
0013 struct trace_array;
0014 struct array_buffer;
0015 struct tracer;
0016 struct dentry;
0017 struct bpf_prog;
0018 union bpf_attr;
0019 
0020 const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
0021                   unsigned long flags,
0022                   const struct trace_print_flags *flag_array);
0023 
0024 const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
0025                     const struct trace_print_flags *symbol_array);
0026 
0027 #if BITS_PER_LONG == 32
0028 const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
0029               unsigned long long flags,
0030               const struct trace_print_flags_u64 *flag_array);
0031 
0032 const char *trace_print_symbols_seq_u64(struct trace_seq *p,
0033                     unsigned long long val,
0034                     const struct trace_print_flags_u64
0035                                  *symbol_array);
0036 #endif
0037 
0038 const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
0039                     unsigned int bitmask_size);
0040 
0041 const char *trace_print_hex_seq(struct trace_seq *p,
0042                 const unsigned char *buf, int len,
0043                 bool concatenate);
0044 
0045 const char *trace_print_array_seq(struct trace_seq *p,
0046                    const void *buf, int count,
0047                    size_t el_size);
0048 
0049 const char *
0050 trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
0051              int prefix_type, int rowsize, int groupsize,
0052              const void *buf, size_t len, bool ascii);
0053 
0054 struct trace_iterator;
0055 struct trace_event;
0056 
0057 int trace_raw_output_prep(struct trace_iterator *iter,
0058               struct trace_event *event);
0059 extern __printf(2, 3)
0060 void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
0061 
0062 /*
0063  * The trace entry - the most basic unit of tracing. This is what
0064  * is printed in the end as a single line in the trace output, such as:
0065  *
0066  *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
0067  */
struct trace_entry {
	unsigned short		type;		/* event type id; max value is TRACE_EVENT_TYPE_MAX */
	unsigned char		flags;		/* state bits, filled from trace_ctx >> 16 (TRACE_FLAG_*) */
	unsigned char		preempt_count;	/* preemption count snapshot (trace_ctx & 0xff) */
	int			pid;		/* pid of the task that emitted the event */
};

/* Largest value representable in trace_entry::type (an unsigned short). */
#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
0077 
0078 /*
0079  * Trace iterator - used by printout routines who present trace
0080  * results to users and which routines might sleep, etc:
0081  */
struct trace_iterator {
	struct trace_array	*tr;		/* trace instance being iterated */
	struct tracer		*trace;		/* current tracer */
	struct array_buffer	*array_buffer;	/* buffer events are read from */
	void			*private;	/* private to the output routine */
	int			cpu_file;
	struct mutex		mutex;		/* serializes concurrent readers */
	struct ring_buffer_iter	**buffer_iter;	/* per-CPU ring buffer iterators */
	unsigned long		iter_flags;	/* TRACE_FILE_* bits (enum trace_iter_flags) */
	void			*temp;	/* temp holder */
	unsigned int		temp_size;	/* size of @temp */
	char			*fmt;	/* modified format holder */
	unsigned int		fmt_size;	/* size of @fmt */

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	/* NOTE(review): presumably tracks which CPUs were already visited - confirm */
	cpumask_var_t		started;

	/* it's true when current open file is snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;		/* output accumulator */
	struct trace_entry	*ent;		/* current entry */
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;	/* size of @ent */
	int			cpu;		/* CPU @ent was read from */
	u64			ts;		/* timestamp of @ent */

	loff_t			pos;
	long			idx;

	/* All new field here will be zeroed out in pipe_read */
};
0118 
0119 enum trace_iter_flags {
0120     TRACE_FILE_LAT_FMT  = 1,
0121     TRACE_FILE_ANNOTATE = 2,
0122     TRACE_FILE_TIME_IN_NS   = 4,
0123 };
0124 
0125 
/* Callback that renders one event into @iter->seq in a given format. */
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

/* Output renderers for the four trace output modes. */
struct trace_event_functions {
	trace_print_func	trace;		/* default human-readable output */
	trace_print_func	raw;		/* raw format output */
	trace_print_func	hex;		/* hex dump output */
	trace_print_func	binary;		/* binary output */
};

/* A registered output event (see register_trace_event()). */
struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;	/* unique event type id */
	struct trace_event_functions	*funcs;
};
0142 
0143 extern int register_trace_event(struct trace_event *event);
0144 extern int unregister_trace_event(struct trace_event *event);
0145 
0146 /* Return values for print_line callback */
0147 enum print_line_t {
0148     TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
0149     TRACE_TYPE_HANDLED  = 1,
0150     TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
0151     TRACE_TYPE_NO_CONSUME   = 3 /* Handled but ask to not consume */
0152 };
0153 
0154 enum print_line_t trace_handle_return(struct trace_seq *s);
0155 
0156 static inline void tracing_generic_entry_update(struct trace_entry *entry,
0157                         unsigned short type,
0158                         unsigned int trace_ctx)
0159 {
0160     entry->preempt_count        = trace_ctx & 0xff;
0161     entry->pid          = current->pid;
0162     entry->type         = type;
0163     entry->flags =          trace_ctx >> 16;
0164 }
0165 
0166 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
0167 
0168 enum trace_flag_type {
0169     TRACE_FLAG_IRQS_OFF     = 0x01,
0170     TRACE_FLAG_IRQS_NOSUPPORT   = 0x02,
0171     TRACE_FLAG_NEED_RESCHED     = 0x04,
0172     TRACE_FLAG_HARDIRQ      = 0x08,
0173     TRACE_FLAG_SOFTIRQ      = 0x10,
0174     TRACE_FLAG_PREEMPT_RESCHED  = 0x20,
0175     TRACE_FLAG_NMI          = 0x40,
0176     TRACE_FLAG_BH_OFF       = 0x80,
0177 };
0178 
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
/* Build a trace_ctx word, deriving the IRQ state from saved @irqflags. */
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
	unsigned int irq_status = irqs_disabled_flags(irqflags) ?
		TRACE_FLAG_IRQS_OFF : 0;
	return tracing_gen_ctx_irq_test(irq_status);
}
/* Build a trace_ctx word for the current context. */
static inline unsigned int tracing_gen_ctx(void)
{
	unsigned long irqflags;

	local_save_flags(irqflags);
	return tracing_gen_ctx_flags(irqflags);
}
#else

/* Without IRQ-flags support the IRQ state cannot be sampled. */
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
}
static inline unsigned int tracing_gen_ctx(void)
{
	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
}
#endif
0204 
/*
 * Same as tracing_gen_ctx(), but with one subtracted from the preemption
 * counter when CONFIG_PREEMPTION is enabled.
 */
static inline unsigned int tracing_gen_ctx_dec(void)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();
	/*
	 * Subtract one from the preemption counter if preemption is enabled,
	 * see trace_event_buffer_reserve() for details.
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION))
		trace_ctx--;
	return trace_ctx;
}
0218 
0219 struct trace_event_file;
0220 
0221 struct ring_buffer_event *
0222 trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
0223                 struct trace_event_file *trace_file,
0224                 int type, unsigned long len,
0225                 unsigned int trace_ctx);
0226 
0227 #define TRACE_RECORD_CMDLINE    BIT(0)
0228 #define TRACE_RECORD_TGID   BIT(1)
0229 
0230 void tracing_record_taskinfo(struct task_struct *task, int flags);
0231 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
0232                       struct task_struct *next, int flags);
0233 
0234 void tracing_record_cmdline(struct task_struct *task);
0235 void tracing_record_tgid(struct task_struct *task);
0236 
0237 int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
0238 
0239 struct event_filter;
0240 
0241 enum trace_reg {
0242     TRACE_REG_REGISTER,
0243     TRACE_REG_UNREGISTER,
0244 #ifdef CONFIG_PERF_EVENTS
0245     TRACE_REG_PERF_REGISTER,
0246     TRACE_REG_PERF_UNREGISTER,
0247     TRACE_REG_PERF_OPEN,
0248     TRACE_REG_PERF_CLOSE,
0249     /*
0250      * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
0251      * custom action was taken and the default action is not to be
0252      * performed.
0253      */
0254     TRACE_REG_PERF_ADD,
0255     TRACE_REG_PERF_DEL,
0256 #endif
0257 };
0258 
0259 struct trace_event_call;
0260 
0261 #define TRACE_FUNCTION_TYPE ((const char *)~0UL)
0262 
/*
 * Describes one field of a trace event.  The union either holds an
 * inline field description or a define_fields() callback; the
 * discriminant is presumably @type (cf. the TRACE_FUNCTION_TYPE
 * sentinel above) - confirm against the trace event core.
 */
struct trace_event_fields {
	const char *type;	/* C type of the field as a string */
	union {
		struct {
			const char *name;	/* field name */
			const int  size;	/* size in bytes */
			const int  align;	/* alignment requirement */
			const int  is_signed;
			const int  filter_type;	/* FILTER_* value (see below) */
		};
		int (*define_fields)(struct trace_event_call *);
	};
};
0276 
/* Behavior shared by all events of the same class (system). */
struct trace_event_class {
	const char		*system;	/* name of the event group */
	void			*probe;		/* tracing probe function */
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;	/* probe used by perf */
#endif
	/* registration callback, invoked with a TRACE_REG_* operation */
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	struct trace_event_fields *fields_array;
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;		/* fallback field list when get_fields is NULL */
	int			(*raw_init)(struct trace_event_call *);
};
0290 
0291 extern int trace_event_reg(struct trace_event_call *event,
0292                 enum trace_reg type, void *data);
0293 
/*
 * State carried between trace_event_buffer_reserve() and
 * trace_event_buffer_commit() for a single event write.
 */
struct trace_event_buffer {
	struct trace_buffer		*buffer;	/* ring buffer written to */
	struct ring_buffer_event	*event;		/* reserved ring buffer slot */
	struct trace_event_file		*trace_file;	/* file the event belongs to */
	void				*entry;		/* payload area inside @event */
	unsigned int			trace_ctx;	/* packed context (see tracing_gen_ctx()) */
	struct pt_regs			*regs;
};
0302 
0303 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
0304                   struct trace_event_file *trace_file,
0305                   unsigned long len);
0306 
0307 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
0308 
0309 enum {
0310     TRACE_EVENT_FL_FILTERED_BIT,
0311     TRACE_EVENT_FL_CAP_ANY_BIT,
0312     TRACE_EVENT_FL_NO_SET_FILTER_BIT,
0313     TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
0314     TRACE_EVENT_FL_TRACEPOINT_BIT,
0315     TRACE_EVENT_FL_DYNAMIC_BIT,
0316     TRACE_EVENT_FL_KPROBE_BIT,
0317     TRACE_EVENT_FL_UPROBE_BIT,
0318     TRACE_EVENT_FL_EPROBE_BIT,
0319     TRACE_EVENT_FL_CUSTOM_BIT,
0320 };
0321 
0322 /*
0323  * Event flags:
0324  *  FILTERED      - The event has a filter attached
0325  *  CAP_ANY   - Any user can enable for perf
0326  *  NO_SET_FILTER - Set when filter has error and is to be ignored
0327  *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
0328  *  TRACEPOINT    - Event is a tracepoint
0329  *  DYNAMIC       - Event is a dynamic event (created at run time)
0330  *  KPROBE        - Event is a kprobe
0331  *  UPROBE        - Event is a uprobe
0332  *  EPROBE        - Event is an event probe
0333  *  CUSTOM        - Event is a custom event (to be attached to an existing tracepoint)
0334  *                   This is set when the custom event has not been attached
0335  *                   to a tracepoint yet, then it is cleared when it is.
0336  */
0337 enum {
0338     TRACE_EVENT_FL_FILTERED     = (1 << TRACE_EVENT_FL_FILTERED_BIT),
0339     TRACE_EVENT_FL_CAP_ANY      = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
0340     TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
0341     TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
0342     TRACE_EVENT_FL_TRACEPOINT   = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
0343     TRACE_EVENT_FL_DYNAMIC      = (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
0344     TRACE_EVENT_FL_KPROBE       = (1 << TRACE_EVENT_FL_KPROBE_BIT),
0345     TRACE_EVENT_FL_UPROBE       = (1 << TRACE_EVENT_FL_UPROBE_BIT),
0346     TRACE_EVENT_FL_EPROBE       = (1 << TRACE_EVENT_FL_EPROBE_BIT),
0347     TRACE_EVENT_FL_CUSTOM       = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
0348 };
0349 
0350 #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
0351 
/* Descriptor for one trace event type known to the tracing core. */
struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;	/* shared class/system behavior */
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;		/* registered output event */
	char			*print_fmt;
	struct event_filter	*filter;
	/*
	 * Static events can disappear with modules,
	 * where as dynamic ones need their own ref count.
	 */
	union {
		void				*module;
		atomic_t			refcnt;
	};
	void			*data;

	/* See the TRACE_EVENT_FL_* flags above */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;	/* BPF programs attached via perf */

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};
0385 
#ifdef CONFIG_DYNAMIC_EVENTS
bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
void trace_event_dyn_put_ref(struct trace_event_call *call);
bool trace_event_dyn_busy(struct trace_event_call *call);
#else
/* Stubs: without CONFIG_DYNAMIC_EVENTS no dynamic events can exist. */
static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
{
	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
	return false;
}
static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
{
}
static inline bool trace_event_dyn_busy(struct trace_event_call *call)
{
	/* Nothing should call this without DYNAMIC_EVENTS configured. */
	return true;
}
#endif
0405 
0406 static inline bool trace_event_try_get_ref(struct trace_event_call *call)
0407 {
0408     if (call->flags & TRACE_EVENT_FL_DYNAMIC)
0409         return trace_event_dyn_try_get_ref(call);
0410     else
0411         return try_module_get(call->module);
0412 }
0413 
0414 static inline void trace_event_put_ref(struct trace_event_call *call)
0415 {
0416     if (call->flags & TRACE_EVENT_FL_DYNAMIC)
0417         trace_event_dyn_put_ref(call);
0418     else
0419         module_put(call->module);
0420 }
0421 
#ifdef CONFIG_PERF_EVENTS
/* Cheap lockless check for whether any BPF programs are attached. */
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes NULL inside rcu_read_lock/unlock region,
	 * we bail out then. If this function return false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif
0445 
0446 static inline const char *
0447 trace_event_name(struct trace_event_call *call)
0448 {
0449     if (call->flags & TRACE_EVENT_FL_CUSTOM)
0450         return call->name;
0451     else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
0452         return call->tp ? call->tp->name : NULL;
0453     else
0454         return call->name;
0455 }
0456 
0457 static inline struct list_head *
0458 trace_get_fields(struct trace_event_call *event_call)
0459 {
0460     if (!event_call->class->get_fields)
0461         return &event_call->class->fields;
0462     return event_call->class->get_fields(event_call);
0463 }
0464 
0465 struct trace_subsystem_dir;
0466 
0467 enum {
0468     EVENT_FILE_FL_ENABLED_BIT,
0469     EVENT_FILE_FL_RECORDED_CMD_BIT,
0470     EVENT_FILE_FL_RECORDED_TGID_BIT,
0471     EVENT_FILE_FL_FILTERED_BIT,
0472     EVENT_FILE_FL_NO_SET_FILTER_BIT,
0473     EVENT_FILE_FL_SOFT_MODE_BIT,
0474     EVENT_FILE_FL_SOFT_DISABLED_BIT,
0475     EVENT_FILE_FL_TRIGGER_MODE_BIT,
0476     EVENT_FILE_FL_TRIGGER_COND_BIT,
0477     EVENT_FILE_FL_PID_FILTER_BIT,
0478     EVENT_FILE_FL_WAS_ENABLED_BIT,
0479 };
0480 
0481 extern struct trace_event_file *trace_get_event_file(const char *instance,
0482                              const char *system,
0483                              const char *event);
0484 extern void trace_put_event_file(struct trace_event_file *file);
0485 
0486 #define MAX_DYNEVENT_CMD_LEN    (2048)
0487 
0488 enum dynevent_type {
0489     DYNEVENT_TYPE_SYNTH = 1,
0490     DYNEVENT_TYPE_KPROBE,
0491     DYNEVENT_TYPE_NONE,
0492 };
0493 
0494 struct dynevent_cmd;
0495 
0496 typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
0497 
0498 struct dynevent_cmd {
0499     struct seq_buf      seq;
0500     const char      *event_name;
0501     unsigned int        n_fields;
0502     enum dynevent_type  type;
0503     dynevent_create_fn_t    run_command;
0504     void            *private_data;
0505 };
0506 
0507 extern int dynevent_create(struct dynevent_cmd *cmd);
0508 
0509 extern int synth_event_delete(const char *name);
0510 
0511 extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
0512                  char *buf, int maxlen);
0513 
0514 extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
0515                        const char *name,
0516                        struct module *mod, ...);
0517 
0518 #define synth_event_gen_cmd_start(cmd, name, mod, ...)  \
0519     __synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
0520 
0521 struct synth_field_desc {
0522     const char *type;
0523     const char *name;
0524 };
0525 
0526 extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
0527                        const char *name,
0528                        struct module *mod,
0529                        struct synth_field_desc *fields,
0530                        unsigned int n_fields);
0531 extern int synth_event_create(const char *name,
0532                   struct synth_field_desc *fields,
0533                   unsigned int n_fields, struct module *mod);
0534 
0535 extern int synth_event_add_field(struct dynevent_cmd *cmd,
0536                  const char *type,
0537                  const char *name);
0538 extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
0539                      const char *type_name);
0540 extern int synth_event_add_fields(struct dynevent_cmd *cmd,
0541                   struct synth_field_desc *fields,
0542                   unsigned int n_fields);
0543 
0544 #define synth_event_gen_cmd_end(cmd)    \
0545     dynevent_create(cmd)
0546 
0547 struct synth_event;
0548 
0549 struct synth_event_trace_state {
0550     struct trace_event_buffer fbuffer;
0551     struct synth_trace_event *entry;
0552     struct trace_buffer *buffer;
0553     struct synth_event *event;
0554     unsigned int cur_field;
0555     unsigned int n_u64;
0556     bool disabled;
0557     bool add_next;
0558     bool add_name;
0559 };
0560 
0561 extern int synth_event_trace(struct trace_event_file *file,
0562                  unsigned int n_vals, ...);
0563 extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
0564                    unsigned int n_vals);
0565 extern int synth_event_trace_start(struct trace_event_file *file,
0566                    struct synth_event_trace_state *trace_state);
0567 extern int synth_event_add_next_val(u64 val,
0568                     struct synth_event_trace_state *trace_state);
0569 extern int synth_event_add_val(const char *field_name, u64 val,
0570                    struct synth_event_trace_state *trace_state);
0571 extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
0572 
0573 extern int kprobe_event_delete(const char *name);
0574 
0575 extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
0576                   char *buf, int maxlen);
0577 
0578 #define kprobe_event_gen_cmd_start(cmd, name, loc, ...)         \
0579     __kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
0580 
0581 #define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)      \
0582     __kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
0583 
0584 extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
0585                     bool kretprobe,
0586                     const char *name,
0587                     const char *loc, ...);
0588 
0589 #define kprobe_event_add_fields(cmd, ...)   \
0590     __kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
0591 
0592 #define kprobe_event_add_field(cmd, field)  \
0593     __kprobe_event_add_fields(cmd, field, NULL)
0594 
0595 extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
0596 
0597 #define kprobe_event_gen_cmd_end(cmd)       \
0598     dynevent_create(cmd)
0599 
0600 #define kretprobe_event_gen_cmd_end(cmd)    \
0601     dynevent_create(cmd)
0602 
0603 /*
0604  * Event file flags:
0605  *  ENABLED   - The event is enabled
0606  *  RECORDED_CMD  - The comms should be recorded at sched_switch
0607  *  RECORDED_TGID - The tgids should be recorded at sched_switch
0608  *  FILTERED      - The event has a filter attached
0609  *  NO_SET_FILTER - Set when filter has error and is to be ignored
0610  *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
0611  *  SOFT_DISABLED - When set, do not trace the event (even though its
0612  *                   tracepoint may be enabled)
0613  *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
0614  *  TRIGGER_COND  - When set, one or more triggers has an associated filter
0615  *  PID_FILTER    - When set, the event is filtered based on pid
0616  *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
0617  */
0618 enum {
0619     EVENT_FILE_FL_ENABLED       = (1 << EVENT_FILE_FL_ENABLED_BIT),
0620     EVENT_FILE_FL_RECORDED_CMD  = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
0621     EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
0622     EVENT_FILE_FL_FILTERED      = (1 << EVENT_FILE_FL_FILTERED_BIT),
0623     EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
0624     EVENT_FILE_FL_SOFT_MODE     = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
0625     EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
0626     EVENT_FILE_FL_TRIGGER_MODE  = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
0627     EVENT_FILE_FL_TRIGGER_COND  = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
0628     EVENT_FILE_FL_PID_FILTER    = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
0629     EVENT_FILE_FL_WAS_ENABLED   = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
0630 };
0631 
/* Per-instance view of one event (the events/<system>/<event>/ directory). */
struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;	/* the event this file controls */
	struct event_filter __rcu	*filter;
	struct dentry			*dir;		/* tracefs directory entry */
	struct trace_array		*tr;		/* owning trace instance */
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;	/* attached event triggers */

	/*
	 * 32 bit flags:
	 *   bit 0:		enabled
	 *   bit 1:		enabled cmd record
	 *   bit 2:		enable/disable with the soft disable bit
	 *   bit 3:		soft disabled
	 *   bit 4:		trigger enabled
	 *
	 * NOTE(review): the bit list above looks stale relative to the
	 * EVENT_FILE_FL_* enum (which also has RECORDED_TGID, FILTERED,
	 * etc.) - treat the enum as authoritative.
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};
0661 
/* OR extra TRACE_EVENT_FL_* bits into event_<name> at early boot. */
#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)			\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

/*
 * Install a perf permission callback for event_<name>; the callback
 * body evaluates @expr as its return value.
 */
#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);
0682 
0683 #define PERF_MAX_TRACE_SIZE 8192
0684 
0685 #define MAX_FILTER_STR_VAL  256U    /* Should handle KSYM_SYMBOL_LEN */
0686 
0687 enum event_trigger_type {
0688     ETT_NONE        = (0),
0689     ETT_TRACE_ONOFF     = (1 << 0),
0690     ETT_SNAPSHOT        = (1 << 1),
0691     ETT_STACKTRACE      = (1 << 2),
0692     ETT_EVENT_ENABLE    = (1 << 3),
0693     ETT_EVENT_HIST      = (1 << 4),
0694     ETT_HIST_ENABLE     = (1 << 5),
0695     ETT_EVENT_EPROBE    = (1 << 6),
0696 };
0697 
0698 extern int filter_match_preds(struct event_filter *filter, void *rec);
0699 
0700 extern enum event_trigger_type
0701 event_triggers_call(struct trace_event_file *file,
0702             struct trace_buffer *buffer, void *rec,
0703             struct ring_buffer_event *event);
0704 extern void
0705 event_triggers_post_call(struct trace_event_file *file,
0706              enum event_trigger_type tt);
0707 
0708 bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
0709 
0710 bool __trace_trigger_soft_disabled(struct trace_event_file *file);
0711 
0712 /**
0713  * trace_trigger_soft_disabled - do triggers and test if soft disabled
0714  * @file: The file pointer of the event to test
0715  *
0716  * If any triggers without filters are attached to this event, they
0717  * will be called here. If the event is soft disabled and has no
0718  * triggers that require testing the fields, it will return true,
0719  * otherwise false.
0720  */
static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	/* Fast path: nothing attached that could suppress or fire here. */
	if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
			       EVENT_FILE_FL_SOFT_DISABLED |
			       EVENT_FILE_FL_PID_FILTER))))
		return false;

	/* Conditional triggers need the recorded fields; let the event record. */
	if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
		return false;

	/* Slow path: run unconditional triggers and test soft-disable/pid. */
	return __trace_trigger_soft_disabled(file);
}
0736 
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
/*
 * Stubs used when CONFIG_BPF_EVENTS is disabled: attach/query operations
 * fail with -EOPNOTSUPP, lookups return NULL, and trace_call_bpf()
 * returns 1 (no program to veto the event).
 */
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
	return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
					  u32 *prog_id, u32 *fd_type,
					  const char **buf, u64 *probe_offset,
					  u64 *probe_addr)
{
	return -EOPNOTSUPP;
}
static inline int
bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
#endif
0797 
0798 enum {
0799     FILTER_OTHER = 0,
0800     FILTER_STATIC_STRING,
0801     FILTER_DYN_STRING,
0802     FILTER_RDYN_STRING,
0803     FILTER_PTR_STRING,
0804     FILTER_TRACE_FN,
0805     FILTER_COMM,
0806     FILTER_CPU,
0807 };
0808 
0809 extern int trace_event_raw_init(struct trace_event_call *call);
0810 extern int trace_define_field(struct trace_event_call *call, const char *type,
0811                   const char *name, int offset, int size,
0812                   int is_signed, int filter_type);
0813 extern int trace_add_event_call(struct trace_event_call *call);
0814 extern int trace_remove_event_call(struct trace_event_call *call);
0815 extern int trace_event_get_offsets(struct trace_event_call *call);
0816 
0817 int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
0818 int trace_set_clr_event(const char *system, const char *event, int set);
0819 int trace_array_set_clr_event(struct trace_array *tr, const char *system,
0820         const char *event, bool enable);
0821 /*
0822  * The double __builtin_constant_p is because gcc will give us an error
0823  * if we try to allocate the static variable to fmt if it is not a
0824  * constant. Even with the outer if statement optimizing out.
0825  */
/*
 * event_trace_printk - printk-style output from event code.
 *
 * Validates @fmt against @args, records the current task's cmdline,
 * then picks the output path: if @fmt is a compile-time constant, the
 * format string is stashed once in the __trace_printk_fmt section and
 * the faster binary __trace_bprintk() is used; otherwise it falls back
 * to the plain __trace_printk().
 */
#define event_trace_printk(ip, fmt, args...)                \
do {                                    \
    __trace_printk_check_format(fmt, ##args);           \
    tracing_record_cmdline(current);                \
    if (__builtin_constant_p(fmt)) {                \
        static const char *trace_printk_fmt         \
          __section("__trace_printk_fmt") =         \
            __builtin_constant_p(fmt) ? fmt : NULL;     \
                                    \
        __trace_bprintk(ip, trace_printk_fmt, ##args);      \
    } else                              \
        __trace_printk(ip, fmt, ##args);            \
} while (0)
0839 
0840 #ifdef CONFIG_PERF_EVENTS
0841 struct perf_event;
0842 
0843 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
0844 DECLARE_PER_CPU(int, bpf_kprobe_override);
0845 
0846 extern int  perf_trace_init(struct perf_event *event);
0847 extern void perf_trace_destroy(struct perf_event *event);
0848 extern int  perf_trace_add(struct perf_event *event, int flags);
0849 extern void perf_trace_del(struct perf_event *event, int flags);
0850 #ifdef CONFIG_KPROBE_EVENTS
0851 extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
0852 extern void perf_kprobe_destroy(struct perf_event *event);
0853 extern int bpf_get_kprobe_info(const struct perf_event *event,
0854                    u32 *fd_type, const char **symbol,
0855                    u64 *probe_offset, u64 *probe_addr,
0856                    bool perf_type_tracepoint);
0857 #endif
0858 #ifdef CONFIG_UPROBE_EVENTS
0859 extern int  perf_uprobe_init(struct perf_event *event,
0860                  unsigned long ref_ctr_offset, bool is_retprobe);
0861 extern void perf_uprobe_destroy(struct perf_event *event);
0862 extern int bpf_get_uprobe_info(const struct perf_event *event,
0863                    u32 *fd_type, const char **filename,
0864                    u64 *probe_offset, bool perf_type_tracepoint);
0865 #endif
0866 extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
0867                      char *filter_str);
0868 extern void ftrace_profile_free_filter(struct perf_event *event);
0869 void perf_trace_buf_update(void *record, u16 type);
0870 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
0871 
0872 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
0873 void perf_event_free_bpf_prog(struct perf_event *event);
0874 
0875 void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
0876 void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
0877 void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
0878             u64 arg3);
0879 void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
0880             u64 arg3, u64 arg4);
0881 void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
0882             u64 arg3, u64 arg4, u64 arg5);
0883 void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
0884             u64 arg3, u64 arg4, u64 arg5, u64 arg6);
0885 void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
0886             u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
0887 void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
0888             u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
0889             u64 arg8);
0890 void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
0891             u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
0892             u64 arg8, u64 arg9);
0893 void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
0894              u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
0895              u64 arg8, u64 arg9, u64 arg10);
0896 void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
0897              u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
0898              u64 arg8, u64 arg9, u64 arg10, u64 arg11);
0899 void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
0900              u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
0901              u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
0902 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
0903                    struct trace_event_call *call, u64 count,
0904                    struct pt_regs *regs, struct hlist_head *head,
0905                    struct task_struct *task);
0906 
/*
 * perf_trace_buf_submit - submit a filled trace-event record to perf.
 *
 * Thin wrapper around perf_tp_event(); it only reorders the arguments
 * into perf_tp_event()'s calling convention.  @rctx is the recursion
 * context obtained from perf_trace_buf_alloc().
 */
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
               u64 count, struct pt_regs *regs, void *head,
               struct task_struct *task)
{
    perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
0914 
0915 #endif
0916 
0917 #define TRACE_EVENT_STR_MAX 512
0918 
0919 /*
0920  * gcc warns that you can not use a va_list in an inlined
0921  * function. But lets me make it into a macro :-/
0922  */
/*
 * __trace_event_vstr_len - buffer size needed to format @fmt with @va.
 *
 * Evaluates to vsnprintf()'s required length plus one for the NUL
 * terminator, capped at TRACE_EVENT_STR_MAX.  Operates on a va_copy of
 * *(@va), so the caller's va_list is left usable.
 */
#define __trace_event_vstr_len(fmt, va)         \
({                          \
    va_list __ap;                   \
    int __ret;                  \
                            \
    va_copy(__ap, *(va));               \
    __ret = vsnprintf(NULL, 0, fmt, __ap) + 1;  \
    va_end(__ap);                   \
                            \
    min(__ret, TRACE_EVENT_STR_MAX);        \
})
0934 
0935 #endif /* _LINUX_TRACE_EVENT_H */
0936 
0937 /*
0938  * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
0939  *  This is due to the way trace custom events work. If a file includes two
0940  *  trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
0941  *  will override the TRACE_CUSTOM_EVENT and break the second include.
0942  */
0943 
#ifndef TRACE_CUSTOM_EVENT

/*
 * Empty fallback definitions so that headers using the custom-event
 * macros still preprocess cleanly when TRACE_CUSTOM_EVENT has not been
 * defined by the includer (see the note above on why these live
 * outside the include-guard protection).
 */
#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)

#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */