#ifndef __PERF_FTRACE_H__
#define __PERF_FTRACE_H__

#include <linux/list.h>

#include "target.h"

struct evlist;
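/*
 * Options and state for a 'perf ftrace' session.  The filter lists below
 * hold struct filter_entry nodes; most of the remaining fields correspond
 * to ftrace tracefs knobs (current tracer, per-cpu buffer size, graph
 * depth, and so on).
 */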
struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	bool			use_nsec;
	int			graph_depth;
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	unsigned int		initial_delay;
};
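/* A single function name on one of the filter/notrace/graph lists above. */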
struct filter_entry {
	struct list_head	list;
	char			name[];
};
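/*
 * Number of histogram buckets filled in by the latency readers below
 * (presumably the buckets of the 'perf ftrace latency' histogram).
 */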
#define NUM_BUCKET 22

#ifdef HAVE_BPF_SKEL
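/*
 * BPF-backed function latency collection.  A rough sketch of the expected
 * call sequence (error handling omitted; the actual driver is the caller
 * implementing 'perf ftrace latency'):
 *
 *	int buckets[NUM_BUCKET] = {};
 *
 *	perf_ftrace__latency_prepare_bpf(ftrace);
 *	perf_ftrace__latency_start_bpf(ftrace);
 *	... run the workload ...
 *	perf_ftrace__latency_stop_bpf(ftrace);
 *	perf_ftrace__latency_read_bpf(ftrace, buckets);
 *	perf_ftrace__latency_cleanup_bpf(ftrace);
 */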
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
				  int buckets[]);
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);

#else
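/* Stubs used when perf is built without BPF skeleton support. */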
static inline int
perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
			      int buckets[] __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

#endif /* HAVE_BPF_SKEL */

#endif /* __PERF_FTRACE_H__ */