0001 #ifndef PERF_UTIL_KWORK_H
0002 #define PERF_UTIL_KWORK_H
0003
0004 #include "perf.h"
0005
0006 #include "util/tool.h"
0007 #include "util/event.h"
0008 #include "util/evlist.h"
0009 #include "util/session.h"
0010 #include "util/time-utils.h"
0011
0012 #include <linux/list.h>
0013 #include <linux/bitmap.h>
0014
/* Classes of kernel work that perf kwork can trace. */
enum kwork_class_type {
	KWORK_CLASS_IRQ,
	KWORK_CLASS_SOFTIRQ,
	KWORK_CLASS_WORKQUEUE,
	KWORK_CLASS_MAX,	/* number of classes, not a real class */
};
0021
/* Which report the user requested: runtime, latency, or per-event timehist. */
enum kwork_report_type {
	KWORK_REPORT_RUNTIME,
	KWORK_REPORT_LATENCY,
	KWORK_REPORT_TIMEHIST,
};
0027
/*
 * Trace stages of a single work item: raise (work is requested),
 * entry (execution begins), exit (execution ends).
 */
enum kwork_trace_type {
	KWORK_TRACE_RAISE,
	KWORK_TRACE_ENTRY,
	KWORK_TRACE_EXIT,
	KWORK_TRACE_MAX,	/* number of stages; used as an array size below */
};
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
/*
 * One recorded tracepoint event of a work item, kept on the owning
 * work's per-stage atom_list.  page_addr/bit_inpage locate the atom's
 * slot inside its kwork_atom_page so it can be released via the page
 * bitmap.
 */
struct kwork_atom {
	struct list_head list;
	u64 time;			/* event timestamp */
	struct kwork_atom *prev;	/* NOTE(review): presumably the atom of the
					 * preceding stage (e.g. the raise atom of an
					 * entry) used for latency math — confirm in
					 * builtin-kwork.c */

	void *page_addr;		/* owning kwork_atom_page */
	unsigned long bit_inpage;	/* this atom's slot in the page bitmap */
};
0086
/*
 * Atoms are slab-style allocated in fixed-size pages; the bitmap marks
 * which of the NR_ATOM_PER_PAGE slots are in use.
 */
#define NR_ATOM_PER_PAGE 128
struct kwork_atom_page {
	struct list_head list;	/* linked into perf_kwork->atom_page_list */
	struct kwork_atom atoms[NR_ATOM_PER_PAGE];
	DECLARE_BITMAP(bitmap, NR_ATOM_PER_PAGE);
};
0093
struct kwork_class;

/*
 * One tracked work item (a specific irq, softirq vector, or workqueue
 * work) together with its pending atoms and accumulated report data.
 */
struct kwork_work {
	/*
	 * class field
	 */
	struct rb_node node;		/* linked into the class's work_root */
	struct kwork_class *class;

	/*
	 * work field
	 */
	u64 id;
	int cpu;
	char *name;

	/*
	 * atom field: unmatched events per trace stage
	 */
	u64 nr_atoms;
	struct list_head atom_list[KWORK_TRACE_MAX];

	/*
	 * runtime report
	 */
	u64 max_runtime;
	u64 max_runtime_start;
	u64 max_runtime_end;
	u64 total_runtime;

	/*
	 * latency report
	 */
	u64 max_latency;
	u64 max_latency_start;
	u64 max_latency_end;
	u64 total_latency;
};
0131
/*
 * A traceable class of kernel work (irq/softirq/workqueue).  Each class
 * supplies its tracepoint table plus callbacks to bind those tracepoints
 * to a session, initialize work items from samples, and format work
 * names; the works it collects live in work_root.
 */
struct kwork_class {
	struct list_head list;		/* linked into perf_kwork->class_list */
	const char *name;
	enum kwork_class_type type;

	unsigned int nr_tracepoints;			/* entries in tp_handlers */
	const struct evsel_str_handler *tp_handlers;

	struct rb_root_cached work_root;	/* all works of this class */

	/* resolve this class's tracepoint handlers within @session */
	int (*class_init)(struct kwork_class *class,
			  struct perf_session *session);

	/* fill @work (id/cpu/name) from a parsed tracepoint sample */
	void (*work_init)(struct kwork_class *class,
			  struct kwork_work *work,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine);

	/* format a human-readable name for @work into @buf (at most @len) */
	void (*work_name)(struct kwork_work *work,
			  char *buf, int len);
};
0154
struct perf_kwork;

/*
 * Per-subcommand callbacks invoked for the three trace stages of a work
 * item (raise/entry/exit).  NOTE(review): handlers look optional per
 * subcommand — confirm that dispatch sites check for NULL before calling.
 */
struct trace_kwork_handler {
	int (*raise_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	int (*entry_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	int (*exit_event)(struct perf_kwork *kwork,
			  struct kwork_class *class, struct evsel *evsel,
			  struct perf_sample *sample, struct machine *machine);
};
0169
/* Top-level state for the perf kwork command. */
struct perf_kwork {
	/*
	 * metadata
	 */
	struct perf_tool tool;
	struct list_head class_list;		/* registered kwork_class instances */
	struct list_head atom_page_list;	/* allocated kwork_atom_page blocks */
	struct list_head sort_list, cmp_id;	/* active sort keys / comparators */
	struct rb_root_cached sorted_work_root;	/* works re-sorted for output */
	const struct trace_kwork_handler *tp_handler;

	/*
	 * profile filters
	 */
	const char *profile_name;	/* only report works matching this name */

	const char *cpu_list;		/* user CPU filter string */
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

	const char *time_str;		/* user time-window filter string */
	struct perf_time_interval ptime;

	/*
	 * options for command
	 */
	bool force;
	const char *event_list_str;
	enum kwork_report_type report;

	/*
	 * options for subcommand
	 */
	bool summary;
	const char *sort_order;
	bool show_callchain;
	unsigned int max_stack;
	bool use_bpf;

	/*
	 * statistics
	 */
	u64 timestart;			/* first/last sample timestamps seen */
	u64 timeend;

	unsigned long nr_events;
	unsigned long nr_lost_chunks;
	unsigned long nr_lost_events;

	u64 all_runtime;
	u64 all_count;
	u64 nr_skipped_events[KWORK_TRACE_MAX + 1];	/* per stage; NOTE(review):
							 * last slot presumably the
							 * total — confirm */
};
0222
/*
 * Find @key in @class's work tree, inserting a new work when absent, and
 * return the tree-resident work.  NOTE(review): return-on-failure
 * semantics (likely NULL on allocation failure) inferred — confirm
 * against the definition in builtin-kwork.c.
 */
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
				       struct kwork_class *class,
				       struct kwork_work *key);
0226
#ifdef HAVE_BPF_SKEL

/* BPF-based collection, available when perf is built with BPF skeletons. */
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork);
int perf_kwork__report_read_bpf(struct perf_kwork *kwork);
void perf_kwork__report_cleanup_bpf(void);

void perf_kwork__trace_start(void);
void perf_kwork__trace_finish(void);

#else  /* !HAVE_BPF_SKEL */

/*
 * Stubs for builds without BPF skeleton support: prepare/read fail with
 * -1 so callers can report the missing feature; cleanup and the
 * start/finish hooks become no-ops.
 */
static inline int
perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}

static inline int
perf_kwork__report_read_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}

static inline void perf_kwork__report_cleanup_bpf(void) {}

static inline void perf_kwork__trace_start(void) {}
static inline void perf_kwork__trace_finish(void) {}

#endif  /* HAVE_BPF_SKEL */
0256
0257 #endif