#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define KWORK_COUNT 100
#define MAX_KWORKNAME 128

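/*
 * Classes of kernel work tracked here; the values are assumed to stay
 * in sync with the matching enum on the perf userspace side.
 */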
enum kwork_class_type {
	KWORK_CLASS_IRQ,
	KWORK_CLASS_SOFTIRQ,
	KWORK_CLASS_WORKQUEUE,
	KWORK_CLASS_MAX,
};

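/*
 * Identifies one work instance: its class, the CPU it ran on, and a
 * class-specific id (IRQ number, softirq vector, or work struct
 * address).
 */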
struct work_key {
	__u32 type;
	__u32 cpu;
	__u64 id;
};

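/*
 * Aggregated result per work item: event count, summed runtime, and
 * the longest single span together with its start/end timestamps.
 */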
struct report_data {
	__u64 nr;
	__u64 total_time;
	__u64 max_time;
	__u64 max_time_start;
	__u64 max_time_end;
};

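/* work_key -> human-readable name, filled in lazily from tracepoints. */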
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct work_key));
	__uint(value_size, MAX_KWORKNAME);
	__uint(max_entries, KWORK_COUNT);
} perf_kwork_names SEC(".maps");

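/* work_key -> start timestamp (ns) of the span currently in flight. */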
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct work_key));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, KWORK_COUNT);
} perf_kwork_time SEC(".maps");

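/* work_key -> accumulated report_data, consumed by userspace. */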
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct work_key));
	__uint(value_size, sizeof(struct report_data));
	__uint(max_entries, KWORK_COUNT);
} perf_kwork_report SEC(".maps");

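/*
 * CPU allow-list: a key's presence means "trace this CPU". With
 * max_entries of 1 as written, userspace is presumably expected to
 * resize the map before loading when filtering on several CPUs.
 */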
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} perf_kwork_cpu_filter SEC(".maps");

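/* Single-slot array holding the optional name-filter string. */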
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, MAX_KWORKNAME);
	__uint(max_entries, 1);
} perf_kwork_name_filter SEC(".maps");

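/*
 * Runtime switches written by userspace: a global on/off toggle and
 * flags saying whether the filter maps were populated.
 *
 * A minimal sketch of how a loader might drive them with libbpf,
 * assuming a skeleton generated under the hypothetical name
 * "kwork_trace_bpf" (the actual perf loader may differ):
 *
 *	struct kwork_trace_bpf *skel = kwork_trace_bpf__open_and_load();
 *	__u32 cpu = 0;
 *	__u8 one = 1;
 *
 *	bpf_map__update_elem(skel->maps.perf_kwork_cpu_filter,
 *			     &cpu, sizeof(cpu), &one, sizeof(one), BPF_ANY);
 *	skel->bss->has_cpu_filter = 1;
 *	kwork_trace_bpf__attach(skel);
 *	skel->bss->enabled = 1;	/* start counting */
 */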
int enabled = 0;
int has_cpu_filter = 0;
int has_name_filter = 0;

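/*
 * Bounded strncmp usable from BPF: compares at most sz bytes and stops
 * at the first difference or NUL in either string.
 */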
static __always_inline int local_strncmp(const char *s1,
					 unsigned int sz, const char *s2)
{
	int ret = 0;
	unsigned int i;

	for (i = 0; i < sz; i++) {
		ret = (unsigned char)s1[i] - (unsigned char)s2[i];
		if (ret || !s1[i] || !s2[i])
			break;
	}

	return ret;
}

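/*
 * Gate for every tracepoint handler: returns 1 only if tracing is
 * enabled and the current CPU and (when given) the work name pass the
 * optional filters.
 */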
static __always_inline int trace_event_match(struct work_key *key, char *name)
{
	__u8 *cpu_val;
	char *name_val;
	__u32 zero = 0;
	__u32 cpu = bpf_get_smp_processor_id();

	if (!enabled)
		return 0;

	if (has_cpu_filter) {
		cpu_val = bpf_map_lookup_elem(&perf_kwork_cpu_filter, &cpu);
		if (!cpu_val)
			return 0;
	}

	if (has_name_filter && (name != NULL)) {
		name_val = bpf_map_lookup_elem(&perf_kwork_name_filter, &zero);
		if (name_val &&
		    (local_strncmp(name_val, MAX_KWORKNAME, name) != 0)) {
			return 0;
		}
	}

	return 1;
}

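/*
 * Fold one completed span [time_start, time_end] into the report
 * entry for this key, creating the entry on first use.
 */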
static __always_inline void do_update_time(void *map, struct work_key *key,
					   __u64 time_start, __u64 time_end)
{
	struct report_data zero, *data;
	__s64 delta = time_end - time_start;

	/* Discard spans whose end precedes their start. */
	if (delta < 0)
		return;

	data = bpf_map_lookup_elem(map, key);
	if (!data) {
		/*
		 * Hash maps have no atomic get-or-create: insert a zeroed
		 * entry (BPF_NOEXIST, so a concurrent insert is not
		 * clobbered), then look it up again.
		 */
		__builtin_memset(&zero, 0, sizeof(zero));
		bpf_map_update_elem(map, key, &zero, BPF_NOEXIST);
		data = bpf_map_lookup_elem(map, key);
		if (!data)
			return;
	}

	if ((delta > data->max_time) ||
	    (data->max_time == 0)) {
		data->max_time = delta;
		data->max_time_start = time_start;
		data->max_time_end = time_end;
	}

	data->total_time += delta;
	data->nr++;
}

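/* Record "now" as the start timestamp for this work item. */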
static __always_inline void do_update_timestart(void *map, struct work_key *key)
{
	__u64 ts = bpf_ktime_get_ns();

	bpf_map_update_elem(map, key, &ts, BPF_ANY);
}

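/*
 * Close the in-flight span: consume the stored start timestamp and
 * fold the elapsed time into the report map.
 */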
static __always_inline void do_update_timeend(void *report_map, void *time_map,
					      struct work_key *key)
{
	__u64 *time = bpf_map_lookup_elem(time_map, key);

	if (time) {
		bpf_map_delete_elem(time_map, key);
		do_update_time(report_map, key, *time, bpf_ktime_get_ns());
	}
}

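/* Store the name once; later events for the same key keep the first name seen. */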
static __always_inline void do_update_name(void *map,
					   struct work_key *key, char *name)
{
	if (!bpf_map_lookup_elem(map, key))
		bpf_map_update_elem(map, key, name, BPF_ANY);
}

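/*
 * Filtered front-ends used directly as tracepoint handler bodies; each
 * applies trace_event_match() before touching the maps and returns 0.
 */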
static __always_inline int update_timestart(void *map, struct work_key *key)
{
	if (!trace_event_match(key, NULL))
		return 0;

	do_update_timestart(map, key);
	return 0;
}

static __always_inline int update_timestart_and_name(void *time_map,
						     void *names_map,
						     struct work_key *key,
						     char *name)
{
	if (!trace_event_match(key, name))
		return 0;

	do_update_timestart(time_map, key);
	do_update_name(names_map, key, name);

	return 0;
}

static __always_inline int update_timeend(void *report_map,
					  void *time_map, struct work_key *key)
{
	if (!trace_event_match(key, NULL))
		return 0;

	do_update_timeend(report_map, time_map, key);

	return 0;
}

static __always_inline int update_timeend_and_name(void *report_map,
						   void *time_map,
						   void *names_map,
						   struct work_key *key,
						   char *name)
{
	if (!trace_event_match(key, name))
		return 0;

	do_update_timeend(report_map, time_map, key);
	do_update_name(names_map, key, name);

	return 0;
}

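/*
 * Report mode, hardirq: one span runs from irq_handler_entry to
 * irq_handler_exit, keyed by CPU and IRQ number.
 */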
SEC("tracepoint/irq/irq_handler_entry")
int report_irq_handler_entry(struct trace_event_raw_irq_handler_entry *ctx)
{
	char name[MAX_KWORKNAME];
	struct work_key key = {
		.type = KWORK_CLASS_IRQ,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)ctx->irq,
	};
	/*
	 * __data_loc fields pack the payload offset in the low 16 bits
	 * (and its length in the high 16), so mask to find the string.
	 */
	void *name_addr = (void *)ctx + (ctx->__data_loc_name & 0xffff);

	bpf_probe_read_kernel_str(name, sizeof(name), name_addr);

	return update_timestart_and_name(&perf_kwork_time,
					 &perf_kwork_names, &key, name);
}

SEC("tracepoint/irq/irq_handler_exit")
int report_irq_handler_exit(struct trace_event_raw_irq_handler_exit *ctx)
{
	struct work_key key = {
		.type = KWORK_CLASS_IRQ,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)ctx->irq,
	};

	return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
}

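/* Softirq vector names indexed by vec, mirroring the kernel's softirq_to_name table. */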
static char softirq_name_list[NR_SOFTIRQS][MAX_KWORKNAME] = {
	{ "HI" },
	{ "TIMER" },
	{ "NET_TX" },
	{ "NET_RX" },
	{ "BLOCK" },
	{ "IRQ_POLL" },
	{ "TASKLET" },
	{ "SCHED" },
	{ "HRTIMER" },
	{ "RCU" },
};

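/*
 * Report mode, softirq: one span runs from softirq_entry to
 * softirq_exit.
 */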
SEC("tracepoint/irq/softirq_entry")
int report_softirq_entry(struct trace_event_raw_softirq *ctx)
{
	unsigned int vec = ctx->vec;
	struct work_key key = {
		.type = KWORK_CLASS_SOFTIRQ,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)vec,
	};

	if (vec < NR_SOFTIRQS) {
		return update_timestart_and_name(&perf_kwork_time,
						 &perf_kwork_names, &key,
						 softirq_name_list[vec]);
	}

	return 0;
}

SEC("tracepoint/irq/softirq_exit")
int report_softirq_exit(struct trace_event_raw_softirq *ctx)
{
	struct work_key key = {
		.type = KWORK_CLASS_SOFTIRQ,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)ctx->vec,
	};

	return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
}

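/*
 * Latency mode, softirq: the interval of interest is raise-to-entry
 * latency (how long a raised softirq waited before it began
 * executing), so softirq_entry closes the interval that softirq_raise
 * opened.
 */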
SEC("tracepoint/irq/softirq_raise")
int latency_softirq_raise(struct trace_event_raw_softirq *ctx)
{
	unsigned int vec = ctx->vec;
	struct work_key key = {
		.type = KWORK_CLASS_SOFTIRQ,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)vec,
	};

	if (vec < NR_SOFTIRQS) {
		return update_timestart_and_name(&perf_kwork_time,
						 &perf_kwork_names, &key,
						 softirq_name_list[vec]);
	}

	return 0;
}

SEC("tracepoint/irq/softirq_entry")
int latency_softirq_entry(struct trace_event_raw_softirq *ctx)
{
	struct work_key key = {
		.type = KWORK_CLASS_SOFTIRQ,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)ctx->vec,
	};

	return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
}

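/*
 * Report mode, workqueue: execution span from workqueue_execute_start
 * to workqueue_execute_end, keyed by the work struct address; the name
 * is recovered at exit by symbolizing the work function via
 * bpf_snprintf("%ps").
 */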
SEC("tracepoint/workqueue/workqueue_execute_start")
int report_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
{
	struct work_key key = {
		.type = KWORK_CLASS_WORKQUEUE,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)ctx->work,
	};

	return update_timestart(&perf_kwork_time, &key);
}

SEC("tracepoint/workqueue/workqueue_execute_end")
int report_workqueue_execute_end(struct trace_event_raw_workqueue_execute_end *ctx)
{
	char name[MAX_KWORKNAME];
	struct work_key key = {
		.type = KWORK_CLASS_WORKQUEUE,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)ctx->work,
	};
	unsigned long long func_addr = (unsigned long long)ctx->function;

	__builtin_memset(name, 0, sizeof(name));
	bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));

	return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
				       &perf_kwork_names, &key, name);
}

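/*
 * Latency mode, workqueue: measures activate-to-execute latency, so
 * workqueue_execute_start closes the interval opened by
 * workqueue_activate_work.
 */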
SEC("tracepoint/workqueue/workqueue_activate_work")
int latency_workqueue_activate_work(struct trace_event_raw_workqueue_activate_work *ctx)
{
	struct work_key key = {
		.type = KWORK_CLASS_WORKQUEUE,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)ctx->work,
	};

	return update_timestart(&perf_kwork_time, &key);
}

SEC("tracepoint/workqueue/workqueue_execute_start")
int latency_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
{
	char name[MAX_KWORKNAME];
	struct work_key key = {
		.type = KWORK_CLASS_WORKQUEUE,
		.cpu = bpf_get_smp_processor_id(),
		.id = (__u64)ctx->work,
	};
	unsigned long long func_addr = (unsigned long long)ctx->function;

	__builtin_memset(name, 0, sizeof(name));
	bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));

	return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
				       &perf_kwork_names, &key, name);
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";