0001
0002
0003 #include "vmlinux.h"
0004 #include <bpf/bpf_helpers.h>
0005 #include <bpf/bpf_tracing.h>
0006
0007
0008 #define NUM_BUCKET 22
0009
/*
 * Function-entry timestamps, one per in-flight call, keyed by the full
 * bpf_get_current_pid_tgid() value (tgid in the upper 32 bits, tid in
 * the lower).  Written by the kprobe (func_begin), consumed and deleted
 * by the kretprobe (func_end) to compute per-call latency.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, 10000);	/* cap on concurrently-tracked calls */
} functime SEC(".maps");
0016
/*
 * Allowed-CPU set, consulted only when has_cpu is non-zero.  Key is a
 * CPU number; mere presence of the key means "trace on this CPU" (the
 * __u8 value itself is ignored).  Presumably populated from user space
 * before attach — NOTE(review): loader not visible here, confirm.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} cpu_filter SEC(".maps");
0023
/*
 * Allowed-task set, consulted only when has_task is non-zero.  Key is a
 * thread id (low 32 bits of bpf_get_current_pid_tgid()); presence of
 * the key means "trace this task" (the __u8 value is ignored).
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} task_filter SEC(".maps");
0030
/*
 * Per-CPU power-of-two latency histogram.  Index k counts calls whose
 * duration fell below cmp_base << k (usec by default, nsec when
 * use_nsec is set); the last bucket collects everything larger.
 * Per-CPU storage lets func_end increment without atomics; user space
 * sums across CPUs when reading.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, NUM_BUCKET);
} latency SEC(".maps");
0037
0038
/* Control knobs, presumably set by user space (skeleton rodata/bss)
 * before the programs attach — TODO confirm against the loader. */
int enabled = 0;	/* record samples only while non-zero */
int has_cpu = 0;	/* non-zero: consult the cpu_filter map */
int has_task = 0;	/* non-zero: consult the task_filter map */
int use_nsec = 0;	/* histogram unit: nsec if set, else usec */
0043
0044 SEC("kprobe/func")
0045 int BPF_PROG(func_begin)
0046 {
0047 __u64 key, now;
0048
0049 if (!enabled)
0050 return 0;
0051
0052 key = bpf_get_current_pid_tgid();
0053
0054 if (has_cpu) {
0055 __u32 cpu = bpf_get_smp_processor_id();
0056 __u8 *ok;
0057
0058 ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
0059 if (!ok)
0060 return 0;
0061 }
0062
0063 if (has_task) {
0064 __u32 pid = key & 0xffffffff;
0065 __u8 *ok;
0066
0067 ok = bpf_map_lookup_elem(&task_filter, &pid);
0068 if (!ok)
0069 return 0;
0070 }
0071
0072 now = bpf_ktime_get_ns();
0073
0074
0075 bpf_map_update_elem(&functime, &key, &now, BPF_ANY);
0076 return 0;
0077 }
0078
0079 SEC("kretprobe/func")
0080 int BPF_PROG(func_end)
0081 {
0082 __u64 tid;
0083 __u64 *start;
0084 __u64 cmp_base = use_nsec ? 1 : 1000;
0085
0086 if (!enabled)
0087 return 0;
0088
0089 tid = bpf_get_current_pid_tgid();
0090
0091 start = bpf_map_lookup_elem(&functime, &tid);
0092 if (start) {
0093 __s64 delta = bpf_ktime_get_ns() - *start;
0094 __u32 key;
0095 __u64 *hist;
0096
0097 bpf_map_delete_elem(&functime, &tid);
0098
0099 if (delta < 0)
0100 return 0;
0101
0102
0103 for (key = 0; key < (NUM_BUCKET - 1); key++) {
0104 if (delta < (cmp_base << key))
0105 break;
0106 }
0107
0108 hist = bpf_map_lookup_elem(&latency, &key);
0109 if (!hist)
0110 return 0;
0111
0112 *hist += 1;
0113 }
0114
0115 return 0;
0116 }