// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <bpf/bpf.h>

#include "bpf_skel/lock_contention.skel.h"

static struct lock_contention_bpf *skel;

/* should be same as bpf_skel/lock_contention.bpf.c */
struct lock_contention_key {
    s32 stack_id;
};

struct lock_contention_data {
    u64 total_time;
    u64 min_time;
    u64 max_time;
    u32 count;
    u32 flags;
};

int lock_contention_prepare(struct lock_contention *con)
{
    int i, fd;
    int ncpus = 1, ntasks = 1;
    struct evlist *evlist = con->evlist;
    struct target *target = con->target;

    skel = lock_contention_bpf__open();
    if (!skel) {
        pr_err("Failed to open lock-contention BPF skeleton\n");
        return -1;
    }

    /* size the stack trace and lock stat maps before loading */
    bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
    bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);

    if (target__has_cpu(target))
        ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
    if (target__has_task(target))
        ntasks = perf_thread_map__nr(evlist->core.threads);

    bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
    bpf_map__set_max_entries(skel->maps.task_filter, ntasks);

    if (lock_contention_bpf__load(skel) < 0) {
        pr_err("Failed to load lock-contention BPF skeleton\n");
        return -1;
    }

    /* mark the requested CPUs in the cpu_filter map */
    if (target__has_cpu(target)) {
        u32 cpu;
        u8 val = 1;

        skel->bss->has_cpu = 1;
        fd = bpf_map__fd(skel->maps.cpu_filter);

        for (i = 0; i < ncpus; i++) {
            cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
            bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
        }
    }

    /* mark the requested tasks in the task_filter map */
    if (target__has_task(target)) {
        u32 pid;
        u8 val = 1;

        skel->bss->has_task = 1;
        fd = bpf_map__fd(skel->maps.task_filter);

        for (i = 0; i < ntasks; i++) {
            pid = perf_thread_map__pid(evlist->core.threads, i);
            bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
        }
    }

    /* with no explicit target, filter on the forked workload only */
    if (target__none(target) && evlist->workload.pid > 0) {
        u32 pid = evlist->workload.pid;
        u8 val = 1;

        skel->bss->has_task = 1;
        fd = bpf_map__fd(skel->maps.task_filter);
        bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
    }

    if (lock_contention_bpf__attach(skel) < 0) {
        pr_err("Failed to attach lock-contention BPF skeleton\n");
        return -1;
    }
    return 0;
}

int lock_contention_start(void)
{
    skel->bss->enabled = 1;
    return 0;
}

int lock_contention_stop(void)
{
    skel->bss->enabled = 0;
    return 0;
}

int lock_contention_read(struct lock_contention *con)
{
    int fd, stack;
    s32 prev_key, key;
    struct lock_contention_data data;
    struct lock_stat *st;
    struct machine *machine = con->machine;
    u64 stack_trace[CONTENTION_STACK_DEPTH];

    fd = bpf_map__fd(skel->maps.lock_stat);
    stack = bpf_map__fd(skel->maps.stacks);

    con->lost = skel->bss->lost;

    /* walk every entry in the lock_stat map */
    prev_key = 0;
    while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
        struct map *kmap;
        struct symbol *sym;
        int idx;

        bpf_map_lookup_elem(fd, &key, &data);
        st = zalloc(sizeof(*st));
        if (st == NULL)
            return -1;

        st->nr_contended = data.count;
        st->wait_time_total = data.total_time;
        st->wait_time_max = data.max_time;
        st->wait_time_min = data.min_time;

        if (data.count)
            st->avg_wait_time = data.total_time / data.count;

        st->flags = data.flags;

        bpf_map_lookup_elem(stack, &key, stack_trace);

        /* skip BPF + lock internal functions */
        idx = CONTENTION_STACK_SKIP;
        while (is_lock_function(machine, stack_trace[idx]) &&
               idx < CONTENTION_STACK_DEPTH - 1)
            idx++;

        st->addr = stack_trace[idx];
        sym = machine__find_kernel_symbol(machine, st->addr, &kmap);

        if (sym) {
            unsigned long offset;
            int ret = 0;

            offset = kmap->map_ip(kmap, st->addr) - sym->start;

            if (offset)
                ret = asprintf(&st->name, "%s+%#lx", sym->name, offset);
            else
                st->name = strdup(sym->name);

            if (ret < 0 || st->name == NULL) {
                free(st);
                return -1;
            }
        } else if (asprintf(&st->name, "%#lx", (unsigned long)st->addr) < 0) {
            free(st);
            return -1;
        }

        hlist_add_head(&st->hash_entry, con->result);
        prev_key = key;
    }

    return 0;
}

int lock_contention_finish(void)
{
    if (skel) {
        skel->bss->enabled = 0;
        lock_contention_bpf__destroy(skel);
    }

    return 0;
}