// SPDX-License-Identifier: GPL-2.0
/*
 * bpf_kwork.c
 *
 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
 */

#include <time.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#include <linux/time64.h>

#include "util/debug.h"
#include "util/kwork.h"

#include <bpf/bpf.h>

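/*
 * Skeleton header generated at build time ("bpftool gen skeleton")
 * from util/bpf_skel/kwork_trace.bpf.c.
 */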
#include "util/bpf_skel/kwork_trace.skel.h"

/*
 * This should be in sync with "util/kwork_trace.bpf.c"
 */
#define MAX_KWORKNAME 128

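/*
 * Key for entries in the BPF name and report maps: kwork class,
 * CPU, and a class-specific work id.
 */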
struct work_key {
    u32 type;
    u32 cpu;
    u64 id;
};

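/*
 * Aggregated per-work statistics read back from the perf_kwork_report
 * map; the layout must match the value type used on the BPF side.
 */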
struct report_data {
    u64 nr;
    u64 total_time;
    u64 max_time;
    u64 max_time_start;
    u64 max_time_end;
};

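/*
 * Per-class glue: ties a generic kwork_class to the BPF program
 * selection and work-name lookup for that class.
 */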
struct kwork_class_bpf {
    struct kwork_class *class;

    void (*load_prepare)(struct perf_kwork *kwork);
    int  (*get_work_name)(struct work_key *key, char **ret_name);
};

static struct kwork_trace_bpf *skel;

static struct timespec ts_start;
static struct timespec ts_end;

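/*
 * Record the trace window and toggle the global 'enabled' flag that
 * the BPF programs check before collecting data.
 */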
void perf_kwork__trace_start(void)
{
    clock_gettime(CLOCK_MONOTONIC, &ts_start);
    skel->bss->enabled = 1;
}

void perf_kwork__trace_finish(void)
{
    clock_gettime(CLOCK_MONOTONIC, &ts_end);
    skel->bss->enabled = 0;
}

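/*
 * Look up the human-readable name the BPF side recorded for @key.
 * A missing name is not an error: *ret_name is left NULL and 0 is
 * returned; -1 is returned only if duplicating the name fails.
 */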
static int get_work_name_from_map(struct work_key *key, char **ret_name)
{
    char name[MAX_KWORKNAME] = { 0 };
    int fd = bpf_map__fd(skel->maps.perf_kwork_names);

    *ret_name = NULL;

    if (fd < 0) {
        pr_debug("Invalid names map fd\n");
        return 0;
    }

    if ((bpf_map_lookup_elem(fd, key, name) == 0) && (strlen(name) != 0)) {
        *ret_name = strdup(name);
        if (*ret_name == NULL) {
            pr_err("Failed to copy work name\n");
            return -1;
        }
    }

    return 0;
}

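/*
 * The load_prepare callbacks enable autoload only for the tracepoint
 * programs the requested report type needs; all other programs stay
 * disabled (see perf_kwork__trace_prepare_bpf()). The irq class
 * supports only runtime reporting here.
 */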
static void irq_load_prepare(struct perf_kwork *kwork)
{
    if (kwork->report == KWORK_REPORT_RUNTIME) {
        bpf_program__set_autoload(skel->progs.report_irq_handler_entry, true);
        bpf_program__set_autoload(skel->progs.report_irq_handler_exit, true);
    }
}

static struct kwork_class_bpf kwork_irq_bpf = {
    .load_prepare  = irq_load_prepare,
    .get_work_name = get_work_name_from_map,
};

static void softirq_load_prepare(struct perf_kwork *kwork)
{
    if (kwork->report == KWORK_REPORT_RUNTIME) {
        bpf_program__set_autoload(skel->progs.report_softirq_entry, true);
        bpf_program__set_autoload(skel->progs.report_softirq_exit, true);
    } else if (kwork->report == KWORK_REPORT_LATENCY) {
        bpf_program__set_autoload(skel->progs.latency_softirq_raise, true);
        bpf_program__set_autoload(skel->progs.latency_softirq_entry, true);
    }
}

static struct kwork_class_bpf kwork_softirq_bpf = {
    .load_prepare  = softirq_load_prepare,
    .get_work_name = get_work_name_from_map,
};

static void workqueue_load_prepare(struct perf_kwork *kwork)
{
    if (kwork->report == KWORK_REPORT_RUNTIME) {
        bpf_program__set_autoload(skel->progs.report_workqueue_execute_start, true);
        bpf_program__set_autoload(skel->progs.report_workqueue_execute_end, true);
    } else if (kwork->report == KWORK_REPORT_LATENCY) {
        bpf_program__set_autoload(skel->progs.latency_workqueue_activate_work, true);
        bpf_program__set_autoload(skel->progs.latency_workqueue_execute_start, true);
    }
}

static struct kwork_class_bpf kwork_workqueue_bpf = {
    .load_prepare  = workqueue_load_prepare,
    .get_work_name = get_work_name_from_map,
};

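/* Classes with BPF tracing support, indexed by kwork_class_type. */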
static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
    [KWORK_CLASS_IRQ]       = &kwork_irq_bpf,
    [KWORK_CLASS_SOFTIRQ]   = &kwork_softirq_bpf,
    [KWORK_CLASS_WORKQUEUE] = &kwork_workqueue_bpf,
};

static bool valid_kwork_class_type(enum kwork_class_type type)
{
    return type >= 0 && type < KWORK_CLASS_MAX;
}

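/*
 * Apply the optional CPU and name filters to the corresponding BPF
 * maps. Must run after the skeleton is loaded (so the maps exist)
 * and before the programs are attached.
 */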
static int setup_filters(struct perf_kwork *kwork)
{
    u8 val = 1;
    int i, nr_cpus, key, fd;
    struct perf_cpu_map *map;

    if (kwork->cpu_list != NULL) {
        fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
        if (fd < 0) {
            pr_debug("Invalid cpu filter fd\n");
            return -1;
        }

        map = perf_cpu_map__new(kwork->cpu_list);
        if (map == NULL) {
            pr_debug("Invalid cpu_list\n");
            return -1;
        }

        nr_cpus = libbpf_num_possible_cpus();
        for (i = 0; i < perf_cpu_map__nr(map); i++) {
            struct perf_cpu cpu = perf_cpu_map__cpu(map, i);

            if (cpu.cpu >= nr_cpus) {
                perf_cpu_map__put(map);
                pr_err("Requested cpu %d too large\n", cpu.cpu);
                return -1;
            }
            bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
        }
        perf_cpu_map__put(map);

        skel->bss->has_cpu_filter = 1;
    }

    if (kwork->profile_name != NULL) {
        if (strlen(kwork->profile_name) >= MAX_KWORKNAME) {
            pr_err("Requested name filter %s too large, limit to %d\n",
                   kwork->profile_name, MAX_KWORKNAME - 1);
            return -1;
        }

        fd = bpf_map__fd(skel->maps.perf_kwork_name_filter);
        if (fd < 0) {
            pr_debug("Invalid name filter fd\n");
            return -1;
        }

        key = 0;
        bpf_map_update_elem(fd, &key, kwork->profile_name, BPF_ANY);

        skel->bss->has_name_filter = 1;
    }

    return 0;
}

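/*
 * Open the skeleton, disable autoload for every program, let each
 * requested class enable the programs it needs, then load, install
 * the filters, and attach.
 */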
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork)
{
    struct bpf_program *prog;
    struct kwork_class *class;
    struct kwork_class_bpf *class_bpf;
    enum kwork_class_type type;

    skel = kwork_trace_bpf__open();
    if (!skel) {
        pr_debug("Failed to open kwork trace skeleton\n");
        return -1;
    }

    /*
     * Set all programs to non-autoload, then enable only those
     * required by the configured report type.
     */
    bpf_object__for_each_program(prog, skel->obj)
        bpf_program__set_autoload(prog, false);

    list_for_each_entry(class, &kwork->class_list, list) {
        type = class->type;
        if (!valid_kwork_class_type(type) ||
            (kwork_class_bpf_supported_list[type] == NULL)) {
            pr_err("Unsupported bpf trace class %s\n", class->name);
            goto out;
        }

        class_bpf = kwork_class_bpf_supported_list[type];
        class_bpf->class = class;

        if (class_bpf->load_prepare != NULL)
            class_bpf->load_prepare(kwork);
    }

    if (kwork_trace_bpf__load(skel)) {
        pr_debug("Failed to load kwork trace skeleton\n");
        goto out;
    }

    if (setup_filters(kwork))
        goto out;

    if (kwork_trace_bpf__attach(skel)) {
        pr_debug("Failed to attach kwork trace skeleton\n");
        goto out;
    }

    return 0;

out:
    kwork_trace_bpf__destroy(skel);
    return -1;
}

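/*
 * Convert one report-map entry into a kwork_work and fill in either
 * the runtime or the latency statistics, depending on the report type.
 */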
static int add_work(struct perf_kwork *kwork,
            struct work_key *key,
            struct report_data *data)
{
    struct kwork_work *work;
    struct kwork_class_bpf *bpf_trace;
    struct kwork_work tmp = {
        .id = key->id,
        .name = NULL,
        .cpu = key->cpu,
    };
    enum kwork_class_type type = key->type;

    if (!valid_kwork_class_type(type)) {
        pr_debug("Invalid class type %d to add work\n", type);
        return -1;
    }

    bpf_trace = kwork_class_bpf_supported_list[type];
    tmp.class = bpf_trace->class;

    if ((bpf_trace->get_work_name != NULL) &&
        (bpf_trace->get_work_name(key, &tmp.name)))
        return -1;

    work = perf_kwork_add_work(kwork, tmp.class, &tmp);
    if (work == NULL)
        return -1;

    if (kwork->report == KWORK_REPORT_RUNTIME) {
        work->nr_atoms = data->nr;
        work->total_runtime = data->total_time;
        work->max_runtime = data->max_time;
        work->max_runtime_start = data->max_time_start;
        work->max_runtime_end = data->max_time_end;
    } else if (kwork->report == KWORK_REPORT_LATENCY) {
        work->nr_atoms = data->nr;
        work->total_latency = data->total_time;
        work->max_latency = data->max_time;
        work->max_latency_start = data->max_time_start;
        work->max_latency_end = data->max_time_end;
    } else {
        pr_debug("Invalid bpf report type %d\n", kwork->report);
        return -1;
    }

    kwork->timestart = (u64)ts_start.tv_sec * NSEC_PER_SEC + ts_start.tv_nsec;
    kwork->timeend = (u64)ts_end.tv_sec * NSEC_PER_SEC + ts_end.tv_nsec;

    return 0;
}

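/*
 * Walk the perf_kwork_report map with bpf_map_get_next_key() and add
 * every entry that recorded at least one atom.
 */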
int perf_kwork__report_read_bpf(struct perf_kwork *kwork)
{
    struct report_data data;
    struct work_key key = {
        .type = 0,
        .cpu  = 0,
        .id   = 0,
    };
    struct work_key prev = {
        .type = 0,
        .cpu  = 0,
        .id   = 0,
    };
    int fd = bpf_map__fd(skel->maps.perf_kwork_report);

    if (fd < 0) {
        pr_debug("Invalid report fd\n");
        return -1;
    }

    while (!bpf_map_get_next_key(fd, &prev, &key)) {
        if ((bpf_map_lookup_elem(fd, &key, &data)) != 0) {
            pr_debug("Failed to lookup report elem\n");
            return -1;
        }

        if ((data.nr != 0) && (add_work(kwork, &key, &data) != 0))
            return -1;

        prev = key;
    }
    return 0;
}

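/* Detach the programs and free all skeleton resources. */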
void perf_kwork__report_cleanup_bpf(void)
{
    kwork_trace_bpf__destroy(skel);
}