// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "affinity.h"
#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bperf_cgroup.skel.h"

static struct perf_event_attr cgrp_switch_attr = {
	.type = PERF_TYPE_SOFTWARE,
	.config = PERF_COUNT_SW_CGROUP_SWITCHES,
	.size = sizeof(cgrp_switch_attr),
	.sample_period = 1,
	.disabled = 1,
};

static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;

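/* FD() returns the perf_event fd of @evt at cpu-map index @cpu (thread 0) */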
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
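
/*
 * Open and load the bperf cgroup skeleton, attach its program to a
 * cgroup-switch software event on every cpu, open one copy of each
 * event (without a cgroup), and fill the BPF maps that translate
 * cgroup ids and (event, cpu) pairs to reading slots.
 */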
static int bperf_load_program(struct evlist *evlist)
{
	struct bpf_link *link;
	struct evsel *evsel;
	struct cgroup *cgrp, *leader_cgrp;
	int i, j;
	struct perf_cpu cpu;
	int total_cpus = cpu__max_cpu().cpu;
	int map_size, map_fd;
	int prog_fd, err;

	skel = bperf_cgroup_bpf__open();
	if (!skel) {
		pr_err("Failed to open cgroup skeleton\n");
		return -1;
	}

	/* the evlist carries one copy of each event per cgroup */
	BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);

	skel->rodata->num_cpus = total_cpus;
	skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;

	/* we need one copy of the events per cpu for reading */
	map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.events, map_size);
	bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);

	/* previous readings are kept per event in a per-cpu array */
	map_size = evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.prev_readings, map_size);

	/* cgroup readings need one per-cpu entry for every (event, cgroup) */
	map_size = evlist->core.nr_entries;
	bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);

	set_max_rlimit();

	err = bperf_cgroup_bpf__load(skel);
	if (err) {
		pr_err("Failed to load cgroup skeleton\n");
		goto out;
	}

	if (cgroup_is_v2("perf_event") > 0)
		skel->bss->use_cgroup_v2 = 1;

	err = -1;

	cgrp_switch = evsel__new(&cgrp_switch_attr);
	if (!cgrp_switch) {
		pr_err("Failed to allocate cgroup switches event\n");
		goto out;
	}

	if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
		pr_err("Failed to open cgroup switches event\n");
		goto out;
	}

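	/* attach the BPF program to the cgroup-switch event on each cpu */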
	perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
		link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
						      FD(cgrp_switch, i));
		if (IS_ERR(link)) {
			pr_err("Failed to attach cgroup program\n");
			err = PTR_ERR(link);
			goto out;
		}
	}

	/*
	 * Walk the evlist to fill the cgrp_idx map from cgroup id to event
	 * index, opening each event only once, for its first cgroup.
	 */
	cgrp = NULL;
	i = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
			leader_cgrp = evsel->cgrp;
			evsel->cgrp = NULL;

			/* open a single copy of the events without a cgroup */
			err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
			if (err) {
				pr_err("Failed to open first cgroup events\n");
				goto out;
			}

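			/* flatten (event idx, cpu) into the slot keeping the perf_event fd */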
			map_fd = bpf_map__fd(skel->maps.events);
			perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
				int fd = FD(evsel, j);
				__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;

				err = bpf_map_update_elem(map_fd, &idx, &fd,
							  BPF_ANY);
				if (err < 0) {
					pr_err("Failed to update perf_event fd\n");
					goto out;
				}
			}

			evsel->cgrp = leader_cgrp;
		}
		evsel->supported = true;

		if (evsel->cgrp == cgrp)
			continue;

		cgrp = evsel->cgrp;

		if (read_cgroup_id(cgrp) < 0) {
			pr_err("Failed to get cgroup id for %s\n", cgrp->name);
			err = -1;
			goto out;
		}

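		/* map the kernel cgroup id to the cgroup's sequential index */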
		map_fd = bpf_map__fd(skel->maps.cgrp_idx);
		err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
		if (err < 0) {
			pr_err("Failed to update cgroup index map\n");
			goto out;
		}

		i++;
	}

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get an accurate reading; check
	 * whether the kernel supports it.
	 */
	prog_fd = bpf_program__fd(skel->progs.trigger_read);
	err = bperf_trigger_reading(prog_fd, 0);
	if (err) {
		pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
			   "Therefore, --for-each-cgroup might show inaccurate readings\n");
		err = 0;
	}

out:
	return err;
}

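/*
 * Loading is shared across the evlist: the first evsel loads the
 * skeleton and later calls only set up per-evsel state.
 */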
static int bperf_cgrp__load(struct evsel *evsel,
			    struct target *target __maybe_unused)
{
	static bool bperf_loaded = false;

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	if (!bperf_loaded && bperf_load_program(evsel->evlist))
		return -1;

	bperf_loaded = true;
	/* set a dummy follower skeleton just to bypass bpf_counter_skip() */
	evsel->follower_skel = (struct bperf_follower_bpf *)skel;

	return 0;
}

static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
				  int cpu __maybe_unused, int fd __maybe_unused)
{
	/* nothing to do: the fds were installed in bperf_load_program() */
	return 0;
}

/*
 * Trigger the leader program on each cpu so the cgrp_readings map
 * holds the latest results.
 */
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
	struct perf_cpu cpu;
	int idx;
	int prog_fd = bpf_program__fd(skel->progs.trigger_read);

	perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
		bperf_trigger_reading(prog_fd, cpu.cpu);

	return 0;
}

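/*
 * Enabling and disabling are done once per evlist, driven by the first
 * evsel: sync the counters, then flip the global flag in the BPF program.
 */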
static int bperf_cgrp__enable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 1;
	return 0;
}

static int bperf_cgrp__disable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 0;
	return 0;
}

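/*
 * Read all events for all cgroups in one pass: sync the BPF side, then
 * copy the per-cpu values out of the cgrp_readings map.
 */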
static int bperf_cgrp__read(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	int total_cpus = cpu__max_cpu().cpu;
	struct perf_counts_values *counts;
	struct bpf_perf_event_value *values;
	int reading_map_fd, err = 0;

	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	values = calloc(total_cpus, sizeof(*values));
	if (values == NULL)
		return -ENOMEM;

	reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);

	evlist__for_each_entry(evlist, evsel) {
		__u32 idx = evsel->core.idx;
		int i;
		struct perf_cpu cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
		if (err) {
			pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
			       idx, evsel__name(evsel), evsel->cgrp->name);
			goto out;
		}

		perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
			counts = perf_counts(evsel->counts, i, 0);
			counts->val = values[cpu.cpu].counter;
			counts->ena = values[cpu.cpu].enabled;
			counts->run = values[cpu.cpu].running;
		}
	}

out:
	free(values);
	return err;
}

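/* the first evsel tears down the shared skeleton and the switch event */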
static int bperf_cgrp__destroy(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgroup_bpf__destroy(skel);
	evsel__delete(cgrp_switch);

	return 0;
}

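/* counter callbacks used by bpf_counter for per-cgroup counting */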
struct bpf_counter_ops bperf_cgrp_ops = {
	.load       = bperf_cgrp__load,
	.enable     = bperf_cgrp__enable,
	.disable    = bperf_cgrp__disable,
	.read       = bperf_cgrp__read,
	.install_pe = bperf_cgrp__install_pe,
	.destroy    = bperf_cgrp__destroy,
};