#define _GNU_SOURCE

#include <assert.h>
#include <fcntl.h>
#include <linux/perf_event.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "perf-sys.h"

#define SAMPLE_PERIOD 0x7fffffffffffffffULL

/* fds of the "counters", "values" and "values2" maps, resolved in main() */
static int map_fd[3];

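/*
 * Runs in a forked child pinned to one CPU: open the perf event described
 * by attr on that CPU, publish its fd in the perf event array map, then
 * poke the attached BPF programs (the map syscalls below act as triggers)
 * and check the per-CPU results they leave in the "values"/"values2" maps.
 */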
static void check_on_cpu(int cpu, struct perf_event_attr *attr)
{
	struct bpf_perf_event_value value2;
	int pmu_fd, error = 0;
	cpu_set_t set;
	__u64 value;

	/* move this child to the target CPU */
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	assert(sched_setaffinity(0, sizeof(set), &set) == 0);

	/* open the perf event on this CPU and store its fd in the array map */
	pmu_fd = sys_perf_event_open(attr, -1 /* pid */, cpu, -1 /* group_fd */, 0);
	if (pmu_fd < 0) {
		fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
		error = 1;
		goto on_exit;
	}
	assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);

	/* trigger the first BPF program, which stores the counter in map_fd[1] */
	bpf_map_get_next_key(map_fd[1], &cpu, NULL);

	if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
		fprintf(stderr, "Value missing for CPU %d\n", cpu);
		error = 1;
		goto on_exit;
	} else {
		fprintf(stderr, "CPU %d: %llu\n", cpu, value);
	}

	/* the lookup above should trigger the second program, which fills map_fd[2] */
	if (bpf_map_lookup_elem(map_fd[2], &cpu, &value2)) {
		fprintf(stderr, "Value2 missing for CPU %d\n", cpu);
		error = 1;
		goto on_exit;
	} else {
		fprintf(stderr, "CPU %d: counter: %llu, enabled: %llu, running: %llu\n", cpu,
			value2.counter, value2.enabled, value2.running);
	}

on_exit:
	assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE, 0) == 0 || error);
	assert(close(pmu_fd) == 0 || error);
	assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
	exit(error);
}

static void test_perf_event_array(struct perf_event_attr *attr,
				  const char *name)
{
	int i, status, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	pid_t pid[nr_cpus];
	int err = 0;

	printf("Test reading %s counters\n", name);

	for (i = 0; i < nr_cpus; i++) {
		pid[i] = fork();
		assert(pid[i] >= 0);
		if (pid[i] == 0) {
			check_on_cpu(i, attr);
			exit(1);
		}
	}

	for (i = 0; i < nr_cpus; i++) {
		assert(waitpid(pid[i], &status, 0) == pid[i]);
		err |= status;
	}

	if (err)
		printf("Test: %s FAILED\n", name);
}

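/*
 * Run the per-CPU check against a spread of event types: hardware,
 * software, raw, hw-cache and a dynamically-registered PMU event.
 */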
static void test_bpf_perf_event(void)
{
	struct perf_event_attr attr_cycles = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_HARDWARE,
		.read_format = 0,
		.sample_type = 0,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_event_attr attr_clock = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_SOFTWARE,
		.read_format = 0,
		.sample_type = 0,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_event_attr attr_raw = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_RAW,
		.read_format = 0,
		.sample_type = 0,
		/* raw event 0xc0 is "instructions retired" on Intel CPUs */
		.config = 0xc0,
	};
	struct perf_event_attr attr_l1d_load = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_HW_CACHE,
		.read_format = 0,
		.sample_type = 0,
		.config =
			PERF_COUNT_HW_CACHE_L1D |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
	};
	struct perf_event_attr attr_llc_miss = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_HW_CACHE,
		.read_format = 0,
		.sample_type = 0,
		.config =
			PERF_COUNT_HW_CACHE_LL |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
	};
	struct perf_event_attr attr_msr_tsc = {
		.freq = 0,
		.sample_period = 0,
		.inherit = 0,
		/* dynamic "msr" PMU; its type id comes from
		 * /sys/bus/event_source/devices/msr/type and may differ per system
		 */
		.type = 7,
		.read_format = 0,
		.sample_type = 0,
		.config = 0,
	};

	test_perf_event_array(&attr_cycles, "HARDWARE-cycles");
	test_perf_event_array(&attr_clock, "SOFTWARE-clock");
	test_perf_event_array(&attr_raw, "RAW-instruction-retired");
	test_perf_event_array(&attr_l1d_load, "HW_CACHE-L1D-load");

	/* the cache events below are not supported by qemu */
	test_perf_event_array(&attr_llc_miss, "HW_CACHE-LLC-miss");
	test_perf_event_array(&attr_msr_tsc, "Dynamic-msr-tsc");
}

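/*
 * Open and load the companion argv[0]_kern.o object, resolve the three
 * maps, attach its BPF programs, run the tests, then tear everything down.
 */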
int main(int argc, char **argv)
{
	struct bpf_link *links[2];
	struct bpf_program *prog;
	struct bpf_object *obj;
	char filename[256];
	int i = 0;

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	obj = bpf_object__open_file(filename, NULL);
	if (libbpf_get_error(obj)) {
		fprintf(stderr, "ERROR: opening BPF object file failed\n");
		return 0;
	}

	/* load the BPF programs into the kernel */
	if (bpf_object__load(obj)) {
		fprintf(stderr, "ERROR: loading BPF object file failed\n");
		goto cleanup;
	}

	map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counters");
	map_fd[1] = bpf_object__find_map_fd_by_name(obj, "values");
	map_fd[2] = bpf_object__find_map_fd_by_name(obj, "values2");
	if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) {
		fprintf(stderr, "ERROR: finding a map in obj file failed\n");
		goto cleanup;
	}

	/* attach each program in the object; links[] assumes at most two programs */
	bpf_object__for_each_program(prog, obj) {
		links[i] = bpf_program__attach(prog);
		if (libbpf_get_error(links[i])) {
			fprintf(stderr, "ERROR: bpf_program__attach failed\n");
			links[i] = NULL;
			goto cleanup;
		}
		i++;
	}

	test_bpf_perf_event();

cleanup:
	for (i--; i >= 0; i--)
		bpf_link__destroy(links[i]);

	bpf_object__close(obj);
	return 0;
}