0001
0002 #include <linux/hw_breakpoint.h>
0003 #include <linux/err.h>
0004 #include <linux/zalloc.h>
0005 #include <dirent.h>
0006 #include <errno.h>
0007 #include <sys/ioctl.h>
0008 #include <sys/param.h>
0009 #include "term.h"
0010 #include "evlist.h"
0011 #include "evsel.h"
0012 #include <subcmd/parse-options.h>
0013 #include "parse-events.h"
0014 #include "string2.h"
0015 #include "strlist.h"
0016 #include "bpf-loader.h"
0017 #include "debug.h"
0018 #include <api/fs/tracing_path.h>
0019 #include <perf/cpumap.h>
0020 #include "parse-events-bison.h"
0021 #include "parse-events-flex.h"
0022 #include "pmu.h"
0023 #include "asm/bug.h"
0024 #include "util/parse-branch-options.h"
0025 #include "util/evsel_config.h"
0026 #include "util/event.h"
0027 #include "perf.h"
0028 #include "util/parse-events-hybrid.h"
0029 #include "util/pmu-hybrid.h"
0030 #include "tracepoint.h"
0031 #include "thread_map.h"
0032
0033 #define MAX_NAME_LEN 100
0034
0035 struct perf_pmu_event_symbol {
0036 char *symbol;
0037 enum perf_pmu_event_symbol_type type;
0038 };
0039
0040 #ifdef PARSER_DEBUG
0041 extern int parse_events_debug;
0042 #endif
0043 int parse_events_parse(void *parse_state, void *scanner);
0044 static int get_config_terms(struct list_head *head_config,
0045 struct list_head *head_terms __maybe_unused);
0046 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
0047 const char *str, char *pmu_name,
0048 struct list_head *list);
0049
0050 static struct perf_pmu_event_symbol *perf_pmu_events_list;
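/*
 * Number of entries in perf_pmu_events_list:
 *  0 - not initialized yet
 * -1 - initialization failed, don't try again
 * >0 - number of PMU event symbols read from sysfs
 */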
0057 static int perf_pmu_events_list_num;
0058
0059 struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
0060 [PERF_COUNT_HW_CPU_CYCLES] = {
0061 .symbol = "cpu-cycles",
0062 .alias = "cycles",
0063 },
0064 [PERF_COUNT_HW_INSTRUCTIONS] = {
0065 .symbol = "instructions",
0066 .alias = "",
0067 },
0068 [PERF_COUNT_HW_CACHE_REFERENCES] = {
0069 .symbol = "cache-references",
0070 .alias = "",
0071 },
0072 [PERF_COUNT_HW_CACHE_MISSES] = {
0073 .symbol = "cache-misses",
0074 .alias = "",
0075 },
0076 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
0077 .symbol = "branch-instructions",
0078 .alias = "branches",
0079 },
0080 [PERF_COUNT_HW_BRANCH_MISSES] = {
0081 .symbol = "branch-misses",
0082 .alias = "",
0083 },
0084 [PERF_COUNT_HW_BUS_CYCLES] = {
0085 .symbol = "bus-cycles",
0086 .alias = "",
0087 },
0088 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
0089 .symbol = "stalled-cycles-frontend",
0090 .alias = "idle-cycles-frontend",
0091 },
0092 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
0093 .symbol = "stalled-cycles-backend",
0094 .alias = "idle-cycles-backend",
0095 },
0096 [PERF_COUNT_HW_REF_CPU_CYCLES] = {
0097 .symbol = "ref-cycles",
0098 .alias = "",
0099 },
0100 };
0101
0102 struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
0103 [PERF_COUNT_SW_CPU_CLOCK] = {
0104 .symbol = "cpu-clock",
0105 .alias = "",
0106 },
0107 [PERF_COUNT_SW_TASK_CLOCK] = {
0108 .symbol = "task-clock",
0109 .alias = "",
0110 },
0111 [PERF_COUNT_SW_PAGE_FAULTS] = {
0112 .symbol = "page-faults",
0113 .alias = "faults",
0114 },
0115 [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
0116 .symbol = "context-switches",
0117 .alias = "cs",
0118 },
0119 [PERF_COUNT_SW_CPU_MIGRATIONS] = {
0120 .symbol = "cpu-migrations",
0121 .alias = "migrations",
0122 },
0123 [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
0124 .symbol = "minor-faults",
0125 .alias = "",
0126 },
0127 [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
0128 .symbol = "major-faults",
0129 .alias = "",
0130 },
0131 [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
0132 .symbol = "alignment-faults",
0133 .alias = "",
0134 },
0135 [PERF_COUNT_SW_EMULATION_FAULTS] = {
0136 .symbol = "emulation-faults",
0137 .alias = "",
0138 },
0139 [PERF_COUNT_SW_DUMMY] = {
0140 .symbol = "dummy",
0141 .alias = "",
0142 },
0143 [PERF_COUNT_SW_BPF_OUTPUT] = {
0144 .symbol = "bpf-output",
0145 .alias = "",
0146 },
0147 [PERF_COUNT_SW_CGROUP_SWITCHES] = {
0148 .symbol = "cgroup-switches",
0149 .alias = "",
0150 },
0151 };
0152
0153 #define __PERF_EVENT_FIELD(config, name) \
0154 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
0155
0156 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
0157 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
0158 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
0159 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
0160
0161 bool is_event_supported(u8 type, u64 config)
0162 {
0163 bool ret = true;
0164 int open_return;
0165 struct evsel *evsel;
0166 struct perf_event_attr attr = {
0167 .type = type,
0168 .config = config,
0169 .disabled = 1,
0170 };
0171 struct perf_thread_map *tmap = thread_map__new_by_tid(0);
0172
0173 if (tmap == NULL)
0174 return false;
0175
0176 evsel = evsel__new(&attr);
0177 if (evsel) {
0178 open_return = evsel__open(evsel, NULL, tmap);
0179 ret = open_return >= 0;
0180
0181 if (open_return == -EACCES) {
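			/*
			 * An EACCES here usually means a restrictive
			 * perf_event_paranoid setting forbids profiling the
			 * kernel, so retry with exclude_kernel set before
			 * declaring the event unsupported.
			 */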
0189 evsel->core.attr.exclude_kernel = 1;
0190 ret = evsel__open(evsel, NULL, tmap) >= 0;
0191 }
0192 evsel__delete(evsel);
0193 }
0194
0195 perf_thread_map__put(tmap);
0196 return ret;
0197 }
0198
0199 const char *event_type(int type)
0200 {
0201 switch (type) {
0202 case PERF_TYPE_HARDWARE:
0203 return "hardware";
0204
0205 case PERF_TYPE_SOFTWARE:
0206 return "software";
0207
0208 case PERF_TYPE_TRACEPOINT:
0209 return "tracepoint";
0210
0211 case PERF_TYPE_HW_CACHE:
0212 return "hardware-cache";
0213
0214 default:
0215 break;
0216 }
0217
0218 return "unknown";
0219 }
0220
0221 static char *get_config_str(struct list_head *head_terms, int type_term)
0222 {
0223 struct parse_events_term *term;
0224
0225 if (!head_terms)
0226 return NULL;
0227
0228 list_for_each_entry(term, head_terms, list)
0229 if (term->type_term == type_term)
0230 return term->val.str;
0231
0232 return NULL;
0233 }
0234
0235 static char *get_config_metric_id(struct list_head *head_terms)
0236 {
0237 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
0238 }
0239
0240 static char *get_config_name(struct list_head *head_terms)
0241 {
0242 return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
0243 }
0244
0245 static struct evsel *
0246 __add_event(struct list_head *list, int *idx,
0247 struct perf_event_attr *attr,
0248 bool init_attr,
0249 const char *name, const char *metric_id, struct perf_pmu *pmu,
0250 struct list_head *config_terms, bool auto_merge_stats,
0251 const char *cpu_list)
0252 {
0253 struct evsel *evsel;
0254 struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
0255 cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
0256
0257 if (pmu && attr->type == PERF_TYPE_RAW)
0258 perf_pmu__warn_invalid_config(pmu, attr->config, name);
0259
0260 if (init_attr)
0261 event_attr_init(attr);
0262
0263 evsel = evsel__new_idx(attr, *idx);
0264 if (!evsel) {
0265 perf_cpu_map__put(cpus);
0266 return NULL;
0267 }
0268
0269 (*idx)++;
0270 evsel->core.cpus = cpus;
0271 evsel->core.own_cpus = perf_cpu_map__get(cpus);
0272 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
0273 evsel->auto_merge_stats = auto_merge_stats;
0274
0275 if (name)
0276 evsel->name = strdup(name);
0277
0278 if (metric_id)
0279 evsel->metric_id = strdup(metric_id);
0280
0281 if (config_terms)
0282 list_splice_init(config_terms, &evsel->config_terms);
0283
0284 if (list)
0285 list_add_tail(&evsel->core.node, list);
0286
0287 return evsel;
0288 }
0289
0290 struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
0291 const char *name, const char *metric_id,
0292 struct perf_pmu *pmu)
0293 {
0294 return __add_event(NULL, &idx, attr, false, name,
0295 metric_id, pmu, NULL,
0296 false, NULL);
0297 }
0298
0299 static int add_event(struct list_head *list, int *idx,
0300 struct perf_event_attr *attr, const char *name,
0301 const char *metric_id, struct list_head *config_terms)
0302 {
0303 return __add_event(list, idx, attr, true, name, metric_id,
0304 NULL, config_terms,
0305 false, NULL) ? 0 : -ENOMEM;
0306 }
0307
0308 static int add_event_tool(struct list_head *list, int *idx,
0309 enum perf_tool_event tool_event)
0310 {
0311 struct evsel *evsel;
0312 struct perf_event_attr attr = {
0313 .type = PERF_TYPE_SOFTWARE,
0314 .config = PERF_COUNT_SW_DUMMY,
0315 };
0316
0317 evsel = __add_event(list, idx, &attr, true, NULL,
0318 NULL, NULL,
0319 NULL, false,
0320 "0");
0321 if (!evsel)
0322 return -ENOMEM;
0323 evsel->tool_event = tool_event;
0324 if (tool_event == PERF_TOOL_DURATION_TIME
0325 || tool_event == PERF_TOOL_USER_TIME
0326 || tool_event == PERF_TOOL_SYSTEM_TIME) {
0327 free((char *)evsel->unit);
0328 evsel->unit = strdup("ns");
0329 }
0330 return 0;
0331 }
0332
0333 static int parse_aliases(char *str, const char *const names[][EVSEL__MAX_ALIASES], int size)
0334 {
0335 int i, j;
0336 int n, longest = -1;
0337
0338 for (i = 0; i < size; i++) {
0339 for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
0340 n = strlen(names[i][j]);
0341 if (n > longest && !strncasecmp(str, names[i][j], n))
0342 longest = n;
0343 }
0344 if (longest > 0)
0345 return i;
0346 }
0347
0348 return -1;
0349 }
0350
0351 typedef int config_term_func_t(struct perf_event_attr *attr,
0352 struct parse_events_term *term,
0353 struct parse_events_error *err);
0354 static int config_term_common(struct perf_event_attr *attr,
0355 struct parse_events_term *term,
0356 struct parse_events_error *err);
0357 static int config_attr(struct perf_event_attr *attr,
0358 struct list_head *head,
0359 struct parse_events_error *err,
0360 config_term_func_t config_term);
0361
0362 int parse_events_add_cache(struct list_head *list, int *idx,
0363 char *type, char *op_result1, char *op_result2,
0364 struct parse_events_error *err,
0365 struct list_head *head_config,
0366 struct parse_events_state *parse_state)
0367 {
0368 struct perf_event_attr attr;
0369 LIST_HEAD(config_terms);
0370 char name[MAX_NAME_LEN];
0371 const char *config_name, *metric_id;
0372 int cache_type = -1, cache_op = -1, cache_result = -1;
0373 char *op_result[2] = { op_result1, op_result2 };
0374 int i, n, ret;
0375 bool hybrid;
0376
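	/*
	 * No fallback here - if we cannot determine the cache type,
	 * bail out:
	 */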
0381 cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX);
0382 if (cache_type == -1)
0383 return -EINVAL;
0384
0385 config_name = get_config_name(head_config);
0386 n = snprintf(name, MAX_NAME_LEN, "%s", type);
0387
0388 for (i = 0; (i < 2) && (op_result[i]); i++) {
0389 char *str = op_result[i];
0390
0391 n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
0392
0393 if (cache_op == -1) {
0394 cache_op = parse_aliases(str, evsel__hw_cache_op,
0395 PERF_COUNT_HW_CACHE_OP_MAX);
0396 if (cache_op >= 0) {
0397 if (!evsel__is_cache_op_valid(cache_type, cache_op))
0398 return -EINVAL;
0399 continue;
0400 }
0401 }
0402
0403 if (cache_result == -1) {
0404 cache_result = parse_aliases(str, evsel__hw_cache_result,
0405 PERF_COUNT_HW_CACHE_RESULT_MAX);
0406 if (cache_result >= 0)
0407 continue;
0408 }
0409 }
0410
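	/*
	 * Fall back to reads:
	 */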
0414 if (cache_op == -1)
0415 cache_op = PERF_COUNT_HW_CACHE_OP_READ;
0416
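	/*
	 * Fall back to accesses:
	 */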
0420 if (cache_result == -1)
0421 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
0422
0423 memset(&attr, 0, sizeof(attr));
0424 attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
0425 attr.type = PERF_TYPE_HW_CACHE;
0426
0427 if (head_config) {
0428 if (config_attr(&attr, head_config, err,
0429 config_term_common))
0430 return -EINVAL;
0431
0432 if (get_config_terms(head_config, &config_terms))
0433 return -ENOMEM;
0434 }
0435
0436 metric_id = get_config_metric_id(head_config);
0437 ret = parse_events__add_cache_hybrid(list, idx, &attr,
0438 config_name ? : name,
0439 metric_id,
0440 &config_terms,
0441 &hybrid, parse_state);
0442 if (hybrid)
0443 goto out_free_terms;
0444
0445 ret = add_event(list, idx, &attr, config_name ? : name, metric_id,
0446 &config_terms);
0447 out_free_terms:
0448 free_config_terms(&config_terms);
0449 return ret;
0450 }
0451
0452 static void tracepoint_error(struct parse_events_error *e, int err,
0453 const char *sys, const char *name)
0454 {
0455 const char *str;
0456 char help[BUFSIZ];
0457
0458 if (!e)
0459 return;
0460
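	/*
	 * The error arrives either straight from errno (> 0) or from an
	 * error-encoded pointer (< 0), so normalize it first.
	 */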
0465 err = abs(err);
0466
0467 switch (err) {
0468 case EACCES:
0469 str = "can't access trace events";
0470 break;
0471 case ENOENT:
0472 str = "unknown tracepoint";
0473 break;
0474 default:
0475 str = "failed to add tracepoint";
0476 break;
0477 }
0478
0479 tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
0480 parse_events_error__handle(e, 0, strdup(str), strdup(help));
0481 }
0482
0483 static int add_tracepoint(struct list_head *list, int *idx,
0484 const char *sys_name, const char *evt_name,
0485 struct parse_events_error *err,
0486 struct list_head *head_config)
0487 {
0488 struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);
0489
0490 if (IS_ERR(evsel)) {
0491 tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
0492 return PTR_ERR(evsel);
0493 }
0494
0495 if (head_config) {
0496 LIST_HEAD(config_terms);
0497
0498 if (get_config_terms(head_config, &config_terms))
0499 return -ENOMEM;
0500 list_splice(&config_terms, &evsel->config_terms);
0501 }
0502
0503 list_add_tail(&evsel->core.node, list);
0504 return 0;
0505 }
0506
0507 static int add_tracepoint_multi_event(struct list_head *list, int *idx,
0508 const char *sys_name, const char *evt_name,
0509 struct parse_events_error *err,
0510 struct list_head *head_config)
0511 {
0512 char *evt_path;
0513 struct dirent *evt_ent;
0514 DIR *evt_dir;
0515 int ret = 0, found = 0;
0516
0517 evt_path = get_events_file(sys_name);
0518 if (!evt_path) {
0519 tracepoint_error(err, errno, sys_name, evt_name);
0520 return -1;
0521 }
0522 evt_dir = opendir(evt_path);
0523 if (!evt_dir) {
0524 put_events_file(evt_path);
0525 tracepoint_error(err, errno, sys_name, evt_name);
0526 return -1;
0527 }
0528
0529 while (!ret && (evt_ent = readdir(evt_dir))) {
0530 if (!strcmp(evt_ent->d_name, ".")
0531 || !strcmp(evt_ent->d_name, "..")
0532 || !strcmp(evt_ent->d_name, "enable")
0533 || !strcmp(evt_ent->d_name, "filter"))
0534 continue;
0535
0536 if (!strglobmatch(evt_ent->d_name, evt_name))
0537 continue;
0538
0539 found++;
0540
0541 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
0542 err, head_config);
0543 }
0544
0545 if (!found) {
0546 tracepoint_error(err, ENOENT, sys_name, evt_name);
0547 ret = -1;
0548 }
0549
0550 put_events_file(evt_path);
0551 closedir(evt_dir);
0552 return ret;
0553 }
0554
0555 static int add_tracepoint_event(struct list_head *list, int *idx,
0556 const char *sys_name, const char *evt_name,
0557 struct parse_events_error *err,
0558 struct list_head *head_config)
0559 {
0560 return strpbrk(evt_name, "*?") ?
0561 add_tracepoint_multi_event(list, idx, sys_name, evt_name,
0562 err, head_config) :
0563 add_tracepoint(list, idx, sys_name, evt_name,
0564 err, head_config);
0565 }
0566
0567 static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
0568 const char *sys_name, const char *evt_name,
0569 struct parse_events_error *err,
0570 struct list_head *head_config)
0571 {
0572 struct dirent *events_ent;
0573 DIR *events_dir;
0574 int ret = 0;
0575
0576 events_dir = tracing_events__opendir();
0577 if (!events_dir) {
0578 tracepoint_error(err, errno, sys_name, evt_name);
0579 return -1;
0580 }
0581
0582 while (!ret && (events_ent = readdir(events_dir))) {
0583 if (!strcmp(events_ent->d_name, ".")
0584 || !strcmp(events_ent->d_name, "..")
0585 || !strcmp(events_ent->d_name, "enable")
0586 || !strcmp(events_ent->d_name, "header_event")
0587 || !strcmp(events_ent->d_name, "header_page"))
0588 continue;
0589
0590 if (!strglobmatch(events_ent->d_name, sys_name))
0591 continue;
0592
0593 ret = add_tracepoint_event(list, idx, events_ent->d_name,
0594 evt_name, err, head_config);
0595 }
0596
0597 closedir(events_dir);
0598 return ret;
0599 }
0600
0601 #ifdef HAVE_LIBBPF_SUPPORT
0602 struct __add_bpf_event_param {
0603 struct parse_events_state *parse_state;
0604 struct list_head *list;
0605 struct list_head *head_config;
0606 };
0607
0608 static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj,
0609 void *_param)
0610 {
0611 LIST_HEAD(new_evsels);
0612 struct __add_bpf_event_param *param = _param;
0613 struct parse_events_state *parse_state = param->parse_state;
0614 struct list_head *list = param->list;
0615 struct evsel *pos;
0616 int err;
0617
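	/*
	 * Group names starting with '!' are not added as tracepoint events;
	 * such programs are used for other purposes (e.g. being placed into
	 * BPF prog array maps) rather than being attached here.
	 */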
0624 if (group[0] == '!')
0625 return 0;
0626
0627 pr_debug("add bpf event %s:%s and attach bpf program %d\n",
0628 group, event, fd);
0629
0630 err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group,
0631 event, parse_state->error,
0632 param->head_config);
0633 if (err) {
0634 struct evsel *evsel, *tmp;
0635
0636 pr_debug("Failed to add BPF event %s:%s\n",
0637 group, event);
0638 list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) {
0639 list_del_init(&evsel->core.node);
0640 evsel__delete(evsel);
0641 }
0642 return err;
0643 }
0644 pr_debug("adding %s:%s\n", group, event);
0645
0646 list_for_each_entry(pos, &new_evsels, core.node) {
0647 pr_debug("adding %s:%s to %p\n",
0648 group, event, pos);
0649 pos->bpf_fd = fd;
0650 pos->bpf_obj = obj;
0651 }
0652 list_splice(&new_evsels, list);
0653 return 0;
0654 }
0655
0656 int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
0657 struct list_head *list,
0658 struct bpf_object *obj,
0659 struct list_head *head_config)
0660 {
0661 int err;
0662 char errbuf[BUFSIZ];
0663 struct __add_bpf_event_param param = {parse_state, list, head_config};
0664 static bool registered_unprobe_atexit = false;
0665
0666 if (IS_ERR(obj) || !obj) {
0667 snprintf(errbuf, sizeof(errbuf),
0668 "Internal error: load bpf obj with NULL");
0669 err = -EINVAL;
0670 goto errout;
0671 }
0672
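	/*
	 * Register the cleanup handler before probing, so that probe points
	 * created by bpf__probe() are removed at exit even if a later step
	 * fails.
	 */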
0678 if (!registered_unprobe_atexit) {
0679 atexit(bpf__clear);
0680 registered_unprobe_atexit = true;
0681 }
0682
0683 err = bpf__probe(obj);
0684 if (err) {
0685 bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
0686 goto errout;
0687 }
0688
0689 err = bpf__load(obj);
0690 if (err) {
0691 bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
0692 goto errout;
0693 }
0694
	err = bpf__foreach_event(obj, add_bpf_event, &param);
0696 if (err) {
0697 snprintf(errbuf, sizeof(errbuf),
0698 "Attach events in BPF object failed");
0699 goto errout;
0700 }
0701
0702 return 0;
0703 errout:
0704 parse_events_error__handle(parse_state->error, 0,
0705 strdup(errbuf), strdup("(add -v to see detail)"));
0706 return err;
0707 }
0708
0709 static int
0710 parse_events_config_bpf(struct parse_events_state *parse_state,
0711 struct bpf_object *obj,
0712 struct list_head *head_config)
0713 {
0714 struct parse_events_term *term;
0715 int error_pos;
0716
0717 if (!head_config || list_empty(head_config))
0718 return 0;
0719
0720 list_for_each_entry(term, head_config, list) {
0721 int err;
0722
0723 if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
0724 parse_events_error__handle(parse_state->error, term->err_term,
0725 strdup("Invalid config term for BPF object"),
0726 NULL);
0727 return -EINVAL;
0728 }
0729
0730 err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos);
0731 if (err) {
0732 char errbuf[BUFSIZ];
0733 int idx;
0734
0735 bpf__strerror_config_obj(obj, term, parse_state->evlist,
0736 &error_pos, err, errbuf,
0737 sizeof(errbuf));
0738
0739 if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
0740 idx = term->err_val;
0741 else
0742 idx = term->err_term + error_pos;
0743
0744 parse_events_error__handle(parse_state->error, idx,
0745 strdup(errbuf),
0746 strdup(
0747 "Hint:\tValid config terms:\n"
0748 " \tmap:[<arraymap>].value<indices>=[value]\n"
0749 " \tmap:[<eventmap>].event<indices>=[event]\n"
0750 "\n"
0751 " \twhere <indices> is something like [0,3...5] or [all]\n"
0752 " \t(add -v to see detail)"));
0753 return err;
0754 }
0755 }
0756 return 0;
0757 }
0758
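/*
 * Split the config terms of a BPF event: terms such as "call-graph=fp"
 * apply to every event created from the object ('evt' config), while
 * "map:..."-style terms configure the object itself ('obj' config) and are
 * handled by parse_events_config_bpf(). Move the object config terms over
 * to obj_head_config.
 */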
0769 static void
0770 split_bpf_config_terms(struct list_head *evt_head_config,
0771 struct list_head *obj_head_config)
0772 {
0773 struct parse_events_term *term, *temp;
0774
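	/*
	 * Currently every non-hardcoded (user) term belongs to the BPF
	 * object, so parse_events__is_hardcoded_term() is sufficient to
	 * separate the two kinds.
	 */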
0783 list_for_each_entry_safe(term, temp, evt_head_config, list)
0784 if (!parse_events__is_hardcoded_term(term))
0785 list_move_tail(&term->list, obj_head_config);
0786 }
0787
0788 int parse_events_load_bpf(struct parse_events_state *parse_state,
0789 struct list_head *list,
0790 char *bpf_file_name,
0791 bool source,
0792 struct list_head *head_config)
0793 {
0794 int err;
0795 struct bpf_object *obj;
0796 LIST_HEAD(obj_head_config);
0797
0798 if (head_config)
0799 split_bpf_config_terms(head_config, &obj_head_config);
0800
0801 obj = bpf__prepare_load(bpf_file_name, source);
0802 if (IS_ERR(obj)) {
0803 char errbuf[BUFSIZ];
0804
0805 err = PTR_ERR(obj);
0806
0807 if (err == -ENOTSUP)
0808 snprintf(errbuf, sizeof(errbuf),
0809 "BPF support is not compiled");
0810 else
0811 bpf__strerror_prepare_load(bpf_file_name,
0812 source,
0813 -err, errbuf,
0814 sizeof(errbuf));
0815
0816 parse_events_error__handle(parse_state->error, 0,
0817 strdup(errbuf), strdup("(add -v to see detail)"));
0818 return err;
0819 }
0820
0821 err = parse_events_load_bpf_obj(parse_state, list, obj, head_config);
0822 if (err)
0823 return err;
0824 err = parse_events_config_bpf(parse_state, obj, &obj_head_config);
0825
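	/*
	 * The caller knows nothing about obj_head_config, so splice the
	 * object config terms back into head_config before returning.
	 */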
0830 if (head_config)
0831 list_splice_tail(&obj_head_config, head_config);
0832 return err;
0833 }
0834 #else
0835 int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
0836 struct list_head *list __maybe_unused,
0837 struct bpf_object *obj __maybe_unused,
0838 struct list_head *head_config __maybe_unused)
0839 {
0840 parse_events_error__handle(parse_state->error, 0,
0841 strdup("BPF support is not compiled"),
0842 strdup("Make sure libbpf-devel is available at build time."));
0843 return -ENOTSUP;
0844 }
0845
0846 int parse_events_load_bpf(struct parse_events_state *parse_state,
0847 struct list_head *list __maybe_unused,
0848 char *bpf_file_name __maybe_unused,
0849 bool source __maybe_unused,
0850 struct list_head *head_config __maybe_unused)
0851 {
0852 parse_events_error__handle(parse_state->error, 0,
0853 strdup("BPF support is not compiled"),
0854 strdup("Make sure libbpf-devel is available at build time."));
0855 return -ENOTSUP;
0856 }
0857 #endif
0858
0859 static int
0860 parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
0861 {
0862 int i;
0863
0864 for (i = 0; i < 3; i++) {
0865 if (!type || !type[i])
0866 break;
0867
0868 #define CHECK_SET_TYPE(bit) \
0869 do { \
0870 if (attr->bp_type & bit) \
0871 return -EINVAL; \
0872 else \
0873 attr->bp_type |= bit; \
0874 } while (0)
0875
0876 switch (type[i]) {
0877 case 'r':
0878 CHECK_SET_TYPE(HW_BREAKPOINT_R);
0879 break;
0880 case 'w':
0881 CHECK_SET_TYPE(HW_BREAKPOINT_W);
0882 break;
0883 case 'x':
0884 CHECK_SET_TYPE(HW_BREAKPOINT_X);
0885 break;
0886 default:
0887 return -EINVAL;
0888 }
0889 }
0890
0891 #undef CHECK_SET_TYPE
0892
0893 if (!attr->bp_type)
0894 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
0895
0896 return 0;
0897 }
0898
0899 int parse_events_add_breakpoint(struct list_head *list, int *idx,
0900 u64 addr, char *type, u64 len)
0901 {
0902 struct perf_event_attr attr;
0903
0904 memset(&attr, 0, sizeof(attr));
0905 attr.bp_addr = addr;
0906
0907 if (parse_breakpoint_type(type, &attr))
0908 return -EINVAL;
0909
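	/* Provide a default breakpoint length when none was specified. */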
0911 if (!len) {
0912 if (attr.bp_type == HW_BREAKPOINT_X)
0913 len = sizeof(long);
0914 else
0915 len = HW_BREAKPOINT_LEN_4;
0916 }
0917
0918 attr.bp_len = len;
0919
0920 attr.type = PERF_TYPE_BREAKPOINT;
0921 attr.sample_period = 1;
0922
0923 return add_event(list, idx, &attr, NULL, NULL,
0924 NULL);
0925 }
0926
0927 static int check_type_val(struct parse_events_term *term,
0928 struct parse_events_error *err,
0929 int type)
0930 {
0931 if (type == term->type_val)
0932 return 0;
0933
0934 if (err) {
0935 parse_events_error__handle(err, term->err_val,
0936 type == PARSE_EVENTS__TERM_TYPE_NUM
0937 ? strdup("expected numeric value")
0938 : strdup("expected string value"),
0939 NULL);
0940 }
0941 return -EINVAL;
0942 }
0943
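/*
 * Keep in sync with the terms recognised by parse-events.l.
 */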
0947 static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
0948 [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
0949 [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
0950 [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
0951 [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
0952 [PARSE_EVENTS__TERM_TYPE_NAME] = "name",
0953 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
0954 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
0955 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
0956 [PARSE_EVENTS__TERM_TYPE_TIME] = "time",
0957 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
0958 [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
0959 [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
0960 [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
0961 [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
0962 [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
0963 [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
0964 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
0965 [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
0966 [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
0967 [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
0968 [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
0969 [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
0970 };
0971
0972 static bool config_term_shrinked;
0973
0974 static bool
0975 config_term_avail(int term_type, struct parse_events_error *err)
0976 {
0977 char *err_str;
0978
0979 if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
0980 parse_events_error__handle(err, -1,
0981 strdup("Invalid term_type"), NULL);
0982 return false;
0983 }
0984 if (!config_term_shrinked)
0985 return true;
0986
0987 switch (term_type) {
0988 case PARSE_EVENTS__TERM_TYPE_CONFIG:
0989 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
0990 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
0991 case PARSE_EVENTS__TERM_TYPE_NAME:
0992 case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
0993 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
0994 case PARSE_EVENTS__TERM_TYPE_PERCORE:
0995 return true;
0996 default:
0997 if (!err)
0998 return false;
0999
1000
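		/* term_type was validated above, so indexing config_term_names is safe. */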
1001 if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
1002 config_term_names[term_type]) >= 0)
1003 parse_events_error__handle(err, -1, err_str, NULL);
1004 return false;
1005 }
1006 }
1007
1008 void parse_events__shrink_config_terms(void)
1009 {
1010 config_term_shrinked = true;
1011 }
1012
1013 static int config_term_common(struct perf_event_attr *attr,
1014 struct parse_events_term *term,
1015 struct parse_events_error *err)
1016 {
1017 #define CHECK_TYPE_VAL(type) \
1018 do { \
1019 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
1020 return -EINVAL; \
1021 } while (0)
1022
1023 switch (term->type_term) {
1024 case PARSE_EVENTS__TERM_TYPE_CONFIG:
1025 CHECK_TYPE_VAL(NUM);
1026 attr->config = term->val.num;
1027 break;
1028 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1029 CHECK_TYPE_VAL(NUM);
1030 attr->config1 = term->val.num;
1031 break;
1032 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1033 CHECK_TYPE_VAL(NUM);
1034 attr->config2 = term->val.num;
1035 break;
1036 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1037 CHECK_TYPE_VAL(NUM);
1038 break;
1039 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1040 CHECK_TYPE_VAL(NUM);
1041 break;
1042 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1043 CHECK_TYPE_VAL(STR);
1044 if (strcmp(term->val.str, "no") &&
1045 parse_branch_str(term->val.str,
1046 &attr->branch_sample_type)) {
1047 parse_events_error__handle(err, term->err_val,
1048 strdup("invalid branch sample type"),
1049 NULL);
1050 return -EINVAL;
1051 }
1052 break;
1053 case PARSE_EVENTS__TERM_TYPE_TIME:
1054 CHECK_TYPE_VAL(NUM);
1055 if (term->val.num > 1) {
1056 parse_events_error__handle(err, term->err_val,
1057 strdup("expected 0 or 1"),
1058 NULL);
1059 return -EINVAL;
1060 }
1061 break;
1062 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1063 CHECK_TYPE_VAL(STR);
1064 break;
1065 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1066 CHECK_TYPE_VAL(NUM);
1067 break;
1068 case PARSE_EVENTS__TERM_TYPE_INHERIT:
1069 CHECK_TYPE_VAL(NUM);
1070 break;
1071 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1072 CHECK_TYPE_VAL(NUM);
1073 break;
1074 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1075 CHECK_TYPE_VAL(NUM);
1076 break;
1077 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1078 CHECK_TYPE_VAL(NUM);
1079 break;
1080 case PARSE_EVENTS__TERM_TYPE_NAME:
1081 CHECK_TYPE_VAL(STR);
1082 break;
1083 case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
1084 CHECK_TYPE_VAL(STR);
1085 break;
1086 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1087 CHECK_TYPE_VAL(NUM);
1088 break;
1089 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1090 CHECK_TYPE_VAL(NUM);
1091 break;
1092 case PARSE_EVENTS__TERM_TYPE_PERCORE:
1093 CHECK_TYPE_VAL(NUM);
1094 if ((unsigned int)term->val.num > 1) {
1095 parse_events_error__handle(err, term->err_val,
1096 strdup("expected 0 or 1"),
1097 NULL);
1098 return -EINVAL;
1099 }
1100 break;
1101 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1102 CHECK_TYPE_VAL(NUM);
1103 break;
1104 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1105 CHECK_TYPE_VAL(NUM);
1106 if (term->val.num > UINT_MAX) {
1107 parse_events_error__handle(err, term->err_val,
1108 strdup("too big"),
1109 NULL);
1110 return -EINVAL;
1111 }
1112 break;
1113 default:
1114 parse_events_error__handle(err, term->err_term,
1115 strdup("unknown term"),
1116 parse_events_formats_error_string(NULL));
1117 return -EINVAL;
1118 }
1119
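	/*
	 * Check term availability only after the basic checks above, so that
	 * sysfs (PARSE_EVENTS__TERM_TYPE_USER) and unknown terms are caught
	 * by the switch first and the user sees "unknown term" rather than
	 * the confusing "'<sysfs term>' is not usable in 'perf stat'".
	 */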
1129 if (!config_term_avail(term->type_term, err))
1130 return -EINVAL;
1131 return 0;
1132 #undef CHECK_TYPE_VAL
1133 }
1134
1135 static int config_term_pmu(struct perf_event_attr *attr,
1136 struct parse_events_term *term,
1137 struct parse_events_error *err)
1138 {
1139 if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
1140 term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG)
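		/*
		 * Always succeed for sysfs (user) and driver-config terms;
		 * their types are not known here and are resolved later
		 * against the PMU format.
		 */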
1145 return 0;
1146 else
1147 return config_term_common(attr, term, err);
1148 }
1149
1150 static int config_term_tracepoint(struct perf_event_attr *attr,
1151 struct parse_events_term *term,
1152 struct parse_events_error *err)
1153 {
1154 switch (term->type_term) {
1155 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1156 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1157 case PARSE_EVENTS__TERM_TYPE_INHERIT:
1158 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1159 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1160 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1161 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1162 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1163 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1164 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1165 return config_term_common(attr, term, err);
1166 default:
1167 if (err) {
1168 parse_events_error__handle(err, term->err_term,
1169 strdup("unknown term"),
1170 strdup("valid terms: call-graph,stack-size\n"));
1171 }
1172 return -EINVAL;
1173 }
1174
1175 return 0;
1176 }
1177
1178 static int config_attr(struct perf_event_attr *attr,
1179 struct list_head *head,
1180 struct parse_events_error *err,
1181 config_term_func_t config_term)
1182 {
1183 struct parse_events_term *term;
1184
1185 list_for_each_entry(term, head, list)
1186 if (config_term(attr, term, err))
1187 return -EINVAL;
1188
1189 return 0;
1190 }
1191
1192 static int get_config_terms(struct list_head *head_config,
1193 struct list_head *head_terms __maybe_unused)
1194 {
1195 #define ADD_CONFIG_TERM(__type, __weak) \
1196 struct evsel_config_term *__t; \
1197 \
1198 __t = zalloc(sizeof(*__t)); \
1199 if (!__t) \
1200 return -ENOMEM; \
1201 \
1202 INIT_LIST_HEAD(&__t->list); \
1203 __t->type = EVSEL__CONFIG_TERM_ ## __type; \
1204 __t->weak = __weak; \
1205 list_add_tail(&__t->list, head_terms)
1206
1207 #define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \
1208 do { \
1209 ADD_CONFIG_TERM(__type, __weak); \
1210 __t->val.__name = __val; \
1211 } while (0)
1212
1213 #define ADD_CONFIG_TERM_STR(__type, __val, __weak) \
1214 do { \
1215 ADD_CONFIG_TERM(__type, __weak); \
1216 __t->val.str = strdup(__val); \
1217 if (!__t->val.str) { \
1218 zfree(&__t); \
1219 return -ENOMEM; \
1220 } \
1221 __t->free_str = true; \
1222 } while (0)
1223
1224 struct parse_events_term *term;
1225
1226 list_for_each_entry(term, head_config, list) {
1227 switch (term->type_term) {
1228 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1229 ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
1230 break;
1231 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
1232 ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
1233 break;
1234 case PARSE_EVENTS__TERM_TYPE_TIME:
1235 ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
1236 break;
1237 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
1238 ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
1239 break;
1240 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1241 ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
1242 break;
1243 case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
1244 ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
1245 term->val.num, term->weak);
1246 break;
1247 case PARSE_EVENTS__TERM_TYPE_INHERIT:
1248 ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1249 term->val.num ? 1 : 0, term->weak);
1250 break;
1251 case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
1252 ADD_CONFIG_TERM_VAL(INHERIT, inherit,
1253 term->val.num ? 0 : 1, term->weak);
1254 break;
1255 case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
1256 ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
1257 term->val.num, term->weak);
1258 break;
1259 case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
1260 ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
1261 term->val.num, term->weak);
1262 break;
1263 case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
1264 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1265 term->val.num ? 1 : 0, term->weak);
1266 break;
1267 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
1268 ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
1269 term->val.num ? 0 : 1, term->weak);
1270 break;
1271 case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
1272 ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
1273 break;
1274 case PARSE_EVENTS__TERM_TYPE_PERCORE:
1275 ADD_CONFIG_TERM_VAL(PERCORE, percore,
1276 term->val.num ? true : false, term->weak);
1277 break;
1278 case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
1279 ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
1280 term->val.num ? 1 : 0, term->weak);
1281 break;
1282 case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
1283 ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
1284 term->val.num, term->weak);
1285 break;
1286 default:
1287 break;
1288 }
1289 }
1290 return 0;
1291 }
1292
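/*
 * Add an EVSEL__CONFIG_TERM_CFG_CHG term whose value has a bit set for
 * every bit of attr->config that the user changed explicitly.
 */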
1297 static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
1298 struct list_head *head_terms)
1299 {
1300 struct parse_events_term *term;
1301 u64 bits = 0;
1302 int type;
1303
1304 list_for_each_entry(term, head_config, list) {
1305 switch (term->type_term) {
1306 case PARSE_EVENTS__TERM_TYPE_USER:
1307 type = perf_pmu__format_type(&pmu->format, term->config);
1308 if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
1309 continue;
1310 bits |= perf_pmu__format_bits(&pmu->format, term->config);
1311 break;
1312 case PARSE_EVENTS__TERM_TYPE_CONFIG:
1313 bits = ~(u64)0;
1314 break;
1315 default:
1316 break;
1317 }
1318 }
1319
1320 if (bits)
1321 ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);
1322
1323 #undef ADD_CONFIG_TERM
1324 return 0;
1325 }
1326
1327 int parse_events_add_tracepoint(struct list_head *list, int *idx,
1328 const char *sys, const char *event,
1329 struct parse_events_error *err,
1330 struct list_head *head_config)
1331 {
1332 if (head_config) {
1333 struct perf_event_attr attr;
1334
1335 if (config_attr(&attr, head_config, err,
1336 config_term_tracepoint))
1337 return -EINVAL;
1338 }
1339
1340 if (strpbrk(sys, "*?"))
1341 return add_tracepoint_multi_sys(list, idx, sys, event,
1342 err, head_config);
1343 else
1344 return add_tracepoint_event(list, idx, sys, event,
1345 err, head_config);
1346 }
1347
1348 int parse_events_add_numeric(struct parse_events_state *parse_state,
1349 struct list_head *list,
1350 u32 type, u64 config,
1351 struct list_head *head_config)
1352 {
1353 struct perf_event_attr attr;
1354 LIST_HEAD(config_terms);
1355 const char *name, *metric_id;
1356 bool hybrid;
1357 int ret;
1358
1359 memset(&attr, 0, sizeof(attr));
1360 attr.type = type;
1361 attr.config = config;
1362
1363 if (head_config) {
1364 if (config_attr(&attr, head_config, parse_state->error,
1365 config_term_common))
1366 return -EINVAL;
1367
1368 if (get_config_terms(head_config, &config_terms))
1369 return -ENOMEM;
1370 }
1371
1372 name = get_config_name(head_config);
1373 metric_id = get_config_metric_id(head_config);
1374 ret = parse_events__add_numeric_hybrid(parse_state, list, &attr,
1375 name, metric_id,
1376 &config_terms, &hybrid);
1377 if (hybrid)
1378 goto out_free_terms;
1379
1380 ret = add_event(list, &parse_state->idx, &attr, name, metric_id,
1381 &config_terms);
1382 out_free_terms:
1383 free_config_terms(&config_terms);
1384 return ret;
1385 }
1386
1387 int parse_events_add_tool(struct parse_events_state *parse_state,
1388 struct list_head *list,
1389 int tool_event)
1390 {
1391 return add_event_tool(list, &parse_state->idx, tool_event);
1392 }
1393
1394 static bool config_term_percore(struct list_head *config_terms)
1395 {
1396 struct evsel_config_term *term;
1397
1398 list_for_each_entry(term, config_terms, list) {
1399 if (term->type == EVSEL__CONFIG_TERM_PERCORE)
1400 return term->val.percore;
1401 }
1402
1403 return false;
1404 }
1405
1406 static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state,
1407 struct list_head *list, char *name,
1408 struct list_head *head_config)
1409 {
1410 struct parse_events_term *term;
1411 int ret = -1;
1412
1413 if (parse_state->fake_pmu || !head_config || list_empty(head_config) ||
1414 !perf_pmu__is_hybrid(name)) {
1415 return -1;
1416 }
1417
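	/*
	 * Bail out if there is more than one term in the list.
	 */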
1421 if (head_config->next && head_config->next->next != head_config)
1422 return -1;
1423
1424 term = list_first_entry(head_config, struct parse_events_term, list);
1425 if (term && term->config && strcmp(term->config, "event")) {
1426 ret = parse_events__with_hybrid_pmu(parse_state, term->config,
1427 name, list);
1428 }
1429
1430 return ret;
1431 }
1432
1433 int parse_events_add_pmu(struct parse_events_state *parse_state,
1434 struct list_head *list, char *name,
1435 struct list_head *head_config,
1436 bool auto_merge_stats,
1437 bool use_alias)
1438 {
1439 struct perf_event_attr attr;
1440 struct perf_pmu_info info;
1441 struct perf_pmu *pmu;
1442 struct evsel *evsel;
1443 struct parse_events_error *err = parse_state->error;
1444 bool use_uncore_alias;
1445 LIST_HEAD(config_terms);
1446
1447 pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
1448
1449 if (verbose > 1 && !(pmu && pmu->selectable)) {
1450 fprintf(stderr, "Attempting to add event pmu '%s' with '",
1451 name);
1452 if (head_config) {
1453 struct parse_events_term *term;
1454
1455 list_for_each_entry(term, head_config, list) {
1456 fprintf(stderr, "%s,", term->config);
1457 }
1458 }
1459 fprintf(stderr, "' that may result in non-fatal errors\n");
1460 }
1461
1462 if (!pmu) {
1463 char *err_str;
1464
1465 if (asprintf(&err_str,
1466 "Cannot find PMU `%s'. Missing kernel support?",
1467 name) >= 0)
1468 parse_events_error__handle(err, 0, err_str, NULL);
1469 return -EINVAL;
1470 }
1471
1472 if (pmu->default_config) {
1473 memcpy(&attr, pmu->default_config,
1474 sizeof(struct perf_event_attr));
1475 } else {
1476 memset(&attr, 0, sizeof(attr));
1477 }
1478
1479 use_uncore_alias = (pmu->is_uncore && use_alias);
1480
1481 if (!head_config) {
1482 attr.type = pmu->type;
1483 evsel = __add_event(list, &parse_state->idx, &attr,
1484 true, NULL,
1485 NULL, pmu,
1486 NULL, auto_merge_stats,
1487 NULL);
1488 if (evsel) {
1489 evsel->pmu_name = name ? strdup(name) : NULL;
1490 evsel->use_uncore_alias = use_uncore_alias;
1491 return 0;
1492 } else {
1493 return -ENOMEM;
1494 }
1495 }
1496
1497 if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info))
1498 return -EINVAL;
1499
1500 if (verbose > 1) {
1501 fprintf(stderr, "After aliases, add event pmu '%s' with '",
1502 name);
1503 if (head_config) {
1504 struct parse_events_term *term;
1505
1506 list_for_each_entry(term, head_config, list) {
1507 fprintf(stderr, "%s,", term->config);
1508 }
1509 }
1510 fprintf(stderr, "' that may result in non-fatal errors\n");
1511 }
1512
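	/*
	 * Apply the hardcoded terms (config, period, name, ...) first;
	 * sysfs format terms are applied below via perf_pmu__config().
	 */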
1517 if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
1518 return -EINVAL;
1519
1520 if (get_config_terms(head_config, &config_terms))
1521 return -ENOMEM;
1522
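	/*
	 * When the PMU has a default config, also record which bits of
	 * attr->config the user changed explicitly.
	 */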
1527 if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
1528 return -ENOMEM;
1529
1530 if (!parse_events__inside_hybrid_pmu(parse_state, list, name,
1531 head_config)) {
1532 return 0;
1533 }
1534
1535 if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
1536 free_config_terms(&config_terms);
1537 return -EINVAL;
1538 }
1539
1540 evsel = __add_event(list, &parse_state->idx, &attr, true,
1541 get_config_name(head_config),
1542 get_config_metric_id(head_config), pmu,
1543 &config_terms, auto_merge_stats, NULL);
1544 if (!evsel)
1545 return -ENOMEM;
1546
1547 if (evsel->name)
1548 evsel->use_config_name = true;
1549
1550 evsel->pmu_name = name ? strdup(name) : NULL;
1551 evsel->use_uncore_alias = use_uncore_alias;
1552 evsel->percore = config_term_percore(&evsel->config_terms);
1553
1554 if (parse_state->fake_pmu)
1555 return 0;
1556
1557 free((char *)evsel->unit);
1558 evsel->unit = strdup(info.unit);
1559 evsel->scale = info.scale;
1560 evsel->per_pkg = info.per_pkg;
1561 evsel->snapshot = info.snapshot;
1562 evsel->metric_expr = info.metric_expr;
1563 evsel->metric_name = info.metric_name;
1564 return 0;
1565 }
1566
1567 int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
1568 char *str, struct list_head *head,
1569 struct list_head **listp)
1570 {
1571 struct parse_events_term *term;
1572 struct list_head *list = NULL;
1573 struct list_head *orig_head = NULL;
1574 struct perf_pmu *pmu = NULL;
1575 int ok = 0;
1576 char *config;
1577
1578 *listp = NULL;
1579
1580 if (!head) {
1581 head = malloc(sizeof(struct list_head));
1582 if (!head)
1583 goto out_err;
1584
1585 INIT_LIST_HEAD(head);
1586 }
1587 config = strdup(str);
1588 if (!config)
1589 goto out_err;
1590
1591 if (parse_events_term__num(&term,
1592 PARSE_EVENTS__TERM_TYPE_USER,
1593 config, 1, false, &config,
1594 NULL) < 0) {
1595 free(config);
1596 goto out_err;
1597 }
1598 list_add_tail(&term->list, head);
1599
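	/* Add it for all PMUs that support the alias. */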
1601 list = malloc(sizeof(struct list_head));
1602 if (!list)
1603 goto out_err;
1604
1605 INIT_LIST_HEAD(list);
1606
1607 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
1608 struct perf_pmu_alias *alias;
1609
1610 list_for_each_entry(alias, &pmu->aliases, list) {
1611 if (!strcasecmp(alias->name, str)) {
1612 parse_events_copy_term_list(head, &orig_head);
1613 if (!parse_events_add_pmu(parse_state, list,
1614 pmu->name, orig_head,
1615 true, true)) {
1616 pr_debug("%s -> %s/%s/\n", str,
1617 pmu->name, alias->str);
1618 ok++;
1619 }
1620 parse_events_terms__delete(orig_head);
1621 }
1622 }
1623 }
1624
1625 if (parse_state->fake_pmu) {
1626 if (!parse_events_add_pmu(parse_state, list, str, head,
1627 true, true)) {
1628 pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
1629 ok++;
1630 }
1631 }
1632
1633 out_err:
1634 if (ok)
1635 *listp = list;
1636 else
1637 free(list);
1638
1639 parse_events_terms__delete(head);
1640 return ok ? 0 : -1;
1641 }
1642
1643 int parse_events__modifier_group(struct list_head *list,
1644 char *event_mod)
1645 {
1646 return parse_events__modifier_event(list, event_mod, true);
1647 }
1648
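/*
 * Uncore PMUs are named uncore_<blockname>_<index>; two PMUs belong to the
 * same block when everything up to the last '_' matches.
 */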
1653 static bool is_same_uncore_block(const char *pmu_name_a, const char *pmu_name_b)
1654 {
1655 char *end_a, *end_b;
1656
1657 end_a = strrchr(pmu_name_a, '_');
1658 end_b = strrchr(pmu_name_b, '_');
1659
1660 if (!end_a || !end_b)
1661 return false;
1662
1663 if ((end_a - pmu_name_a) != (end_b - pmu_name_b))
1664 return false;
1665
1666 return (strncmp(pmu_name_a, pmu_name_b, end_a - pmu_name_a) == 0);
1667 }
1668
1669 static int
1670 parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
1671 struct parse_events_state *parse_state)
1672 {
1673 struct evsel *evsel, *leader;
1674 uintptr_t *leaders;
1675 bool is_leader = true;
1676 int i, nr_pmu = 0, total_members, ret = 0;
1677
1678 leader = list_first_entry(list, struct evsel, core.node);
1679 evsel = list_last_entry(list, struct evsel, core.node);
1680 total_members = evsel->core.idx - leader->core.idx + 1;
1681
1682 leaders = calloc(total_members, sizeof(uintptr_t));
1683 if (WARN_ON(!leaders))
1684 return 0;
1685
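	/*
	 * Sanity check the whole group: every member must use an uncore
	 * alias and come from the same uncore block; record the leader
	 * event of each PMU in leaders[].
	 */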
1691 __evlist__for_each_entry(list, evsel) {
1692
1693
1694 if (!evsel->use_uncore_alias)
1695 goto out;
1696
1697
1698 if (!is_same_uncore_block(leader->pmu_name, evsel->pmu_name))
1699 goto out;
1700
1701 if (!is_leader)
1702 continue;
1703
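		/*
		 * Once a PMU name repeats, a new round of events has started:
		 * only the first event seen for each PMU is a leader, even
		 * though all of them share the same event name.
		 */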
1708 if ((leader != evsel) &&
1709 !strcmp(leader->pmu_name, evsel->pmu_name)) {
1710 is_leader = false;
1711 continue;
1712 }
1713
1714
1715 leaders[nr_pmu++] = (uintptr_t) evsel;
1716 }
1717
1718
1719 if (nr_pmu == total_members) {
1720 parse_state->nr_groups--;
1721 goto handled;
1722 }
1723
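	/*
	 * An uncore event alias is a joint name meaning the same event runs
	 * on every PMU of a block. perf does not support grouping events
	 * across PMUs, so split the big group into per-PMU groups by
	 * assigning each member to the leader of its PMU, round robin over
	 * the leaders collected above.
	 */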
1736 i = 0;
1737 __evlist__for_each_entry(list, evsel) {
1738 if (i >= nr_pmu)
1739 i = 0;
1740 evsel__set_leader(evsel, (struct evsel *) leaders[i++]);
1741 }
1742
1743
1744 for (i = 0; i < nr_pmu; i++) {
1745 evsel = (struct evsel *) leaders[i];
1746 evsel->core.nr_members = total_members / nr_pmu;
1747 evsel->group_name = name ? strdup(name) : NULL;
1748 }
1749
1750
1751 parse_state->nr_groups += nr_pmu - 1;
1752
1753 handled:
1754 ret = 1;
1755 out:
1756 free(leaders);
1757 return ret;
1758 }
1759
1760 __weak struct evsel *arch_evlist__leader(struct list_head *list)
1761 {
1762 return list_first_entry(list, struct evsel, core.node);
1763 }
1764
1765 void parse_events__set_leader(char *name, struct list_head *list,
1766 struct parse_events_state *parse_state)
1767 {
1768 struct evsel *leader;
1769
1770 if (list_empty(list)) {
1771 WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1772 return;
1773 }
1774
1775 if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state))
1776 return;
1777
1778 leader = arch_evlist__leader(list);
1779 __perf_evlist__set_leader(list, &leader->core);
1780 leader->group_name = name ? strdup(name) : NULL;
1781 list_move(&leader->core.node, list);
1782 }
1783
1784
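/* list_event is assumed to point to malloc'ed memory. */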
1785 void parse_events_update_lists(struct list_head *list_event,
1786 struct list_head *list_all)
1787 {
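	/*
	 * Called once per event definition: append its evsels to the list of
	 * all events and release the now unused per-event list head.
	 */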
1793 list_splice_tail(list_event, list_all);
1794 free(list_event);
1795 }
1796
1797 struct event_modifier {
1798 int eu;
1799 int ek;
1800 int eh;
1801 int eH;
1802 int eG;
1803 int eI;
1804 int precise;
1805 int precise_max;
1806 int exclude_GH;
1807 int sample_read;
1808 int pinned;
1809 int weak;
1810 int exclusive;
1811 int bpf_counter;
1812 };
1813
1814 static int get_event_modifier(struct event_modifier *mod, char *str,
1815 struct evsel *evsel)
1816 {
1817 int eu = evsel ? evsel->core.attr.exclude_user : 0;
1818 int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
1819 int eh = evsel ? evsel->core.attr.exclude_hv : 0;
1820 int eH = evsel ? evsel->core.attr.exclude_host : 0;
1821 int eG = evsel ? evsel->core.attr.exclude_guest : 0;
1822 int eI = evsel ? evsel->core.attr.exclude_idle : 0;
1823 int precise = evsel ? evsel->core.attr.precise_ip : 0;
1824 int precise_max = 0;
1825 int sample_read = 0;
1826 int pinned = evsel ? evsel->core.attr.pinned : 0;
1827 int exclusive = evsel ? evsel->core.attr.exclusive : 0;
1828
1829 int exclude = eu | ek | eh;
1830 int exclude_GH = evsel ? evsel->exclude_GH : 0;
1831 int weak = 0;
1832 int bpf_counter = 0;
1833
1834 memset(mod, 0, sizeof(*mod));
1835
1836 while (*str) {
1837 if (*str == 'u') {
1838 if (!exclude)
1839 exclude = eu = ek = eh = 1;
1840 if (!exclude_GH && !perf_guest)
1841 eG = 1;
1842 eu = 0;
1843 } else if (*str == 'k') {
1844 if (!exclude)
1845 exclude = eu = ek = eh = 1;
1846 ek = 0;
1847 } else if (*str == 'h') {
1848 if (!exclude)
1849 exclude = eu = ek = eh = 1;
1850 eh = 0;
1851 } else if (*str == 'G') {
1852 if (!exclude_GH)
1853 exclude_GH = eG = eH = 1;
1854 eG = 0;
1855 } else if (*str == 'H') {
1856 if (!exclude_GH)
1857 exclude_GH = eG = eH = 1;
1858 eH = 0;
1859 } else if (*str == 'I') {
1860 eI = 1;
1861 } else if (*str == 'p') {
1862 precise++;
1863
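			/* use of precise requires exclude_guest */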
1864 if (!exclude_GH)
1865 eG = 1;
1866 } else if (*str == 'P') {
1867 precise_max = 1;
1868 } else if (*str == 'S') {
1869 sample_read = 1;
1870 } else if (*str == 'D') {
1871 pinned = 1;
1872 } else if (*str == 'e') {
1873 exclusive = 1;
1874 } else if (*str == 'W') {
1875 weak = 1;
1876 } else if (*str == 'b') {
1877 bpf_counter = 1;
1878 } else
1879 break;
1880
1881 ++str;
1882 }
1883
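	/*
	 * precise ip:
	 *
	 *  0 - SAMPLE_IP can have arbitrary skid
	 *  1 - SAMPLE_IP must have constant skid
	 *  2 - SAMPLE_IP requested to have 0 skid
	 *  3 - SAMPLE_IP must have 0 skid
	 *
	 *  See also PERF_RECORD_MISC_EXACT_IP
	 */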
1894 if (precise > 3)
1895 return -EINVAL;
1896
1897 mod->eu = eu;
1898 mod->ek = ek;
1899 mod->eh = eh;
1900 mod->eH = eH;
1901 mod->eG = eG;
1902 mod->eI = eI;
1903 mod->precise = precise;
1904 mod->precise_max = precise_max;
1905 mod->exclude_GH = exclude_GH;
1906 mod->sample_read = sample_read;
1907 mod->pinned = pinned;
1908 mod->weak = weak;
1909 mod->bpf_counter = bpf_counter;
1910 mod->exclusive = exclusive;
1911
1912 return 0;
1913 }
1914
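/*
 * Basic modifier sanity check: each modifier may appear at most once,
 * except 'p' which may be repeated (up to "ppp").
 */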
1919 static int check_modifier(char *str)
1920 {
1921 char *p = str;
1922
1923
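	/* The sizeof string includes the terminating NUL, hence the -1. */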
1924 if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
1925 return -1;
1926
1927 while (*p) {
1928 if (*p != 'p' && strchr(p + 1, *p))
1929 return -1;
1930 p++;
1931 }
1932
1933 return 0;
1934 }
1935
1936 int parse_events__modifier_event(struct list_head *list, char *str, bool add)
1937 {
1938 struct evsel *evsel;
1939 struct event_modifier mod;
1940
1941 if (str == NULL)
1942 return 0;
1943
1944 if (check_modifier(str))
1945 return -EINVAL;
1946
1947 if (!add && get_event_modifier(&mod, str, NULL))
1948 return -EINVAL;
1949
1950 __evlist__for_each_entry(list, evsel) {
1951 if (add && get_event_modifier(&mod, str, evsel))
1952 return -EINVAL;
1953
1954 evsel->core.attr.exclude_user = mod.eu;
1955 evsel->core.attr.exclude_kernel = mod.ek;
1956 evsel->core.attr.exclude_hv = mod.eh;
1957 evsel->core.attr.precise_ip = mod.precise;
1958 evsel->core.attr.exclude_host = mod.eH;
1959 evsel->core.attr.exclude_guest = mod.eG;
1960 evsel->core.attr.exclude_idle = mod.eI;
1961 evsel->exclude_GH = mod.exclude_GH;
1962 evsel->sample_read = mod.sample_read;
1963 evsel->precise_max = mod.precise_max;
1964 evsel->weak_group = mod.weak;
1965 evsel->bpf_counter = mod.bpf_counter;
1966
1967 if (evsel__is_group_leader(evsel)) {
1968 evsel->core.attr.pinned = mod.pinned;
1969 evsel->core.attr.exclusive = mod.exclusive;
1970 }
1971 }
1972
1973 return 0;
1974 }
1975
1976 int parse_events_name(struct list_head *list, const char *name)
1977 {
1978 struct evsel *evsel;
1979
1980 __evlist__for_each_entry(list, evsel) {
1981 if (!evsel->name)
1982 evsel->name = strdup(name);
1983 }
1984
1985 return 0;
1986 }
1987
1988 static int
1989 comp_pmu(const void *p1, const void *p2)
1990 {
1991 struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
1992 struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;
1993
1994 return strcasecmp(pmu1->symbol, pmu2->symbol);
1995 }
1996
1997 static void perf_pmu__parse_cleanup(void)
1998 {
1999 if (perf_pmu_events_list_num > 0) {
2000 struct perf_pmu_event_symbol *p;
2001 int i;
2002
2003 for (i = 0; i < perf_pmu_events_list_num; i++) {
2004 p = perf_pmu_events_list + i;
2005 zfree(&p->symbol);
2006 }
2007 zfree(&perf_pmu_events_list);
2008 perf_pmu_events_list_num = 0;
2009 }
2010 }
2011
2012 #define SET_SYMBOL(str, stype) \
2013 do { \
2014 p->symbol = str; \
2015 if (!p->symbol) \
2016 goto err; \
2017 p->type = stype; \
2018 } while (0)
2019
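/*
 * Scan the PMU event aliases from sysfs and record their symbols in
 * perf_pmu_events_list for the event parser to look up.
 */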
2024 static void perf_pmu__parse_init(void)
2025 {
2026
2027 struct perf_pmu *pmu = NULL;
2028 struct perf_pmu_alias *alias;
2029 int len = 0;
2030
2031 pmu = NULL;
2032 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
2033 list_for_each_entry(alias, &pmu->aliases, list) {
2034 char *tmp = strchr(alias->name, '-');
2035
2036 if (tmp) {
2037 char *tmp2 = NULL;
2038
2039 tmp2 = strchr(tmp + 1, '-');
2040 len++;
2041 if (tmp2)
2042 len++;
2043 }
2044
2045 len++;
2046 }
2047 }
2048
2049 if (len == 0) {
2050 perf_pmu_events_list_num = -1;
2051 return;
2052 }
2053 perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
2054 if (!perf_pmu_events_list)
2055 return;
2056 perf_pmu_events_list_num = len;
2057
2058 len = 0;
2059 pmu = NULL;
2060 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
2061 list_for_each_entry(alias, &pmu->aliases, list) {
2062 struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
2063 char *tmp = strchr(alias->name, '-');
2064 char *tmp2 = NULL;
2065
2066 if (tmp)
2067 tmp2 = strchr(tmp + 1, '-');
2068 if (tmp2) {
2069 SET_SYMBOL(strndup(alias->name, tmp - alias->name),
2070 PMU_EVENT_SYMBOL_PREFIX);
2071 p++;
2072 tmp++;
2073 SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX);
2074 p++;
2075 SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2);
2076 len += 3;
2077 } else if (tmp) {
2078 SET_SYMBOL(strndup(alias->name, tmp - alias->name),
2079 PMU_EVENT_SYMBOL_PREFIX);
2080 p++;
2081 SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
2082 len += 2;
2083 } else {
2084 SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
2085 len++;
2086 }
2087 }
2088 }
2089 qsort(perf_pmu_events_list, len,
2090 sizeof(struct perf_pmu_event_symbol), comp_pmu);
2091
2092 return;
2093 err:
2094 perf_pmu__parse_cleanup();
2095 }
2096
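/*
 * Inject a fixed set of symbols into perf_pmu_events_list so the parsing
 * tests can exercise PMU event symbol handling without real sysfs aliases.
 */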
2102 int perf_pmu__test_parse_init(void)
2103 {
2104 struct perf_pmu_event_symbol *list, *tmp, symbols[] = {
2105 {(char *)"read", PMU_EVENT_SYMBOL},
2106 {(char *)"event", PMU_EVENT_SYMBOL_PREFIX},
2107 {(char *)"two", PMU_EVENT_SYMBOL_SUFFIX},
2108 {(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX},
2109 {(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2},
2110 };
2111 unsigned long i, j;
2112
2113 tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols));
2114 if (!list)
2115 return -ENOMEM;
2116
2117 for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
2118 tmp->type = symbols[i].type;
2119 tmp->symbol = strdup(symbols[i].symbol);
2120 if (!tmp->symbol)
2121 goto err_free;
2122 }
2123
2124 perf_pmu_events_list = list;
2125 perf_pmu_events_list_num = ARRAY_SIZE(symbols);
2126
2127 qsort(perf_pmu_events_list, ARRAY_SIZE(symbols),
2128 sizeof(struct perf_pmu_event_symbol), comp_pmu);
2129 return 0;
2130
2131 err_free:
2132 for (j = 0, tmp = list; j < i; j++, tmp++)
2133 free(tmp->symbol);
2134 free(list);
2135 return -ENOMEM;
2136 }
2137
2138 enum perf_pmu_event_symbol_type
2139 perf_pmu__parse_check(const char *name)
2140 {
2141 struct perf_pmu_event_symbol p, *r;
2142
2143
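	/* Scan the kernel PMU events from sysfs if not yet done. */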
2144 if (perf_pmu_events_list_num == 0)
2145 perf_pmu__parse_init();
2146
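	/*
	 * "cpu" can be a prefix of cpu-cycles or of a cpu// event.
	 * cpu-cycles is handled by the hardcoded tables, so a plain "cpu"
	 * here must be a cpu// event rather than a kernel PMU event symbol.
	 */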
2151 if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu"))
2152 return PMU_EVENT_SYMBOL_ERR;
2153
2154 p.symbol = strdup(name);
2155 r = bsearch(&p, perf_pmu_events_list,
2156 (size_t) perf_pmu_events_list_num,
2157 sizeof(struct perf_pmu_event_symbol), comp_pmu);
2158 zfree(&p.symbol);
2159 return r ? r->type : PMU_EVENT_SYMBOL_ERR;
2160 }
2161
2162 static int parse_events__scanner(const char *str,
2163 struct parse_events_state *parse_state)
2164 {
2165 YY_BUFFER_STATE buffer;
2166 void *scanner;
2167 int ret;
2168
2169 ret = parse_events_lex_init_extra(parse_state, &scanner);
2170 if (ret)
2171 return ret;
2172
2173 buffer = parse_events__scan_string(str, scanner);
2174
2175 #ifdef PARSER_DEBUG
2176 parse_events_debug = 1;
2177 parse_events_set_debug(1, scanner);
2178 #endif
2179 ret = parse_events_parse(parse_state, scanner);
2180
2181 parse_events__flush_buffer(buffer, scanner);
2182 parse_events__delete_buffer(buffer, scanner);
2183 parse_events_lex_destroy(scanner);
2184 return ret;
2185 }
2186
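/*
 * Parse an event config string (the term list inside the //) into a list
 * of parse_events_term entries.
 */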
2190 int parse_events_terms(struct list_head *terms, const char *str)
2191 {
2192 struct parse_events_state parse_state = {
2193 .terms = NULL,
2194 .stoken = PE_START_TERMS,
2195 };
2196 int ret;
2197
2198 ret = parse_events__scanner(str, &parse_state);
2199 perf_pmu__parse_cleanup();
2200
2201 if (!ret) {
2202 list_splice(parse_state.terms, terms);
2203 zfree(&parse_state.terms);
2204 return 0;
2205 }
2206
2207 parse_events_terms__delete(parse_state.terms);
2208 return ret;
2209 }
2210
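/*
 * Re-parse @str with a private parse_events_state whose hybrid_pmu_name is
 * pinned to @pmu_name, then splice any evsels produced onto @list. Returns
 * -1 when the parse succeeds but produces no events.
 */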
2211 static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
2212 const char *str, char *pmu_name,
2213 struct list_head *list)
2214 {
2215 struct parse_events_state ps = {
2216 .list = LIST_HEAD_INIT(ps.list),
2217 .stoken = PE_START_EVENTS,
2218 .hybrid_pmu_name = pmu_name,
2219 .idx = parse_state->idx,
2220 };
2221 int ret;
2222
2223 ret = parse_events__scanner(str, &ps);
2224 perf_pmu__parse_cleanup();
2225
2226 if (!ret) {
2227 if (!list_empty(&ps.list)) {
2228 list_splice(&ps.list, list);
2229 parse_state->idx = ps.idx;
2230 return 0;
2231 } else
2232 return -1;
2233 }
2234
2235 return ret;
2236 }
2237
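/*
 * Parse the event specification @str and append the resulting evsels to
 * @evlist. @fake_pmu, when set, is used instead of a real PMU (e.g. by the
 * perf self tests). On success the last evsel added is marked as the
 * boundary of this command line group.
 */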
2238 int __parse_events(struct evlist *evlist, const char *str,
2239 struct parse_events_error *err, struct perf_pmu *fake_pmu)
2240 {
2241 struct parse_events_state parse_state = {
2242 .list = LIST_HEAD_INIT(parse_state.list),
2243 .idx = evlist->core.nr_entries,
2244 .error = err,
2245 .evlist = evlist,
2246 .stoken = PE_START_EVENTS,
2247 .fake_pmu = fake_pmu,
2248 };
2249 int ret;
2250
2251 ret = parse_events__scanner(str, &parse_state);
2252 perf_pmu__parse_cleanup();
2253
2254 if (!ret && list_empty(&parse_state.list)) {
2255 WARN_ONCE(true, "WARNING: event parser found nothing\n");
2256 return -1;
2257 }
2258
2259 /*
2260  * Splice onto the evlist even on error, so callers can clean up with a single evlist__delete().
2261  */
2262 evlist__splice_list_tail(evlist, &parse_state.list);
2263
2264 if (!ret) {
2265 struct evsel *last;
2266
2267 evlist->core.nr_groups += parse_state.nr_groups;
2268 last = evlist__last(evlist);
2269 last->cmdline_group_boundary = true;
2270
2271 return 0;
2272 }
2273
2274 /*
2275  * On failure the events parsed so far stay on the evlist spliced above;
2276  * callers are expected to dispose of it with evlist__delete(), so there
2277  * is nothing more to free here.
2278  */
2279 return ret;
2280 }
2281
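/* Convenience wrapper: parse @str into @evlist, discarding detailed error info. */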
2282 int parse_event(struct evlist *evlist, const char *str)
2283 {
2284 struct parse_events_error err;
2285 int ret;
2286
2287 parse_events_error__init(&err);
2288 ret = parse_events(evlist, str, &err);
2289 parse_events_error__exit(&err);
2290 return ret;
2291 }
2292
2293 void parse_events_error__init(struct parse_events_error *err)
2294 {
2295 bzero(err, sizeof(*err));
2296 }
2297
2298 void parse_events_error__exit(struct parse_events_error *err)
2299 {
2300 zfree(&err->str);
2301 zfree(&err->help);
2302 zfree(&err->first_str);
2303 zfree(&err->first_help);
2304 }
2305
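/*
 * Record a parse error at string offset @idx. Ownership of @str and @help
 * passes to @err: the first and the most recent error are kept, anything
 * in between is freed with a debug message.
 */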
2306 void parse_events_error__handle(struct parse_events_error *err, int idx,
2307 char *str, char *help)
2308 {
2309 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
2310 goto out_free;
2311 switch (err->num_errors) {
2312 case 0:
2313 err->idx = idx;
2314 err->str = str;
2315 err->help = help;
2316 break;
2317 case 1:
2318 err->first_idx = err->idx;
2319 err->idx = idx;
2320 err->first_str = err->str;
2321 err->str = str;
2322 err->first_help = err->help;
2323 err->help = help;
2324 break;
2325 default:
2326 pr_debug("Multiple errors dropping message: %s (%s)\n",
2327 err->str, err->help);
2328 free(err->str);
2329 err->str = str;
2330 free(err->help);
2331 err->help = help;
2332 break;
2333 }
2334 err->num_errors++;
2335 return;
2336
2337 out_free:
2338 free(str);
2339 free(help);
2340 }
2341
2342 #define MAX_WIDTH 1000
2343 static int get_term_width(void)
2344 {
2345 struct winsize ws;
2346
2347 get_term_dimensions(&ws);
2348 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
2349 }
2350
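/*
 * Print the offending event string with a "\___" marker under the error
 * offset; long strings are trimmed with ".." so the output fits the
 * terminal width.
 */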
2351 static void __parse_events_error__print(int err_idx, const char *err_str,
2352 const char *err_help, const char *event)
2353 {
2354 const char *str = "invalid or unsupported event: ";
2355 char _buf[MAX_WIDTH];
2356 char *buf = (char *) event;
2357 int idx = 0;
2358 if (err_str) {
2359 /* -2 for the two quote characters in the final fprintf below */
2360 int width = get_term_width() - 2;
2361 int len_event = strlen(event);
2362 int len_str, max_len, cut = 0;
2363
2364 /*
2365  * Maximum indentation of the error marker; if the error index is
2366  * larger than this, the front of the event string is cut off.
2367  */
2368 int max_err_idx = 13;
2369
2370 /*
2371  * Be more specific in the message when the precise error position
2372  * within the event string is known.
2373  */
2374 str = "event syntax error: ";
2375 len_str = strlen(str);
2376 max_len = width - len_str;
2377
2378 buf = _buf;
2379
2380 /* We are cutting from the beginning of the string. */
2381 if (err_idx > max_err_idx)
2382 cut = err_idx - max_err_idx;
2383
2384 strncpy(buf, event + cut, max_len);
2385
2386 /* Mark the cut parts with ".." on both ends. */
2387 if (cut)
2388 buf[0] = buf[1] = '.';
2389
2390 if ((len_event - cut) > max_len) {
2391 buf[max_len - 1] = buf[max_len - 2] = '.';
2392 buf[max_len] = 0;
2393 }
2394
2395 idx = len_str + err_idx - cut;
2396 }
2397
2398 fprintf(stderr, "%s'%s'\n", str, buf);
2399 if (idx) {
2400 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
2401 if (err_help)
2402 fprintf(stderr, "\n%s\n", err_help);
2403 }
2404 }
2405
2406 void parse_events_error__print(struct parse_events_error *err,
2407 const char *event)
2408 {
2409 if (!err->num_errors)
2410 return;
2411
2412 __parse_events_error__print(err->idx, err->str, err->help, event);
2413
2414 if (err->num_errors > 1) {
2415 fputs("\nInitial error:\n", stderr);
2416 __parse_events_error__print(err->first_idx, err->first_str,
2417 err->first_help, event);
2418 }
2419 }
2420
2421 #undef MAX_WIDTH
2422
2423 int parse_events_option(const struct option *opt, const char *str,
2424 int unset __maybe_unused)
2425 {
2426 struct evlist *evlist = *(struct evlist **)opt->value;
2427 struct parse_events_error err;
2428 int ret;
2429
2430 parse_events_error__init(&err);
2431 ret = parse_events(evlist, str, &err);
2432
2433 if (ret) {
2434 parse_events_error__print(&err, str);
2435 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
2436 }
2437 parse_events_error__exit(&err);
2438
2439 return ret;
2440 }
2441
2442 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
2443 {
2444 struct evlist **evlistp = opt->value;
2445 int ret;
2446
2447 if (*evlistp == NULL) {
2448 *evlistp = evlist__new();
2449
2450 if (*evlistp == NULL) {
2451 fprintf(stderr, "Not enough memory to create evlist\n");
2452 return -1;
2453 }
2454 }
2455
2456 ret = parse_events_option(opt, str, unset);
2457 if (ret) {
2458 evlist__delete(*evlistp);
2459 *evlistp = NULL;
2460 }
2461
2462 return ret;
2463 }
2464
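/*
 * Apply @func to every evsel created by the last event specification on the
 * command line, walking backwards from the last evsel until the previous
 * cmdline_group_boundary (or the head of the list) is reached.
 */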
2465 static int
2466 foreach_evsel_in_last_glob(struct evlist *evlist,
2467 int (*func)(struct evsel *evsel,
2468 const void *arg),
2469 const void *arg)
2470 {
2471 struct evsel *last = NULL;
2472 int err;
2473
2474 /*
2475  * Don't bail out when the list is empty: call @func with a NULL evsel
2476  * so it gets the chance to report its own, more specific error.
2477  *
2478  * No WARN is needed here, *func does the reporting.
2479  */
2480 if (evlist->core.nr_entries > 0)
2481 last = evlist__last(evlist);
2482
2483 do {
2484 err = (*func)(last, arg);
2485 if (err)
2486 return -1;
2487 if (!last)
2488 return 0;
2489
2490 if (last->core.node.prev == &evlist->core.entries)
2491 return 0;
2492 last = list_entry(last->core.node.prev, struct evsel, core.node);
2493 } while (!last->cmdline_group_boundary);
2494
2495 return 0;
2496 }
2497
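/*
 * Attach the filter string in @arg to @evsel: tracepoint events get a
 * tracepoint filter appended; other event types need a PMU advertising
 * nr_addr_filters, in which case an address filter is appended instead.
 */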
2498 static int set_filter(struct evsel *evsel, const void *arg)
2499 {
2500 const char *str = arg;
2501 bool found = false;
2502 int nr_addr_filters = 0;
2503 struct perf_pmu *pmu = NULL;
2504
2505 if (evsel == NULL) {
2506 fprintf(stderr,
2507 "--filter option should follow a -e tracepoint or HW tracer option\n");
2508 return -1;
2509 }
2510
2511 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
2512 if (evsel__append_tp_filter(evsel, str) < 0) {
2513 fprintf(stderr,
2514 "not enough memory to hold filter string\n");
2515 return -1;
2516 }
2517
2518 return 0;
2519 }
2520
2521 while ((pmu = perf_pmu__scan(pmu)) != NULL)
2522 if (pmu->type == evsel->core.attr.type) {
2523 found = true;
2524 break;
2525 }
2526
2527 if (found)
2528 perf_pmu__scan_file(pmu, "nr_addr_filters",
2529 "%d", &nr_addr_filters);
2530
2531 if (!nr_addr_filters) {
2532 fprintf(stderr,
2533 "This CPU does not support address filtering\n");
2534 return -1;
2535 }
2536
2537 if (evsel__append_addr_filter(evsel, str) < 0) {
2538 fprintf(stderr,
2539 "not enough memory to hold filter string\n");
2540 return -1;
2541 }
2542
2543 return 0;
2544 }
2545
2546 int parse_filter(const struct option *opt, const char *str,
2547 int unset __maybe_unused)
2548 {
2549 struct evlist *evlist = *(struct evlist **)opt->value;
2550
2551 return foreach_evsel_in_last_glob(evlist, set_filter,
2552 (const void *)str);
2553 }
2554
2555 static int add_exclude_perf_filter(struct evsel *evsel,
2556 const void *arg __maybe_unused)
2557 {
2558 char new_filter[64];
2559
2560 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2561 fprintf(stderr,
2562 "--exclude-perf option should follow a -e tracepoint option\n");
2563 return -1;
2564 }
2565
2566 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
2567
2568 if (evsel__append_tp_filter(evsel, new_filter) < 0) {
2569 fprintf(stderr,
2570 "not enough memory to hold filter string\n");
2571 return -1;
2572 }
2573
2574 return 0;
2575 }
2576
2577 int exclude_perf(const struct option *opt,
2578 const char *arg __maybe_unused,
2579 int unset __maybe_unused)
2580 {
2581 struct evlist *evlist = *(struct evlist **)opt->value;
2582
2583 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
2584 NULL);
2585 }
2586
2587 int parse_events__is_hardcoded_term(struct parse_events_term *term)
2588 {
2589 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
2590 }
2591
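/*
 * Allocate a term from the template @temp and assign either the numeric or
 * the string value according to type_val.
 */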
2592 static int new_term(struct parse_events_term **_term,
2593 struct parse_events_term *temp,
2594 char *str, u64 num)
2595 {
2596 struct parse_events_term *term;
2597
2598 term = malloc(sizeof(*term));
2599 if (!term)
2600 return -ENOMEM;
2601
2602 *term = *temp;
2603 INIT_LIST_HEAD(&term->list);
2604 term->weak = false;
2605
2606 switch (term->type_val) {
2607 case PARSE_EVENTS__TERM_TYPE_NUM:
2608 term->val.num = num;
2609 break;
2610 case PARSE_EVENTS__TERM_TYPE_STR:
2611 term->val.str = str;
2612 break;
2613 default:
2614 free(term);
2615 return -EINVAL;
2616 }
2617
2618 *_term = term;
2619 return 0;
2620 }
2621
2622 int parse_events_term__num(struct parse_events_term **term,
2623 int type_term, char *config, u64 num,
2624 bool no_value,
2625 void *loc_term_, void *loc_val_)
2626 {
2627 YYLTYPE *loc_term = loc_term_;
2628 YYLTYPE *loc_val = loc_val_;
2629
2630 struct parse_events_term temp = {
2631 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
2632 .type_term = type_term,
2633 .config = config ? : strdup(config_term_names[type_term]),
2634 .no_value = no_value,
2635 .err_term = loc_term ? loc_term->first_column : 0,
2636 .err_val = loc_val ? loc_val->first_column : 0,
2637 };
2638
2639 return new_term(term, &temp, NULL, num);
2640 }
2641
2642 int parse_events_term__str(struct parse_events_term **term,
2643 int type_term, char *config, char *str,
2644 void *loc_term_, void *loc_val_)
2645 {
2646 YYLTYPE *loc_term = loc_term_;
2647 YYLTYPE *loc_val = loc_val_;
2648
2649 struct parse_events_term temp = {
2650 .type_val = PARSE_EVENTS__TERM_TYPE_STR,
2651 .type_term = type_term,
2652 .config = config,
2653 .err_term = loc_term ? loc_term->first_column : 0,
2654 .err_val = loc_val ? loc_val->first_column : 0,
2655 };
2656
2657 return new_term(term, &temp, str, 0);
2658 }
2659
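/*
 * Build a string term whose value is the symbolic name of hardware event
 * @idx from event_symbols_hw[]; @config defaults to "event" when NULL.
 */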
2660 int parse_events_term__sym_hw(struct parse_events_term **term,
2661 char *config, unsigned idx)
2662 {
2663 struct event_symbol *sym;
2664 char *str;
2665 struct parse_events_term temp = {
2666 .type_val = PARSE_EVENTS__TERM_TYPE_STR,
2667 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
2668 .config = config,
2669 };
2670
2671 if (!temp.config) {
2672 temp.config = strdup("event");
2673 if (!temp.config)
2674 return -ENOMEM;
2675 }
2676 BUG_ON(idx >= PERF_COUNT_HW_MAX);
2677 sym = &event_symbols_hw[idx];
2678
2679 str = strdup(sym->symbol);
2680 if (!str)
2681 return -ENOMEM;
2682 return new_term(term, &temp, str, 0);
2683 }
2684
2685 int parse_events_term__clone(struct parse_events_term **new,
2686 struct parse_events_term *term)
2687 {
2688 char *str;
2689 struct parse_events_term temp = {
2690 .type_val = term->type_val,
2691 .type_term = term->type_term,
2692 .config = NULL,
2693 .err_term = term->err_term,
2694 .err_val = term->err_val,
2695 };
2696
2697 if (term->config) {
2698 temp.config = strdup(term->config);
2699 if (!temp.config)
2700 return -ENOMEM;
2701 }
2702 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2703 return new_term(new, &temp, NULL, term->val.num);
2704
2705 str = strdup(term->val.str);
2706 if (!str)
2707 return -ENOMEM;
2708 return new_term(new, &temp, str, 0);
2709 }
2710
2711 void parse_events_term__delete(struct parse_events_term *term)
2712 {
2713 if (term->array.nr_ranges)
2714 zfree(&term->array.ranges);
2715
2716 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
2717 zfree(&term->val.str);
2718
2719 zfree(&term->config);
2720 free(term);
2721 }
2722
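/*
 * Deep-copy the term list @old into a newly allocated list head at *@new;
 * a NULL @old simply yields a NULL *@new.
 */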
2723 int parse_events_copy_term_list(struct list_head *old,
2724 struct list_head **new)
2725 {
2726 struct parse_events_term *term, *n;
2727 int ret;
2728
2729 if (!old) {
2730 *new = NULL;
2731 return 0;
2732 }
2733
2734 *new = malloc(sizeof(struct list_head));
2735 if (!*new)
2736 return -ENOMEM;
2737 INIT_LIST_HEAD(*new);
2738
2739 list_for_each_entry (term, old, list) {
2740 ret = parse_events_term__clone(&n, term);
2741 if (ret)
2742 return ret;
2743 list_add_tail(&n->list, *new);
2744 }
2745 return 0;
2746 }
2747
2748 void parse_events_terms__purge(struct list_head *terms)
2749 {
2750 struct parse_events_term *term, *h;
2751
2752 list_for_each_entry_safe(term, h, terms, list) {
2753 list_del_init(&term->list);
2754 parse_events_term__delete(term);
2755 }
2756 }
2757
2758 void parse_events_terms__delete(struct list_head *terms)
2759 {
2760 if (!terms)
2761 return;
2762 parse_events_terms__purge(terms);
2763 free(terms);
2764 }
2765
2766 void parse_events__clear_array(struct parse_events_array *a)
2767 {
2768 zfree(&a->ranges);
2769 }
2770
2771 void parse_events_evlist_error(struct parse_events_state *parse_state,
2772 int idx, const char *str)
2773 {
2774 if (!parse_state->error)
2775 return;
2776
2777 parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
2778 }
2779
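/*
 * Fill @buf with a comma separated list of the generally available config
 * term names, skipping internal ("<...>") entries and stopping before the
 * buffer would overflow.
 */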
2780 static void config_terms_list(char *buf, size_t buf_sz)
2781 {
2782 int i;
2783 bool first = true;
2784
2785 buf[0] = '\0';
2786 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
2787 const char *name = config_term_names[i];
2788
2789 if (!config_term_avail(i, NULL))
2790 continue;
2791 if (!name)
2792 continue;
2793 if (name[0] == '<')
2794 continue;
2795
2796 if (strlen(buf) + strlen(name) + 2 >= buf_sz)
2797 return;
2798
2799 if (!first)
2800 strcat(buf, ",");
2801 else
2802 first = false;
2803 strcat(buf, name);
2804 }
2805 }
2806
2807 /*
2808  * Return a freshly allocated string listing the valid config terms for an
2809  * event; @additional_terms carries extra (e.g. PMU sysfs format) terms.
2810  */
2811 char *parse_events_formats_error_string(char *additional_terms)
2812 {
2813 char *str;
2814
2815 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
2816 (sizeof("no-overwrite") - 1)];
2817
2818 config_terms_list(static_terms, sizeof(static_terms));
2819
2820 if (additional_terms) {
2821 if (asprintf(&str, "valid terms: %s,%s",
2822 additional_terms, static_terms) < 0)
2823 goto fail;
2824 } else {
2825 if (asprintf(&str, "valid terms: %s", static_terms) < 0)
2826 goto fail;
2827 }
2828 return str;
2829
2830 fail:
2831 return NULL;
2832 }
2833
2834 struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
2835 struct perf_event_attr *attr,
2836 const char *name,
2837 const char *metric_id,
2838 struct perf_pmu *pmu,
2839 struct list_head *config_terms)
2840 {
2841 return __add_event(list, idx, attr, true, name, metric_id,
2842 pmu, config_terms, false,
2843 NULL);
2844 }