// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
#include "util/evlist-hybrid.h"
#include "util/pmu.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

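/*
 * Helpers to index the per-cpu (x) and per-thread (y) tables kept in the
 * core evsel: FD() yields the perf_event_open() file descriptor, SID() the
 * corresponding perf_sample_id entry.
 */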
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	perf_evlist__add(&evlist->core, &entry->core);
	entry->evlist = evlist;
	entry->tracking = !entry->core.idx;

	if (evlist->core.nr_entries == 1)
		evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

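/*
 * Move all evsels from 'list' to the tail of 'evlist', one group at a time:
 * the first entry still on the list is taken as the leader, then every
 * remaining entry that has it as leader is moved right after it, keeping
 * group members contiguous.
 */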
void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
{
	while (!list_empty(list)) {
		struct evsel *evsel, *temp, *leader = NULL;

		__evlist__for_each_entry_safe(list, temp, evsel) {
			list_del_init(&evsel->core.node);
			evlist__add(evlist, evsel);
			leader = evsel;
			break;
		}

		__evlist__for_each_entry_safe(list, temp, evsel) {
			if (evsel__has_leader(evsel, leader)) {
				list_del_init(&evsel->core.node);
				evlist__add(evlist, evsel);
			}
		}
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void evlist__set_leader(struct evlist *evlist)
{
	perf_evlist__set_leader(&evlist->core);
}

int __evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel;

	evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
				  PERF_COUNT_HW_CPU_CYCLES);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static struct evsel *evlist__dummy_event(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr),
	};

	return evsel__new_idx(&attr, evlist->core.nr_entries);
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct evsel *evsel = evlist__dummy_event(evlist);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static void evlist__add_on_all_cpus(struct evlist *evlist, struct evsel *evsel)
{
	evsel->core.system_wide = true;

	/*
	 * All CPUs.
	 *
	 * Note perf_event_open() does not accept CPUs that are not online, so
	 * in fact this CPU list will include only all online CPUs.
	 */
	perf_cpu_map__put(evsel->core.own_cpus);
	evsel->core.own_cpus = perf_cpu_map__new(NULL);
	perf_cpu_map__put(evsel->core.cpus);
	evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);

	/* No threads */
	perf_thread_map__put(evsel->core.threads);
	evsel->core.threads = perf_thread_map__new_dummy();

	evlist__add(evlist, evsel);
}

struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel = evlist__dummy_event(evlist);

	if (!evsel)
		return NULL;

	evsel->core.attr.exclude_kernel = 1;
	evsel->core.attr.exclude_guest = 1;
	evsel->core.attr.exclude_hv = 1;
	evsel->core.attr.freq = 0;
	evsel->core.attr.sample_period = 1;
	evsel->no_aux_samples = true;
	evsel->name = strdup("dummy:u");

	if (system_wide)
		evlist__add_on_all_cpus(evlist, evsel);
	else
		evlist__add(evlist, evsel);

	return evsel;
}

int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
					  struct perf_event_attr *attrs,
					  size_t nr_attrs)
{
	if (!nr_attrs)
		return 0;

	return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
}

struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

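/*
 * Begin a CPU-major iteration over the evlist: every evsel is visited for
 * the first CPU in evlist->core.all_cpus, then every evsel for the next
 * CPU, and so on.  When an affinity is provided, the iterator migrates the
 * current thread to each CPU before its events are touched.
 */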
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{
	struct evlist_cpu_iterator itr = {
		.container = evlist,
		.evsel = NULL,
		.cpu_map_idx = 0,
		.evlist_cpu_map_idx = 0,
		.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
		.cpu = (struct perf_cpu){ .cpu = -1},
		.affinity = affinity,
	};

	if (evlist__empty(evlist)) {
		/* Ensure the empty list doesn't iterate. */
		itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
	} else {
		itr.evsel = evlist__first(evlist);
		if (itr.affinity) {
			itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
			affinity__set(itr.affinity, itr.cpu.cpu);
			itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
			/*
			 * If this CPU isn't in the evsel's cpu map then advance
			 * through the list.
			 */
			if (itr.cpu_map_idx == -1)
				evlist_cpu_iterator__next(&itr);
		}
	}
	return itr;
}

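/*
 * Advance to the next (evsel, CPU) pair, skipping evsels whose cpu map does
 * not contain the CPU currently being iterated.
 */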
void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
{
	while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
		evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		if (evlist_cpu_itr->cpu_map_idx != -1)
			return;
	}
	evlist_cpu_itr->evlist_cpu_map_idx++;
	if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
		evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
		evlist_cpu_itr->cpu =
			perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
					  evlist_cpu_itr->evlist_cpu_map_idx);
		if (evlist_cpu_itr->affinity)
			affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		/*
		 * If this CPU isn't in the evsel's cpu map then advance through
		 * the list.
		 */
		if (evlist_cpu_itr->cpu_map_idx == -1)
			evlist_cpu_iterator__next(evlist_cpu_itr);
	}
}

bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
{
	return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
}

static int evsel__strcmp(struct evsel *pos, char *evsel_name)
{
	if (!evsel_name)
		return 0;
	if (evsel__is_dummy_event(pos))
		return 1;
	return strcmp(pos->name, evsel_name);
}

static bool evlist__is_enabled(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;

		/* If at least one event is enabled, evlist is enabled. */
		if (!pos->disabled)
			return true;
	}
	return false;
}

static void __evlist__disable(struct evlist *evlist, char *evsel_name)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	bool has_imm = false;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	/* Disable 'immediate' events last */
	for (int imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
			pos = evlist_cpu_itr.evsel;
			if (evsel__strcmp(pos, evsel_name))
				continue;
			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			if (pos->immediate)
				has_imm = true;
			if (pos->immediate != imm)
				continue;
			evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	/*
	 * If we disabled only single event, we need to check
	 * the enabled state of the evlist manually.
	 */
	if (evsel_name)
		evlist->enabled = evlist__is_enabled(evlist);
	else
		evlist->enabled = false;
}

void evlist__disable(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL);
}

void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__disable(evlist, evsel_name);
}

static void __evlist__enable(struct evlist *evlist, char *evsel_name)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
		pos = evlist_cpu_itr.evsel;
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
	}
	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	/*
	 * Even single event sets the 'enabled' for evlist,
	 * so the toggle can work properly and toggle to
	 * 'disabled' state.
	 */
	evlist->enabled = true;
}

void evlist__enable(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL);
}

void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__enable(evlist, evsel_name);
}

void evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
				       fdarray_flag__nonfilterable);
}
#endif

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

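/*
 * Look up the perf_sample_id for an event ID via the evlist's fixed-size
 * hash of PERF_EVLIST__HLIST_BITS buckets.
 */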
struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

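/*
 * Extract the event ID from a raw event: for PERF_RECORD_SAMPLE the ID sits
 * at id_pos u64s from the start of the sample array, for other record types
 * at is_pos u64s from the end (both positions were precomputed from the
 * evsel's sample_type).
 */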
static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}

static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_evsel *_evsel,
			 struct perf_mmap_param *_mp,
			 int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
	struct evsel *evsel = container_of(_evsel, struct evsel, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, struct perf_cpu cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

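/*
 * Derive how many ring-buffer pages an unprivileged user may mlock from the
 * perf_event_mlock_kb sysctl, leaving room for the control/header page and
 * rounding down to a power of two.
 */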
unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but lets not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

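/*
 * Total mmap length for 'pages' data pages: one extra page is added for the
 * perf_event_mmap_page control/header page.
 */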
size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

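/*
 * Parse a --mmap-pages style argument: either a plain page count or a size
 * with a B/K/M/G suffix that is converted to pages.  Non power of two values
 * are rounded up; values above 'max' are rejected, and zero is only accepted
 * when 'min' is zero.
 */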
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1       },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
	return __evlist__parse_mmap_pages(opt->value, str);
}

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages - auxtrace map length in pages
 * @auxtrace_overwrite - overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
		    int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If specify '-a' and '--per-thread' to perf record, perf record
	 * will override '--per-thread'. target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If specify '--per-thread' only to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false.
	 * For perf record, thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That will keep perf record's
	 * current behavior.
	 *
	 * For perf stat, it allows the case that target->per_thread and
	 * target->system_wide are all true. It means to collect system-wide
	 * per-thread data. thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list && !target->hybrid;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * filters only work for tracepoint event, which doesn't have cpu limit.
		 * So evlist and evsel should always be same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

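/*
 * Build a tracepoint filter string of the form
 * "common_pid != <pid0> && common_pid != <pid1> && ...", typically used to
 * filter perf's own threads out of the trace.
 */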
char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter = NULL;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}

bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u16 evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0;
}

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity affinity;

	/*
	 * With perf record core.user_requested_cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.user_requested_cpus ||
	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
		perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
				      evlist_cpu_itr.cpu_map_idx);
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
	perf_evlist__reset_id_hash(&evlist->core);
}

static int evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forbid the creation
	 * of an evlist with a NULL cpu map.
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);
	err = 0;

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return err;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
		err = evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Change the name of this process not to confuse --exclude-perf users
		 * that sees 'perf' in the window up to the execvp() and thinks
		 * that perf samples are not being excluded.
		 */
		prctl(PR_SET_NAME, "perf-exec");

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);

		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);

	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;

		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);
	int ret;

	if (!evsel)
		return -EFAULT;
	ret = evsel__parse_sample(evsel, event, sample);
	if (ret)
		return ret;
	if (perf_guest && sample->id) {
		struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id);

		if (sid) {
			sample->machine_pid = sid->machine_pid;
			sample->vcpu = sid->vcpu.cpu;
		}
	}
	return 0;
}

int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel__leader(evsel) == evsel__leader(move_evsel))
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

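/*
 * Drive the backward ring-buffer state machine:
 * NOTREADY -> RUNNING -> DATA_PENDING -> EMPTY -> RUNNING -> ...
 * Entering DATA_PENDING pauses the overwrite mmaps so they can be read;
 * going back to RUNNING resumes them.
 */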
void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get here\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		evlist__pause(evlist);
		break;
	case RESUME:
		evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->core.nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

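/*
 * A member of a weak group failed to open: close the already-opened members
 * (if requested), break the group apart and flag every former member so the
 * caller can reopen them individually.
 */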
struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel__leader(evsel);

	pr_debug("Weak group for %s/%d failed\n",
			leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (evsel__has_leader(c2, leader)) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			/*
			 * We want to close all members of the group and remove
			 * them from being recorded (no reason to keep them
			 * open).
			 */
			evsel__remove_from_group(c2, leader);

			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}

	if (leader->core.nr_members == 1)
		leader->core.nr_members = 0;
	return leader;
}

static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *s, *p;
	int ret = 0, fd;

	if (strncmp(str, "fifo:", 5))
		return -EINVAL;

	str += 5;
	if (!*str || *str == ',')
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	/*
	 * O_RDWR avoids POLLHUPs which is necessary to allow the other
	 * end of a FIFO to be repeatedly opened and closed.
	 */
	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
	if (fd < 0) {
		pr_err("Failed to open '%s'\n", s);
		ret = -errno;
		goto out_free;
	}
	*ctl_fd = fd;
	*ctl_fd_close = true;

	if (p && *++p) {
		/* O_RDWR | O_NONBLOCK means the other end need not be open */
		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
		if (fd < 0) {
			pr_err("Failed to open '%s'\n", p);
			ret = -errno;
			goto out_free;
		}
		*ctl_fd_ack = fd;
	}

out_free:
	free(s);
	return ret;
}

int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *comma = NULL, *endptr = NULL;

	*ctl_fd_close = false;

	if (strncmp(str, "fd:", 3))
		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);

	*ctl_fd = strtoul(&str[3], &endptr, 0);
	if (endptr == &str[3])
		return -EINVAL;

	comma = strchr(str, ',');
	if (comma) {
		if (endptr != comma)
			return -EINVAL;

		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
		if (endptr == comma + 1 || *endptr != '\0')
			return -EINVAL;
	}

	return 0;
}

void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
	if (*ctl_fd_close) {
		*ctl_fd_close = false;
		close(ctl_fd);
		if (ctl_fd_ack >= 0)
			close(ctl_fd_ack);
	}
}

int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
{
	if (fd == -1) {
		pr_debug("Control descriptor is not initialized\n");
		return 0;
	}

	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
						     fdarray_flag__nonfilterable);
	if (evlist->ctl_fd.pos < 0) {
		evlist->ctl_fd.pos = -1;
		pr_err("Failed to add ctl fd entry: %m\n");
		return -1;
	}

	evlist->ctl_fd.fd = fd;
	evlist->ctl_fd.ack = ack;

	return 0;
}

bool evlist__ctlfd_initialized(struct evlist *evlist)
{
	return evlist->ctl_fd.pos >= 0;
}

int evlist__finalize_ctlfd(struct evlist *evlist)
{
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	entries[evlist->ctl_fd.pos].fd = -1;
	entries[evlist->ctl_fd.pos].events = 0;
	entries[evlist->ctl_fd.pos].revents = 0;

	evlist->ctl_fd.pos = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.fd = -1;

	return 0;
}

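/*
 * Read one command from the control fd, a byte at a time up to a '\n' or
 * '\0' terminator, and classify it against the known command tags.  The fd
 * is expected to be non-blocking, so EAGAIN simply ends the read with
 * whatever arrived.
 */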
static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
			      char *cmd_data, size_t data_size)
{
	int err;
	char c;
	size_t bytes_read = 0;

	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	memset(cmd_data, 0, data_size);
	data_size--;

	do {
		err = read(evlist->ctl_fd.fd, &c, 1);
		if (err > 0) {
			if (c == '\n' || c == '\0')
				break;
			cmd_data[bytes_read++] = c;
			if (bytes_read == data_size)
				break;
			continue;
		} else if (err == -1) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				err = 0;
			else
				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
		}
		break;
	} while (1);

	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");

	if (bytes_read > 0) {
		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_ENABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_DISABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
				    (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_SNAPSHOT;
			pr_debug("is snapshot\n");
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
				    (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_EVLIST;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
				    (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_STOP;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
				    (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_PING;
		}
	}

	return bytes_read ? (int)bytes_read : err;
}

int evlist__ctlfd_ack(struct evlist *evlist)
{
	int err;

	if (evlist->ctl_fd.ack == -1)
		return 0;

	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
	if (err == -1)
		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);

	return err;
}

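/*
 * Locate the optional argument that follows a control command tag: returns
 * 1 and points *arg at it, 0 if there is none, -1 if the byte after the tag
 * is neither a space nor the string terminator.
 */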
static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
{
	char *data = cmd_data + cmd_size;

	/* no argument */
	if (!*data)
		return 0;

	/* there's argument */
	if (*data == ' ') {
		*arg = data + 1;
		return 1;
	}

	/* malformed */
	return -1;
}

static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable)
{
	struct evsel *evsel;
	char *name;
	int err;

	err = get_cmd_arg(cmd_data,
			  enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 :
				   sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1,
			  &name);
	if (err < 0) {
		pr_info("failed: wrong command\n");
		return -1;
	}

	if (err) {
		evsel = evlist__find_evsel_by_str(evlist, name);
		if (evsel) {
			if (enable)
				evlist__enable_evsel(evlist, name);
			else
				evlist__disable_evsel(evlist, name);
			pr_info("Event %s %s\n", evsel->name,
				enable ? "enabled" : "disabled");
		} else {
			pr_info("failed: can't find '%s' event\n", name);
		}
	} else {
		if (enable) {
			evlist__enable(evlist);
			pr_info(EVLIST_ENABLED_MSG);
		} else {
			evlist__disable(evlist);
			pr_info(EVLIST_DISABLED_MSG);
		}
	}

	return 0;
}

static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data)
{
	struct perf_attr_details details = { .verbose = false, };
	struct evsel *evsel;
	char *arg;
	int err;

	err = get_cmd_arg(cmd_data,
			  sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1,
			  &arg);
	if (err < 0) {
		pr_info("failed: wrong command\n");
		return -1;
	}

	if (err) {
		if (!strcmp(arg, "-v")) {
			details.verbose = true;
		} else if (!strcmp(arg, "-g")) {
			details.event_group = true;
		} else if (!strcmp(arg, "-F")) {
			details.freq = true;
		} else {
			pr_info("failed: wrong command\n");
			return -1;
		}
	}

	evlist__for_each_entry(evlist, evsel)
		evsel__fprintf(evsel, &details, stderr);

	return 0;
}

int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
	int err = 0;
	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
		return 0;

	if (entries[ctlfd_pos].revents & POLLIN) {
		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
					 EVLIST_CTL_CMD_MAX_LEN);
		if (err > 0) {
			switch (*cmd) {
			case EVLIST_CTL_CMD_ENABLE:
			case EVLIST_CTL_CMD_DISABLE:
				err = evlist__ctlfd_enable(evlist, cmd_data,
							   *cmd == EVLIST_CTL_CMD_ENABLE);
				break;
			case EVLIST_CTL_CMD_EVLIST:
				err = evlist__ctlfd_list(evlist, cmd_data);
				break;
			case EVLIST_CTL_CMD_SNAPSHOT:
			case EVLIST_CTL_CMD_STOP:
			case EVLIST_CTL_CMD_PING:
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			default:
				pr_debug("ctlfd: unsupported %d\n", *cmd);
				break;
			}
			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
			      *cmd == EVLIST_CTL_CMD_SNAPSHOT))
				evlist__ctlfd_ack(evlist);
		}
	}

	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
		evlist__finalize_ctlfd(evlist);
	else
		entries[ctlfd_pos].revents = 0;

	return err;
}

int evlist__ctlfd_update(struct evlist *evlist, struct pollfd *update)
{
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	if (entries[ctlfd_pos].fd != update->fd ||
	    entries[ctlfd_pos].events != update->events)
		return -1;

	entries[ctlfd_pos].revents = update->revents;
	return 0;
}

struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.idx == idx)
			return evsel;
	}
	return NULL;
}

int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
{
	struct evsel *evsel;
	int printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_dummy_event(evsel))
			continue;
		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
		} else {
			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
			break;
		}
	}

	return printed;
}

void evlist__check_mem_load_aux(struct evlist *evlist)
{
	struct evsel *leader, *evsel, *pos;

	/*
	 * For some platforms, the 'mem-loads' event is required to use
	 * together with 'mem-loads-aux' within a group and 'mem-loads-aux'
	 * must be the group leader. Now we disable this group before reporting
	 * because 'mem-loads-aux' is just an auxiliary event. It doesn't carry
	 * any valid memory load information.
	 */
	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);
		if (leader == evsel)
			continue;

		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
			for_each_group_evsel(pos, leader) {
				evsel__set_leader(pos, pos);
				pos->core.nr_members = 0;
			}
		}
	}
}