// SPDX-License-Identifier: GPL-2.0-only
/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/zalloc.h>

#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "evsel_config.h"
#include "symbol.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "record.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"

#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>

/*
 * Make a group from 'leader' to 'last', requiring that the events were not
 * already grouped to a different leader.
 */
static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
{
	struct evsel *evsel;
	bool grp;

	if (!evsel__is_group_leader(leader))
		return -EINVAL;

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!(evsel__leader(evsel) == leader ||
			      (evsel__leader(evsel) == evsel &&
			       evsel->core.nr_members <= 1)))
				return -EINVAL;
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!evsel__has_leader(evsel, leader)) {
				evsel__set_leader(evsel, leader);
				if (leader->core.nr_members < 1)
					leader->core.nr_members = 1;
				leader->core.nr_members += 1;
			}
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	return 0;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu.cpu;

	if (!mp->len || !mp->mmap_needed) {
		mm->base = NULL;
		return 0;
	}

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}
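
/*
 * Illustrative note (not from the original source): when the AUX buffer
 * length is a power of 2, the mask set up above lets readers wrap offsets
 * with a cheap bitwise AND instead of a modulo, e.g.:
 *
 *	mp->len  = 16 * 4096;		16 pages of 4KiB = 64KiB
 *	mp->mask = mp->len - 1;		0xffff
 *	off = head & mp->mask;		equivalent to head % mp->len
 *
 * With a non-power-of-2 length, mask stays 0 and __auxtrace_mmap__read()
 * falls back to 'head % mm->len'.
 */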

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist,
				   struct evsel *evsel, int idx)
{
	bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);

	mp->mmap_needed = evsel->needs_auxtrace_mmap;

	if (!mp->mmap_needed)
		return;

	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
		if (evlist->core.threads)
			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu.cpu = -1;
		mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data__fd(session->data);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu.cpu;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__queue_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}
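
/*
 * Worked example (illustrative): on a 32-bit tool a 70MiB buffer is queued
 * by the function above as 32MiB + 32MiB, leaving 6MiB in 'buffer' for the
 * caller to queue.  Only the first chunk has 'consecutive' false, so a
 * decoder knows the pieces form one contiguous stretch of trace:
 *
 *	chunk 1: size = 32MiB, consecutive = false
 *	chunk 2: size = 32MiB, consecutive = true
 *	chunk 3: size =  6MiB, consecutive = true  (the remainder of 'buffer')
 */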

static bool filter_cpu(struct perf_session *session, struct perf_cpu cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu.cpu != -1 && !test_bit(cpu.cpu, cpu_bitmap);
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       struct perf_session *session,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer,
				       struct auxtrace_buffer **buffer_ptr)
{
	int err = -ENOMEM;

	if (filter_cpu(session, buffer->cpu))
		return 0;

	buffer = memdup(buffer, sizeof(*buffer));
	if (!buffer)
		return -ENOMEM;

	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data__is_pipe(session->data)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			goto out_free;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			goto out_free;
	}

	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
	if (err)
		goto out_free;

	/* FIXME: Doesn't work for split buffer */
	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.tid = event->auxtrace.tid,
		.cpu = { event->auxtrace.cpu },
		.data_offset = data_offset,
		.offset = event->auxtrace.offset,
		.reference = event->auxtrace.reference,
		.size = event->auxtrace.size,
	};
	unsigned int idx = event->auxtrace.idx;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
					   buffer_ptr);
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del_init(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}
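
/*
 * Example of the sift-up above (illustrative): inserting ordinal 3 at
 * position 4 of the min-heap [1, 5, 2, 8]:
 *
 *	parent of pos 4 is pos 1 holding 5; 5 > 3, so 5 moves down to pos 4
 *	parent of pos 1 is pos 0 holding 1; 1 <= 3, so stop
 *	result: [1, 3, 2, 8, 5]
 *
 * Keeping the smallest ordinal (typically a timestamp) at the root is what
 * lets decoders merge per-cpu/per-thread queues in time order.
 */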

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
{
	if (!on_exit && itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts)
{
	if (itr) {
		itr->evlist = evlist;
		return itr->recording_options(itr, evlist, opts);
	}
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	/* PMU-agnostic options */
	switch (*str) {
	case 'e':
		opts->auxtrace_snapshot_on_exit = true;
		str++;
		break;
	default:
		break;
	}

	if (itr && itr->parse_snapshot_options)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}
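
/*
 * Usage sketch (hedged): 'str' above is the argument of perf record's
 * snapshot option, e.g. empty for plain snapshot mode or a leading 'e' to
 * also take a snapshot when the traced workload exits:
 *
 *	perf record -e intel_pt// -S  -- workload	snapshot mode
 *	perf record -e intel_pt// -Se -- workload	plus snapshot on exit
 *
 * Whatever follows the PMU-agnostic prefix is handed to the PMU-specific
 * parse_snapshot_options() callback.
 */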

static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);

	if (per_cpu_mmaps) {
		struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
		int cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);

		if (cpu_map_idx == -1)
			return -EINVAL;
		return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
	}

	return perf_evsel__enable_thread(&evsel->core, idx);
}

int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
	struct evsel *evsel;

	if (!itr->evlist || !itr->pmu)
		return -EINVAL;

	evlist__for_each_entry(itr->evlist, evsel) {
		if (evsel->core.attr.type == itr->pmu->type) {
			if (evsel->disabled)
				return 0;
			return evlist__enable_event_idx(itr->evlist, evsel, idx);
		}
	}
	return -EINVAL;
}

/*
 * Event record size is 16-bit which results in a maximum size of about 64KiB.
 * Allow about 4KiB for the rest of the sample record, to give a maximum
 * AUX area sample size of 60KiB.
 */
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)

/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)

static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
					     struct record_opts *opts)
{
	struct evsel *evsel;
	bool has_aux_leader = false;
	u32 sz;

	evlist__for_each_entry(evlist, evsel) {
		sz = evsel->core.attr.aux_sample_size;
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
			if (sz) {
				if (has_aux_leader)
					pr_err("Cannot add AUX area sampling to an AUX area event\n");
				else
					pr_err("Cannot add AUX area sampling to a group leader\n");
				return -EINVAL;
			}
		}
		if (sz > MAX_AUX_SAMPLE_SIZE) {
			pr_err("AUX area sample size %u too big, max. %d\n",
			       sz, MAX_AUX_SAMPLE_SIZE);
			return -EINVAL;
		}
		if (sz) {
			if (!has_aux_leader) {
				pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
				return -EINVAL;
			}
			evsel__set_sample_bit(evsel, AUX);
			opts->auxtrace_sample_mode = true;
		} else {
			evsel__reset_sample_bit(evsel, AUX);
		}
	}

	if (!opts->auxtrace_sample_mode) {
		pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
		return -EINVAL;
	}

	if (!perf_can_aux_sample()) {
		pr_err("AUX area sampling is not supported by kernel\n");
		return -EINVAL;
	}

	return 0;
}

int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str)
{
	struct evsel_config_term *term;
	struct evsel *aux_evsel;
	bool has_aux_sample_size = false;
	bool has_aux_leader = false;
	struct evsel *evsel;
	char *endptr;
	unsigned long sz;

	if (!str)
		goto no_opt;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	sz = strtoul(str, &endptr, 0);
	if (*endptr || sz > UINT_MAX) {
		pr_err("Bad AUX area sampling option: '%s'\n", str);
		return -EINVAL;
	}

	if (!sz)
		sz = itr->default_aux_sample_size;

	if (!sz)
		sz = DEFAULT_AUX_SAMPLE_SIZE;

	/* Set aux_sample_size based on --aux-sample option */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
		} else if (has_aux_leader) {
			evsel->core.attr.aux_sample_size = sz;
		}
	}
no_opt:
	aux_evsel = NULL;
	/* Override with aux_sample_size from config term */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
		if (term) {
			has_aux_sample_size = true;
			evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
			/* If possible, group with the AUX event */
			if (aux_evsel && evsel->core.attr.aux_sample_size)
				evlist__regroup(evlist, aux_evsel, evsel);
		}
	}

	if (!str && !has_aux_sample_size)
		return 0;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	return auxtrace_validate_aux_sample_size(evlist, opts);
}
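
/*
 * Usage sketch (hedged): AUX area sampling attaches trace data to the
 * samples of the other events in a group led by the AUX area event:
 *
 *	perf record -e '{intel_pt//,cycles}' --aux-sample=8192 -- workload
 *
 * Every 'cycles' sample then carries up to 8KiB of Intel PT data.  A bare
 * --aux-sample uses the backend's default_aux_sample_size if set, else
 * DEFAULT_AUX_SAMPLE_SIZE (4KiB).
 */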

void auxtrace_regroup_aux_output(struct evlist *evlist)
{
	struct evsel *evsel, *aux_evsel = NULL;
	struct evsel_config_term *term;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_OUTPUT);
		/* If possible, group with the AUX event */
		if (term && aux_evsel)
			evlist__regroup(evlist, aux_evsel, evsel);
	}
}

struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del_init(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	if (auxtrace__dont_decode(session))
		return 0;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
						     struct perf_sample *sample,
						     struct perf_session *session)
{
	struct perf_sample_id *sid;
	unsigned int idx;
	u64 id;

	id = sample->id;
	if (!id)
		return NULL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return NULL;

	idx = sid->idx;

	if (idx >= queues->nr_queues)
		return NULL;

	return &queues->queue_array[idx];
}

int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.data_offset = data_offset,
		.reference = reference,
		.size = sample->aux_sample.size,
	};
	struct perf_sample_id *sid;
	u64 id = sample->id;
	unsigned int idx;

	if (!id)
		return -EINVAL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return -ENOENT;

	idx = sid->idx;
	buffer.tid = sid->tid;
	buffer.cpu = sid->cpu;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}

struct queue_data {
	bool samples;
	bool events;
};

static int auxtrace_queue_data_cb(struct perf_session *session,
				  union perf_event *event, u64 offset,
				  void *data)
{
	struct queue_data *qd = data;
	struct perf_sample sample;
	int err;

	if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace))
			return -EINVAL;
		offset += event->header.size;
		return session->auxtrace->queue_data(session, NULL, event,
						     offset);
	}

	if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
		return 0;

	err = evlist__parse_sample(session->evlist, event, &sample);
	if (err)
		return err;

	if (!sample.aux_sample.size)
		return 0;

	offset += sample.aux_sample.data - (void *)event;

	return session->auxtrace->queue_data(session, &sample, NULL, offset);
}

int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
	struct queue_data qd = {
		.samples = samples,
		.events = events,
	};

	if (auxtrace__dont_decode(session))
		return 0;

	if (!session->auxtrace || !session->auxtrace->queue_data)
		return -EINVAL;

	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 auxtrace_queue_data_cb, &qd);
}

void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
	int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
				int code, int cpu, pid_t pid, pid_t tid, u64 ip,
				const char *msg, u64 timestamp,
				pid_t machine_pid, int vcpu)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->fmt = 1;
	auxtrace_error->ip = ip;
	auxtrace_error->time = timestamp;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
	if (machine_pid) {
		auxtrace_error->fmt = 2;
		auxtrace_error->machine_pid = machine_pid;
		auxtrace_error->vcpu = vcpu;
		size = sizeof(*auxtrace_error);
	} else {
		size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
		       strlen(auxtrace_error->msg) + 1;
	}
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp)
{
	auxtrace_synth_guest_error(auxtrace_error, type, code, cpu, pid, tid,
				   ip, msg, timestamp, 0, -1);
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
{
	struct evsel *new_leader = NULL;
	struct evsel *evsel;

	/* Find new leader for the group */
	evlist__for_each_entry(evlist, evsel) {
		if (!evsel__has_leader(evsel, leader) || evsel == leader)
			continue;
		if (!new_leader)
			new_leader = evsel;
		evsel__set_leader(evsel, new_leader);
	}

	/* Update group information */
	if (new_leader) {
		zfree(&new_leader->group_name);
		new_leader->group_name = leader->group_name;
		leader->group_name = NULL;

		new_leader->core.nr_members = leader->core.nr_members - 1;
		leader->core.nr_members = 1;
	}
}

static void unleader_auxtrace(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (auxtrace__evsel_is_auxtrace(session, evsel) &&
		    evsel__is_group_leader(evsel)) {
			unleader_evsel(session->evlist, evsel);
		}
	}
}

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event)
{
	enum auxtrace_type type = event->auxtrace_info.type;
	int err;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		err = intel_pt_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_INTEL_BTS:
		err = intel_bts_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_ARM_SPE:
		err = arm_spe_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_CS_ETM:
		err = cs_etm__process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_S390_CPUMSF:
		err = s390_cpumsf_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	unleader_auxtrace(session);

	return 0;
}

s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample)
{
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->other_events = true;
	synth_opts->intr_events = true;
	synth_opts->errors = true;
	synth_opts->flc = true;
	synth_opts->llc = true;
	synth_opts->tlb = true;
	synth_opts->mem = true;
	synth_opts->remote_access = true;

	if (no_sample) {
		synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
		synth_opts->period = 1;
		synth_opts->calls = true;
	} else {
		synth_opts->instructions = true;
		synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}

static int get_flag(const char **ptr, unsigned int *flags)
{
	while (1) {
		char c = **ptr;

		if (c >= 'a' && c <= 'z') {
			*flags |= 1 << (c - 'a');
			++*ptr;
			return 0;
		} else if (c == ' ') {
			++*ptr;
			continue;
		} else {
			return -1;
		}
	}
}

static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
{
	while (1) {
		switch (**ptr) {
		case '+':
			++*ptr;
			if (get_flag(ptr, plus_flags))
				return -1;
			break;
		case '-':
			++*ptr;
			if (get_flag(ptr, minus_flags))
				return -1;
			break;
		case ' ':
			++*ptr;
			break;
		default:
			return 0;
		}
	}
}

/*
 * The option syntax parsed here is described in
 * tools/perf/Documentation/itrace.txt, which the perf-script and
 * perf-report documentation include.
 */
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset)
{
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts,
					       synth_opts->default_no_sample);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'o':
			synth_opts->other_events = true;
			break;
		case 'I':
			synth_opts->intr_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			if (get_flags(&p, &synth_opts->error_plus_flags,
				      &synth_opts->error_minus_flags))
				goto out_err;
			break;
		case 'd':
			synth_opts->log = true;
			if (get_flags(&p, &synth_opts->log_plus_flags,
				      &synth_opts->log_minus_flags))
				goto out_err;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'G':
		case 'g':
			if (p[-1] == 'G')
				synth_opts->add_callchain = true;
			else
				synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'L':
		case 'l':
			if (p[-1] == 'L')
				synth_opts->add_last_branch = true;
			else
				synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case 'f':
			synth_opts->flc = true;
			break;
		case 'm':
			synth_opts->llc = true;
			break;
		case 't':
			synth_opts->tlb = true;
			break;
		case 'a':
			synth_opts->remote_access = true;
			break;
		case 'M':
			synth_opts->mem = true;
			break;
		case 'q':
			synth_opts->quick += 1;
			break;
		case 'A':
			synth_opts->approx_ipc = true;
			break;
		case 'Z':
			synth_opts->timeless_decoding = true;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}
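
/*
 * Example --itrace strings accepted by the parser above (illustrative):
 *
 *	"i100us"	instructions events every 100 microseconds
 *	"b"		branches events
 *	"g16"		call chains of up to 16 entries per sample
 *	"l64"		last branch stacks of 64 entries
 *	"e+o"		error events, with the 'o' flag added via get_flags()
 *
 * Note how "100us" reaches PERF_ITRACE_PERIOD_NANOSECS through the
 * fall-through multiplications in the 'i' case: 100 * 1000 = 100000ns.
 */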

int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
{
	return itrace_do_parse_synth_opts(opt->value, str, unset);
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
	unsigned long long nsecs = e->time;
	const char *msg = e->msg;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);

	if (e->fmt && nsecs) {
		unsigned long secs = nsecs / NSEC_PER_SEC;

		nsecs -= secs * NSEC_PER_SEC;
		ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
	} else {
		ret += fprintf(fp, " time 0");
	}

	if (!e->fmt)
		msg = (const char *)&e->time;

	if (e->fmt >= 2 && e->machine_pid)
		ret += fprintf(fp, " machine_pid %d vcpu %d", e->machine_pid, e->vcpu);

	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

/*
 * When the kernel is 64-bit but the perf tool runs as a 32-bit compat
 * binary, the tool cannot load or store the 64-bit aux_head and aux_tail
 * values atomically: each access is split into two 32-bit operations, so a
 * concurrent kernel update can be observed in between, yielding a torn
 * value.
 *
 * The __weak default handlers below work around that.
 * compat_auxtrace_mmap__read_head() re-reads aux_head until the upper 32
 * bits agree across two ordered reads, so the read taken between them
 * cannot be torn.  compat_auxtrace_mmap__write_tail() refuses tail values
 * that do not fit in 32 bits, since such a store cannot be made atomically
 * here.  Architectures with better options (e.g. 64-bit atomics in compat
 * mode) can override these definitions.
 */
u64 __weak compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 first, second, last;
	u64 mask = (u64)(UINT32_MAX) << 32;

	do {
		first = READ_ONCE(pc->aux_head);
		/* Ensure the reads happen in order */
		smp_rmb();
		second = READ_ONCE(pc->aux_head);
		/* Ensure the reads happen in order */
		smp_rmb();
		last = READ_ONCE(pc->aux_head);
	} while ((first & mask) != (last & mask));

	return second;
}

int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 mask = (u64)(UINT32_MAX) << 32;

	if (tail & mask)
		return -1;

	/* Ensure all data reads happen before the tail update is visible */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}
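
/*
 * Illustrative torn-read scenario the read loop above defends against
 * (simplified): a 32-bit load of the 64-bit aux_head takes two instructions,
 * so while the kernel advances head from 0x0fffffff8 to 0x100000000, a
 * reader that loads the low word first can combine old low bits with new
 * high bits and see 0x1fffffff8.  Re-reading until the upper 32 bits agree
 * between the first and last read guarantees the middle read did not
 * straddle such a carry, so 'second' is returned as a consistent value.
 */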

static int __auxtrace_mmap__read(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;
	int kernel_is_64_bit = perf_env__kernel_is_64_bit(evsel__env(NULL));

	head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);

	if (snapshot &&
	    auxtrace_record__find_snapshot(itr, mm->idx, mm, data, &head, &old))
		return -1;

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
	if (padding)
		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, map, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		int err;

		err = auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
		if (err < 0)
			return err;

		if (itr->read_finish) {
			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
}

/*
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped, freeing all entries
 * @cnt: current number of entries
 * @bits: hashtable size (bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	zfree(&c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}
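
/*
 * Usage sketch (hedged; 'struct my_entry' is hypothetical).  Cached objects
 * embed struct auxtrace_cache_entry first, so the hash linkage and key live
 * inside the object itself:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry entry;
 *		u64 insn_cnt;
 *	};
 *
 *	struct auxtrace_cache *c = auxtrace_cache__new(10, sizeof(struct my_entry), 200);
 *	struct my_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	e->insn_cnt = ...;
 *	auxtrace_cache__add(c, key, &e->entry);
 *	e = auxtrace_cache__lookup(c, key);
 */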

static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
						       u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;
	struct hlist_node *n;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry_safe(entry, n, hlist, hash) {
		if (entry->key == key) {
			hlist_del(&entry->hash);
			return entry;
		}
	}

	return NULL;
}

void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);

	auxtrace_cache__free_entry(c, entry);
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}

static void addr_filter__free_str(struct addr_filter *filt)
{
	zfree(&filt->str);
	filt->action = NULL;
	filt->sym_from = NULL;
	filt->sym_to = NULL;
	filt->filename = NULL;
}

static struct addr_filter *addr_filter__new(void)
{
	struct addr_filter *filt = zalloc(sizeof(*filt));

	if (filt)
		INIT_LIST_HEAD(&filt->list);

	return filt;
}

static void addr_filter__free(struct addr_filter *filt)
{
	if (filt)
		addr_filter__free_str(filt);
	free(filt);
}

static void addr_filters__add(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_add_tail(&filt->list, &filts->head);
	filts->cnt += 1;
}

static void addr_filters__del(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_del_init(&filt->list);
	filts->cnt -= 1;
}

void addr_filters__init(struct addr_filters *filts)
{
	INIT_LIST_HEAD(&filts->head);
	filts->cnt = 0;
}

void addr_filters__exit(struct addr_filters *filts)
{
	struct addr_filter *filt, *n;

	list_for_each_entry_safe(filt, n, &filts->head, list) {
		addr_filters__del(filts, filt);
		addr_filter__free(filt);
	}
}

static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}

static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}

static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}

static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}

static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}
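
/*
 * Example filter strings the parser above accepts (illustrative; the perf
 * record documentation is authoritative for the grammar):
 *
 *	"filter main @ /usr/bin/ls"	trace only main() in that binary
 *	"start func_a / 0x100"		start tracing at func_a for 0x100 bytes
 *	"tracestop func_b #2"		stop at the 2nd occurrence of func_b
 *
 * Filters can be chained, separated by spaces or commas; *filter_inp is
 * advanced past each one as it is consumed.
 */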

int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}

struct sym_args {
	const char	*name;
	u64		start;
	u64		size;
	int		idx;
	int		cnt;
	bool		started;
	bool		global;
	bool		selected;
	bool		duplicate;
	bool		near;
};
static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return kallsyms__is_function(type) &&
	       !strcmp(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		/* The next kallsyms entry bounds the matched symbol's size */
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1; /* Non-zero return stops the parse */
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}

static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

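/*
 * Editor's note: when a name is ambiguous, the user re-runs with e.g.
 * "filter my_func #2" to take the second match listed above, or
 * "filter my_func #g" to take the global one ("my_func" is hypothetical).
 */
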
static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;

	if (!kallsyms__is_function(type))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	args->size = round_up(start, page_size) + page_size - args->start;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err < 0 ? err : -EINVAL;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

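/*
 * Editor's note: this is the "*" wildcard case, i.e. a filter such as
 * "filter *" selects the address range spanning all kernel functions.
 */
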
static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}

static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}

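/*
 * Illustrative example: "filter func_a / func_b" (hypothetical symbols)
 * resolves to addr = start of func_a and size = end of func_b - start of
 * func_a, i.e. the range covers both symbols and everything in between.
 */
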
static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				/* Found the n'th occurrence and its size */
				if (*size)
					return 0;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

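/*
 * Editor's note: dso__first_symbol()/dso__next_symbol() walk symbols in
 * address order, so the first symbol after the match bounds its size
 * whenever sym->end did not (e.g. zero-sized symbols).
 */
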
static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	if (dso__data_file_size(dso, NULL)) {
		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = 0;
	filt->size = dso->data.file_size;

	return 0;
}

static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso;

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}

static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}

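/*
 * Illustrative result: a resolved range filter is re-emitted in the purely
 * numeric form the kernel accepts, e.g. "filter 0x4004d0/0x90@/bin/ls"
 * (addresses and path are made up for the example).
 */
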
static int parse_addr_filter(struct evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static int evsel__nr_addr_filter(struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	/* Read the filter count from the PMU's sysfs "nr_addr_filters" file */
	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}

int auxtrace_parse_filters(struct evlist *evlist)
{
	struct evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}

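/*
 * Editor's note: after this step evsel->filter holds only numeric address
 * ranges, which perf later passes to the kernel when applying event
 * filters (via the PERF_EVENT_IOC_SET_FILTER ioctl).
 */
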
int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->process_event(session, event, sample, tool);
}

void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample)
{
	if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
	    auxtrace__dont_decode(session))
		return;

	session->auxtrace->dump_auxtrace_sample(session, sample);
}

int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->flush_events(session, tool);
}

void auxtrace__free_events(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	session->auxtrace->free_events(session);
}

void auxtrace__free(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	session->auxtrace->free(session);
}

bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel)
{
	if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
		return false;

	return session->auxtrace->evsel_is_auxtrace(session, evsel);
}