0001
0002 #include <errno.h>
0003 #include <inttypes.h>
0004 #include <linux/err.h>
0005 #include <linux/kernel.h>
0006 #include <linux/zalloc.h>
0007 #include <api/fs/fs.h>
0008
0009 #include <byteswap.h>
0010 #include <unistd.h>
0011 #include <sys/types.h>
0012 #include <sys/mman.h>
0013 #include <perf/cpumap.h>
0014
0015 #include "map_symbol.h"
0016 #include "branch.h"
0017 #include "debug.h"
0018 #include "env.h"
0019 #include "evlist.h"
0020 #include "evsel.h"
0021 #include "memswap.h"
0022 #include "map.h"
0023 #include "symbol.h"
0024 #include "session.h"
0025 #include "tool.h"
0026 #include "perf_regs.h"
0027 #include "asm/bug.h"
0028 #include "auxtrace.h"
0029 #include "thread.h"
0030 #include "thread-stack.h"
0031 #include "sample-raw.h"
0032 #include "stat.h"
0033 #include "tsc.h"
0034 #include "ui/progress.h"
0035 #include "../perf.h"
0036 #include "arch/common.h"
0037 #include "units.h"
0038 #include <internal/lib.h>
0039
0040 #ifdef HAVE_ZSTD_SUPPORT
/*
 * Decompress a PERF_RECORD_COMPRESSED event into a freshly mmap'd decomp
 * buffer and append it to the session's active decompression chain.  Any
 * not-yet-consumed tail of the previous buffer is copied to the front of
 * the new one so events split across compressed records stay contiguous.
 * Returns 0 on success, -1 on allocation or decompression failure.
 */
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset,
						  const char *file_path)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;

	if (decomp_last) {
		/* Bytes of the previous buffer not yet delivered to the parser. */
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	/* MAP_ANONYMOUS zero-fills, so decomp->size/next start out as 0/NULL. */
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->file_path = file_path;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	/* Carry over the leftover tail of the previous buffer. */
	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	/* The compressed payload follows the fixed record header. */
	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	/* Link into the session's singly linked list of decomp buffers. */
	if (session->active_decomp->decomp == NULL)
		session->active_decomp->decomp = decomp;
	else
		session->active_decomp->decomp_last->next = decomp;

	session->active_decomp->decomp_last = decomp;

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
0098 #else
0099 #define perf_session__process_compressed_event perf_session__process_compressed_event_stub
0100 #endif
0101
0102 static int perf_session__deliver_event(struct perf_session *session,
0103 union perf_event *event,
0104 struct perf_tool *tool,
0105 u64 file_offset,
0106 const char *file_path);
0107
0108 static int perf_session__open(struct perf_session *session, int repipe_fd)
0109 {
0110 struct perf_data *data = session->data;
0111
0112 if (perf_session__read_header(session, repipe_fd) < 0) {
0113 pr_err("incompatible file format (rerun with -v to learn more)\n");
0114 return -1;
0115 }
0116
0117 if (perf_data__is_pipe(data))
0118 return 0;
0119
0120 if (perf_header__has_feat(&session->header, HEADER_STAT))
0121 return 0;
0122
0123 if (!evlist__valid_sample_type(session->evlist)) {
0124 pr_err("non matching sample_type\n");
0125 return -1;
0126 }
0127
0128 if (!evlist__valid_sample_id_all(session->evlist)) {
0129 pr_err("non matching sample_id_all\n");
0130 return -1;
0131 }
0132
0133 if (!evlist__valid_read_format(session->evlist)) {
0134 pr_err("non matching read_format\n");
0135 return -1;
0136 }
0137
0138 return 0;
0139 }
0140
0141 void perf_session__set_id_hdr_size(struct perf_session *session)
0142 {
0143 u16 id_hdr_size = evlist__id_hdr_size(session->evlist);
0144
0145 machines__set_id_hdr_size(&session->machines, id_hdr_size);
0146 }
0147
0148 int perf_session__create_kernel_maps(struct perf_session *session)
0149 {
0150 int ret = machine__create_kernel_maps(&session->machines.host);
0151
0152 if (ret >= 0)
0153 ret = machines__create_guest_kernel_maps(&session->machines);
0154 return ret;
0155 }
0156
/* Tear down host and guest kernel maps created for this session. */
static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}
0161
0162 static bool perf_session__has_comm_exec(struct perf_session *session)
0163 {
0164 struct evsel *evsel;
0165
0166 evlist__for_each_entry(session->evlist, evsel) {
0167 if (evsel->core.attr.comm_exec)
0168 return true;
0169 }
0170
0171 return false;
0172 }
0173
0174 static void perf_session__set_comm_exec(struct perf_session *session)
0175 {
0176 bool comm_exec = perf_session__has_comm_exec(session);
0177
0178 machines__set_comm_exec(&session->machines, comm_exec);
0179 }
0180
0181 static int ordered_events__deliver_event(struct ordered_events *oe,
0182 struct ordered_event *event)
0183 {
0184 struct perf_session *session = container_of(oe, struct perf_session,
0185 ordered_events);
0186
0187 return perf_session__deliver_event(session, event->event,
0188 session->tool, event->file_offset,
0189 event->file_path);
0190 }
0191
/*
 * Allocate and initialize a perf session.
 *
 * @data:      backing perf.data; NULL for a data-less (live) session
 * @repipe:    pass events through to @repipe_fd while reading
 * @tool:      callbacks used when delivering (ordered) events
 *
 * Returns the new session, or an ERR_PTR() on failure.  On error every
 * partially-initialized resource is released via perf_session__delete().
 */
struct perf_session *__perf_session__new(struct perf_data *data,
					 bool repipe, int repipe_fd,
					 struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	session->decomp_data.zstd_decomp = &session->zstd_data;
	session->active_decomp = &session->decomp_data;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session, repipe_fd);
			if (ret < 0)
				goto out_delete;

			/*
			 * A pipe carries no header up front, so the id
			 * header size and comm_exec flag can only be
			 * derived here for regular files.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			evlist__init_trace_event_sample_raw(session->evlist);

			/* Directory-style perf.data: open the per-CPU files. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * Writers and data-less sessions need kernel maps up
		 * front; failure is reported but not fatal.
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * Timestamp-ordered delivery requires per-event timestamps
	 * (sample_id_all); without them fall back to unordered mode.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}
0279
/* Free all thread objects accumulated on the host machine. */
static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}
0284
0285 static void perf_decomp__release_events(struct decomp *next)
0286 {
0287 struct decomp *decomp;
0288 size_t mmap_len;
0289
0290 do {
0291 decomp = next;
0292 if (decomp == NULL)
0293 break;
0294 next = decomp->next;
0295 mmap_len = decomp->mmap_len;
0296 munmap(decomp, mmap_len);
0297 } while (1);
0298 }
0299
/*
 * Release every resource owned by @session.  NULL is tolerated so
 * callers can use this unconditionally on error paths.
 */
void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_decomp__release_events(session->decomp_data.decomp);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data) {
		/* The evlist is owned by the session only when reading. */
		if (perf_data__is_read(session->data))
			evlist__delete(session->evlist);
		perf_data__close(session->data);
	}
	trace_event__cleanup(&session->tevent);
	free(session);
}
0319
/* Default ->tracing_data handler: note the event as unhandled. */
static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
0328
/* Default ->attr handler: note the synthesized attr event as unhandled. */
static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
0337
/* Default ->event_update handler: dump when tracing, otherwise ignore. */
static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
0349
/* Default ->sample/->read handler: note the sample as unhandled. */
static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
0359
/* Generic default handler (mmap/comm/fork/...): note event as unhandled. */
static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
0368
/* ->finished_round default when ordered delivery is disabled: no-op. */
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
0376
/*
 * Consume and discard @n bytes from @fd (used for non-seekable input).
 * Returns 0 once @n bytes were skipped, or the failing read() result
 * (0 on premature EOF, negative on error).
 */
static int skipn(int fd, off_t n)
{
	char buf[4096];

	while (n > 0) {
		off_t chunk = n < (off_t)sizeof(buf) ? n : (off_t)sizeof(buf);
		ssize_t ret = read(fd, buf, chunk);

		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}
0391
0392 static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
0393 union perf_event *event)
0394 {
0395 dump_printf(": unhandled!\n");
0396 if (perf_data__is_pipe(session->data))
0397 skipn(perf_data__fd(session->data), event->auxtrace.size);
0398 return event->auxtrace.size;
0399 }
0400
/* Shared default for op2-style handlers (build_id, id_index, feature, ...). */
static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
0407
0408
/* Default ->thread_map handler: dump when tracing, otherwise ignore. */
static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
0419
/* Default ->cpu_map handler: dump when tracing, otherwise ignore. */
static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
0430
/* Default ->stat_config handler: dump when tracing, otherwise ignore. */
static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
0441
/* Default ->stat handler: dump when tracing, otherwise ignore. */
static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
0451
/* Default ->stat_round handler: dump when tracing, otherwise ignore. */
static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
0461
/* Default ->time_conv handler: dump when tracing, otherwise ignore. */
static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_time_conv(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
0471
/* ->compressed handler used when built without zstd (see #ifdef above). */
static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused,
						       const char *file_path __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
0480
/*
 * Fill every unset perf_tool callback with a default: real default
 * processing where one exists (lost, aux, switch, ...), otherwise an
 * "unhandled" stub, so event dispatch never hits a NULL pointer.
 */
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->cgroup == NULL)
		tool->cgroup = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->text_poke == NULL)
		tool->text_poke = perf_event__process_text_poke;
	if (tool->aux_output_hw_id == NULL)
		tool->aux_output_hw_id = perf_event__process_aux_output_hw_id;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	/* Round flushing only makes sense with ordered delivery. */
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = perf_event__process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_time_conv_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	/* Resolves to the stub when built without HAVE_ZSTD_SUPPORT. */
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
	if (tool->finished_init == NULL)
		tool->finished_init = process_event_op2_stub;
}
0564
0565 static void swap_sample_id_all(union perf_event *event, void *data)
0566 {
0567 void *end = (void *) event + event->header.size;
0568 int size = end - data;
0569
0570 BUG_ON(size % sizeof(u64));
0571 mem_bswap_64(data, size);
0572 }
0573
0574 static void perf_event__all64_swap(union perf_event *event,
0575 bool sample_id_all __maybe_unused)
0576 {
0577 struct perf_event_header *hdr = &event->header;
0578 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
0579 }
0580
0581 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
0582 {
0583 event->comm.pid = bswap_32(event->comm.pid);
0584 event->comm.tid = bswap_32(event->comm.tid);
0585
0586 if (sample_id_all) {
0587 void *data = &event->comm.comm;
0588
0589 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
0590 swap_sample_id_all(event, data);
0591 }
0592 }
0593
0594 static void perf_event__mmap_swap(union perf_event *event,
0595 bool sample_id_all)
0596 {
0597 event->mmap.pid = bswap_32(event->mmap.pid);
0598 event->mmap.tid = bswap_32(event->mmap.tid);
0599 event->mmap.start = bswap_64(event->mmap.start);
0600 event->mmap.len = bswap_64(event->mmap.len);
0601 event->mmap.pgoff = bswap_64(event->mmap.pgoff);
0602
0603 if (sample_id_all) {
0604 void *data = &event->mmap.filename;
0605
0606 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
0607 swap_sample_id_all(event, data);
0608 }
0609 }
0610
0611 static void perf_event__mmap2_swap(union perf_event *event,
0612 bool sample_id_all)
0613 {
0614 event->mmap2.pid = bswap_32(event->mmap2.pid);
0615 event->mmap2.tid = bswap_32(event->mmap2.tid);
0616 event->mmap2.start = bswap_64(event->mmap2.start);
0617 event->mmap2.len = bswap_64(event->mmap2.len);
0618 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
0619
0620 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
0621 event->mmap2.maj = bswap_32(event->mmap2.maj);
0622 event->mmap2.min = bswap_32(event->mmap2.min);
0623 event->mmap2.ino = bswap_64(event->mmap2.ino);
0624 event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
0625 }
0626
0627 if (sample_id_all) {
0628 void *data = &event->mmap2.filename;
0629
0630 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
0631 swap_sample_id_all(event, data);
0632 }
0633 }
0634 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
0635 {
0636 event->fork.pid = bswap_32(event->fork.pid);
0637 event->fork.tid = bswap_32(event->fork.tid);
0638 event->fork.ppid = bswap_32(event->fork.ppid);
0639 event->fork.ptid = bswap_32(event->fork.ptid);
0640 event->fork.time = bswap_64(event->fork.time);
0641
0642 if (sample_id_all)
0643 swap_sample_id_all(event, &event->fork + 1);
0644 }
0645
0646 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
0647 {
0648 event->read.pid = bswap_32(event->read.pid);
0649 event->read.tid = bswap_32(event->read.tid);
0650 event->read.value = bswap_64(event->read.value);
0651 event->read.time_enabled = bswap_64(event->read.time_enabled);
0652 event->read.time_running = bswap_64(event->read.time_running);
0653 event->read.id = bswap_64(event->read.id);
0654
0655 if (sample_id_all)
0656 swap_sample_id_all(event, &event->read + 1);
0657 }
0658
0659 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
0660 {
0661 event->aux.aux_offset = bswap_64(event->aux.aux_offset);
0662 event->aux.aux_size = bswap_64(event->aux.aux_size);
0663 event->aux.flags = bswap_64(event->aux.flags);
0664
0665 if (sample_id_all)
0666 swap_sample_id_all(event, &event->aux + 1);
0667 }
0668
0669 static void perf_event__itrace_start_swap(union perf_event *event,
0670 bool sample_id_all)
0671 {
0672 event->itrace_start.pid = bswap_32(event->itrace_start.pid);
0673 event->itrace_start.tid = bswap_32(event->itrace_start.tid);
0674
0675 if (sample_id_all)
0676 swap_sample_id_all(event, &event->itrace_start + 1);
0677 }
0678
0679 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
0680 {
0681 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
0682 event->context_switch.next_prev_pid =
0683 bswap_32(event->context_switch.next_prev_pid);
0684 event->context_switch.next_prev_tid =
0685 bswap_32(event->context_switch.next_prev_tid);
0686 }
0687
0688 if (sample_id_all)
0689 swap_sample_id_all(event, &event->context_switch + 1);
0690 }
0691
/*
 * Byte-swap a PERF_RECORD_TEXT_POKE record.  Note the ordering: the
 * length fields are swapped first because the trailer offset below is
 * computed from their (now host-endian) values.
 */
static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
	event->text_poke.addr    = bswap_64(event->text_poke.addr);
	event->text_poke.old_len = bswap_16(event->text_poke.old_len);
	event->text_poke.new_len = bswap_16(event->text_poke.new_len);

	if (sample_id_all) {
		/* len/old bytes/new bytes, padded to a u64 boundary. */
		size_t len = sizeof(event->text_poke.old_len) +
			     sizeof(event->text_poke.new_len) +
			     event->text_poke.old_len +
			     event->text_poke.new_len;
		void *data = &event->text_poke.old_len;

		data += PERF_ALIGN(len, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
0709
0710 static void perf_event__throttle_swap(union perf_event *event,
0711 bool sample_id_all)
0712 {
0713 event->throttle.time = bswap_64(event->throttle.time);
0714 event->throttle.id = bswap_64(event->throttle.id);
0715 event->throttle.stream_id = bswap_64(event->throttle.stream_id);
0716
0717 if (sample_id_all)
0718 swap_sample_id_all(event, &event->throttle + 1);
0719 }
0720
/*
 * Byte-swap a PERF_RECORD_NAMESPACES record: the fixed fields, then the
 * nr_namespaces variable-length link_info array.
 */
static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	/* Must be swapped before it is used as the loop bound below. */
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	/* i == nr_namespaces here, so link_info[i] is just past the array. */
	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}
0740
0741 static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
0742 {
0743 event->cgroup.id = bswap_64(event->cgroup.id);
0744
0745 if (sample_id_all) {
0746 void *data = &event->cgroup.path;
0747
0748 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
0749 swap_sample_id_all(event, data);
0750 }
0751 }
0752
0753 static u8 revbyte(u8 b)
0754 {
0755 int rev = (b >> 4) | ((b & 0xf) << 4);
0756 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
0757 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
0758 return (u8) rev;
0759 }
0760
0761
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775 static void swap_bitfield(u8 *p, unsigned len)
0776 {
0777 unsigned i;
0778
0779 for (i = 0; i < len; i++) {
0780 *p = revbyte(*p);
0781 p++;
0782 }
0783 }
0784
0785
/*
 * Byte-swap a perf_event_attr read from an opposite-endian file.
 * Each field is guarded by bswap_safe(): only fields the on-disk
 * attr->size says are present get swapped, so older and newer attr
 * layouts are both handled.
 */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

/* True when field f (repeated n extra times) fits inside attr->size. */
#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

	/*
	 * The flag bitfields follow read_format; their in-word layout is
	 * endian-dependent, so they are bit-reversed per byte via
	 * swap_bitfield() rather than swapped as a u64.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
0830
0831 static void perf_event__hdr_attr_swap(union perf_event *event,
0832 bool sample_id_all __maybe_unused)
0833 {
0834 size_t size;
0835
0836 perf_event__attr_swap(&event->attr.attr);
0837
0838 size = event->header.size;
0839 size -= (void *)&event->attr.id - (void *)event;
0840 mem_bswap_64(event->attr.id, size);
0841 }
0842
0843 static void perf_event__event_update_swap(union perf_event *event,
0844 bool sample_id_all __maybe_unused)
0845 {
0846 event->event_update.type = bswap_64(event->event_update.type);
0847 event->event_update.id = bswap_64(event->event_update.id);
0848 }
0849
/* Byte-swap the event id of a HEADER_EVENT_TYPE record. */
static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}
0856
/* Byte-swap the payload-size field of a HEADER_TRACING_DATA record. */
static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}
0862
0863 static void perf_event__auxtrace_info_swap(union perf_event *event,
0864 bool sample_id_all __maybe_unused)
0865 {
0866 size_t size;
0867
0868 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
0869
0870 size = event->header.size;
0871 size -= (void *)&event->auxtrace_info.priv - (void *)event;
0872 mem_bswap_64(event->auxtrace_info.priv, size);
0873 }
0874
0875 static void perf_event__auxtrace_swap(union perf_event *event,
0876 bool sample_id_all __maybe_unused)
0877 {
0878 event->auxtrace.size = bswap_64(event->auxtrace.size);
0879 event->auxtrace.offset = bswap_64(event->auxtrace.offset);
0880 event->auxtrace.reference = bswap_64(event->auxtrace.reference);
0881 event->auxtrace.idx = bswap_32(event->auxtrace.idx);
0882 event->auxtrace.tid = bswap_32(event->auxtrace.tid);
0883 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
0884 }
0885
/*
 * Byte-swap an AUXTRACE_ERROR record.  fmt gates which fields exist:
 * fmt >= 1 adds the timestamp, fmt >= 2 adds machine_pid/vcpu, so fmt
 * must be swapped before it is tested.
 */
static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
	if (event->auxtrace_error.fmt >= 2) {
		event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
		event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
	}
}
0903
0904 static void perf_event__thread_map_swap(union perf_event *event,
0905 bool sample_id_all __maybe_unused)
0906 {
0907 unsigned i;
0908
0909 event->thread_map.nr = bswap_64(event->thread_map.nr);
0910
0911 for (i = 0; i < event->thread_map.nr; i++)
0912 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
0913 }
0914
/*
 * Byte-swap a CPU_MAP record.  The payload layout depends on data->type
 * (swapped first), and the mask layout further depends on long_size.
 */
static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;

	data->type = bswap_16(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		data->cpus_data.nr = bswap_16(data->cpus_data.nr);

		for (unsigned i = 0; i < data->cpus_data.nr; i++)
			data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);

		switch (data->mask32_data.long_size) {
		case 4:
			data->mask32_data.nr = bswap_16(data->mask32_data.nr);
			for (unsigned i = 0; i < data->mask32_data.nr; i++)
				data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
			break;
		case 8:
			data->mask64_data.nr = bswap_16(data->mask64_data.nr);
			for (unsigned i = 0; i < data->mask64_data.nr; i++)
				data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
			break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		/* fall through into the outer default */
	default:
		break;
	}
}
0950
/*
 * Byte-swap a STAT_CONFIG record: nr tag/value entries plus the nr
 * field itself, all treated as u64s.
 */
static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	/* Byte size of the data[] array (nr must be swapped to count it). */
	size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
	/*
	 * NOTE(review): the +1 byte presumably makes mem_bswap_64() round
	 * up by one extra u64 so the swap starting at &nr also covers the
	 * nr field itself — confirm against mem_bswap_64()'s loop.
	 */
	size += 1;
	mem_bswap_64(&event->stat_config.nr, size);
}
0960
0961 static void perf_event__stat_swap(union perf_event *event,
0962 bool sample_id_all __maybe_unused)
0963 {
0964 event->stat.id = bswap_64(event->stat.id);
0965 event->stat.thread = bswap_32(event->stat.thread);
0966 event->stat.cpu = bswap_32(event->stat.cpu);
0967 event->stat.val = bswap_64(event->stat.val);
0968 event->stat.ena = bswap_64(event->stat.ena);
0969 event->stat.run = bswap_64(event->stat.run);
0970 }
0971
0972 static void perf_event__stat_round_swap(union perf_event *event,
0973 bool sample_id_all __maybe_unused)
0974 {
0975 event->stat_round.type = bswap_64(event->stat_round.type);
0976 event->stat_round.time = bswap_64(event->stat_round.time);
0977 }
0978
/*
 * Byte-swap a TIME_CONV record.  time_cycles/time_mask were appended to
 * the record later, so they are only swapped when event_contains()
 * reports the record actually carries them.
 */
static void perf_event__time_conv_swap(union perf_event *event,
				       bool sample_id_all __maybe_unused)
{
	event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
	event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
	event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);

	if (event_contains(event->time_conv, time_cycles)) {
		event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
		event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
	}
}
0991
/* Signature of a per-record-type byte-swap handler. */
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

/*
 * Byte-swap handlers indexed by PERF_RECORD_* type, used when the data
 * file was written on a host of the opposite endianness.  A NULL entry
 * means the record type has no swap handler.
 */
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
	[PERF_RECORD_CGROUP]		  = perf_event__cgroup_swap,
	[PERF_RECORD_TEXT_POKE]		  = perf_event__text_poke_swap,
	[PERF_RECORD_AUX_OUTPUT_HW_ID]	  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV]		  = perf_event__time_conv_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072 int perf_event__process_finished_round(struct perf_tool *tool __maybe_unused,
1073 union perf_event *event __maybe_unused,
1074 struct ordered_events *oe)
1075 {
1076 if (dump_trace)
1077 fprintf(stdout, "\n");
1078 return ordered_events__flush(oe, OE_FLUSH__ROUND);
1079 }
1080
/*
 * Queue @event for time-ordered delivery.  @timestamp orders the event;
 * @file_offset/@file_path record its origin for later trace dumping.
 */
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset, const char *file_path)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path);
}
1086
/*
 * Dump the call chain reconstructed from the LBR (branch) stack.
 *
 * The kernel part of the callchain comes from the regular FP chain, up to
 * and including the PERF_CONTEXT_USER marker; the user part is rebuilt
 * from the LBR registers.  LBR entries are (from, to) pairs recorded
 * innermost-first, so for a stack A()->B()->C()->D() the LBR holds
 * "C->D", "B->C", "A->B": the first entry's "to" plus every entry's
 * "from" reproduce the whole user stack.
 */
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	/* Find the PERF_CONTEXT_USER marker that ends the kernel chain. */
	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* Only dump when there is both a user boundary and LBR data. */
	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;

		/*
		 * i kernel entries + the PERF_CONTEXT_USER marker, plus
		 * entries[0].to and one "from" per LBR entry.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		/* Current user IP, then the callers from the LBR pairs. */
		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}
1134
1135 static void callchain__printf(struct evsel *evsel,
1136 struct perf_sample *sample)
1137 {
1138 unsigned int i;
1139 struct ip_callchain *callchain = sample->callchain;
1140
1141 if (evsel__has_branch_callstack(evsel))
1142 callchain__lbr_callstack_printf(sample);
1143
1144 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
1145
1146 for (i = 0; i < callchain->nr; i++)
1147 printf("..... %2d: %016" PRIx64 "\n",
1148 i, callchain->ips[i]);
1149 }
1150
/*
 * Dump the sampled branch stack.  In plain branch-stack mode each entry is
 * printed as "from -> to" with its flags; in callstack mode the entries
 * are expanded into a call chain instead.
 */
static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	uint64_t i;

	if (!callstack) {
		printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr);
	} else {
		/*
		 * nr + 1: expanding nr branch pairs yields nr + 1 callstack
		 * records, e.g. "B->C", "A->B" becomes C / B / A.
		 */
		printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1);
	}

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved,
			       e->flags.type ? branch_type_name(e->flags.type) : "");
		} else {
			/* First entry contributes both its "to" and "from". */
			if (i == 0) {
				printf("..... %2"PRIu64": %016" PRIx64 "\n"
				       "..... %2"PRIu64": %016" PRIx64 "\n",
				       i, e->to, i+1, e->from);
			} else {
				printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from);
			}
		}
	}
}
1195
/*
 * Print each sampled register.  @mask selects which registers are present;
 * @regs holds their values packed in ascending mask-bit order.
 * NOTE(review): casting &mask (u64) to unsigned long * for the bitmap walk
 * assumes unsigned long covers the mask layout — fine on 64-bit hosts,
 * verify for 32-bit big-endian builds.
 */
static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%016" PRIx64 "\n",
		       perf_reg_name(rid, arch), val);
	}
}
1207
/* Printable names for PERF_SAMPLE_REGS_ABI_* values. */
static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

/* Map a register dump's ABI field to a name, range-checked against the table. */
static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}
1221
1222 static void regs__printf(const char *type, struct regs_dump *regs, const char *arch)
1223 {
1224 u64 mask = regs->mask;
1225
1226 printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
1227 type,
1228 mask,
1229 regs_dump_abi(regs));
1230
1231 regs_dump__printf(mask, regs->regs, arch);
1232 }
1233
1234 static void regs_user__printf(struct perf_sample *sample, const char *arch)
1235 {
1236 struct regs_dump *user_regs = &sample->user_regs;
1237
1238 if (user_regs->regs)
1239 regs__printf("user", user_regs, arch);
1240 }
1241
1242 static void regs_intr__printf(struct perf_sample *sample, const char *arch)
1243 {
1244 struct regs_dump *intr_regs = &sample->intr_regs;
1245
1246 if (intr_regs->regs)
1247 regs__printf("intr", intr_regs, arch);
1248 }
1249
/* Dump the user-stack sample area (PERF_SAMPLE_STACK_USER): size and offset. */
static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}
1255
1256 static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
1257 {
1258 u64 sample_type = __evlist__combined_sample_type(evlist);
1259
1260 if (event->header.type != PERF_RECORD_SAMPLE &&
1261 !evlist__sample_id_all(evlist)) {
1262 fputs("-1 -1 ", stdout);
1263 return;
1264 }
1265
1266 if ((sample_type & PERF_SAMPLE_CPU))
1267 printf("%u ", sample->cpu);
1268
1269 if (sample_type & PERF_SAMPLE_TIME)
1270 printf("%" PRIu64 " ", sample->time);
1271 }
1272
/*
 * Dump a sample's PERF_SAMPLE_READ payload: enabled/running times and
 * either every group member's value (PERF_FORMAT_GROUP) or the single
 * counter value.
 */
static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		struct sample_read_value *value = sample->read.group.values;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		/* Entry stride depends on read_format (lost field is optional). */
		sample_read_group__for_each(value, sample->read.group.nr, read_format) {
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64,
			       value->id, value->value);
			if (read_format & PERF_FORMAT_LOST)
				printf(", lost %" PRIu64, value->lost);
			printf("\n");
		}
	} else {
		printf("..... id %016" PRIx64 ", value %016" PRIx64,
		       sample->read.one.id, sample->read.one.value);
		if (read_format & PERF_FORMAT_LOST)
			printf(", lost %" PRIu64, sample->read.one.lost);
		printf("\n");
	}
}
1306
/*
 * With -D/--dump-raw-trace: print the raw record header line, a hex dump
 * of the record (trace_event), any per-arch raw-sample dump, the timestamp
 * prefix, and the record's symbolic name.  No-op otherwise.
 */
static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample,
		       const char *file_path)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n",
	       file_offset, file_path, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}
1327
1328 char *get_page_size_name(u64 size, char *str)
1329 {
1330 if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
1331 snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");
1332
1333 return str;
1334 }
1335
/*
 * With -D/--dump-raw-trace: dump every field of a parsed sample whose
 * presence is indicated by the evsel's sample_type bits.  No-op otherwise.
 */
static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample, const char *arch)
{
	u64 sample_type;
	char str[PAGE_SIZE_NAME_LEN];

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (evsel__has_br_stack(evsel))
		branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, arch);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample, arch);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
		printf("... weight: %" PRIu64 "", sample->weight);
		/* Struct weight carries instruction latency + pipeline stage cycles. */
		if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
			printf(",0x%"PRIx16"", sample->ins_lat);
			printf(",0x%"PRIx16"", sample->p_stage_cyc);
		}
		printf("\n");
	}

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
		printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));

	if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
		printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}
1393
/*
 * With -D/--dump-raw-trace: dump a PERF_RECORD_READ record's fields that
 * are present per the evsel's read_format.  No-op otherwise.
 */
static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	/* NOTE(review): evsel may be NULL here — presumably evsel__name()
	 * tolerates NULL; the explicit NULL check only happens below. */
	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       evsel__name(evsel), event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRI_lu64 "\n", read_event->id);

	if (read_format & PERF_FORMAT_LOST)
		printf("... lost         : %" PRI_lu64 "\n", read_event->lost);
}
1422
/*
 * Pick the machine an event belongs to.  Guest-mode samples are routed to
 * the per-guest machine (keyed by machine_pid when present, the mmap pid
 * for mmap records, or the sample pid); everything else goes to the host.
 */
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (sample->machine_pid)
			pid = sample->machine_pid;
		else if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		/*
		 * With --guest-code, guest machines are created on demand
		 * rather than required to pre-exist in the machines list.
		 */
		if (symbol_conf.guest_code)
			return machines__findnew(machines, pid);

		return machines__find_guest(machines, pid);
	}

	return &machines->host;
}
1452
/*
 * Deliver one PERF_SAMPLE_READ counter value to the tool's sample handler.
 * The per-id state (sid->period) accumulates raw counts, so the delivered
 * period is the delta since the previous sample for that id.
 */
static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver a sample with a zero period,
	 * bail out.
	 */
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}
1484
/*
 * Deliver every value of a PERF_FORMAT_GROUP read as an individual sample.
 * Stops at the first failing delivery and returns its error; -EINVAL if
 * the group is empty.
 */
static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union  perf_event *event,
				struct perf_sample *sample,
				struct machine *machine,
				u64 read_format)
{
	int ret = -EINVAL;
	struct sample_read_value *v = sample->read.group.values;

	sample_read_group__for_each(v, sample->read.group.nr, read_format) {
		ret = deliver_sample_value(evlist, tool, event, sample, v,
					   machine);
		if (ret)
			break;
	}

	return ret;
}
1504
1505 static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
1506 union perf_event *event, struct perf_sample *sample,
1507 struct evsel *evsel, struct machine *machine)
1508 {
1509
1510 u64 sample_type = evsel->core.attr.sample_type;
1511 u64 read_format = evsel->core.attr.read_format;
1512
1513
1514 if (!(sample_type & PERF_SAMPLE_READ))
1515 return tool->sample(tool, event, sample, evsel, machine);
1516
1517
1518 if (read_format & PERF_FORMAT_GROUP)
1519 return deliver_sample_group(evlist, tool, event, sample,
1520 machine, read_format);
1521 else
1522 return deliver_sample_value(evlist, tool, event, sample,
1523 &sample->read.one, machine);
1524 }
1525
/*
 * Deliver one kernel-generated event to the matching tool callback,
 * updating per-evlist statistics on the way.  Returns the callback's
 * result, 0 for samples that cannot be attributed, or -1 for unknown
 * record types.
 */
static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset,
				   const char *file_path)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample, file_path);

	evsel = evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		/* Samples need both an owning evsel and a machine. */
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			dump_sample(evsel, event, sample, perf_env__arch(NULL));
			return 0;
		}
		dump_sample(evsel, event, sample, perf_env__arch(machine->env));
		return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		/* Only account losses when the stock handler is in use. */
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
			if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
				evlist->stats.total_aux_collision += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	case PERF_RECORD_TEXT_POKE:
		return tool->text_poke(tool, event, sample, machine);
	case PERF_RECORD_AUX_OUTPUT_HW_ID:
		return tool->aux_output_hw_id(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}
1614
/*
 * Parse the sample out of @event, give AUX-trace decoders first refusal
 * (a positive return means they consumed it), then deliver it to the
 * per-machine handlers.
 */
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset,
				       const char *file_path)
{
	struct perf_sample sample;
	int ret = evlist__parse_sample(session->evlist, event, &sample);

	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)	/* consumed by the auxtrace layer */
		return 0;

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset, file_path);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);

	return ret;
}
1643
/*
 * Dispatch a user-space synthesized record (type >= PERF_RECORD_USER_TYPE_START)
 * to the matching tool callback.  Returns the callback's result or -EINVAL
 * for unknown types.
 */
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset,
					    const char *file_path)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	/*
	 * Compressed records are dumped after decompression (unless only the
	 * stub handler is installed); everything else is dumped up front.
	 */
	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample, file_path);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Obsolete record type: nothing to do beyond accepting it so
		 * old files still process cleanly.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/*
		 * The tracing-data handler reads straight from the file
		 * descriptor, so position it at this record first (pipes
		 * cannot seek and are already positioned correctly).
		 */
		if (!perf_data__is_pipe(session->data))
			lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* Skip over the AUX area data that follows the record. */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset, file_path);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample, file_path);
		return err;
	case PERF_RECORD_FINISHED_INIT:
		return tool->finished_init(session, event);
	default:
		return -EINVAL;
	}
}
1726
1727 int perf_session__deliver_synth_event(struct perf_session *session,
1728 union perf_event *event,
1729 struct perf_sample *sample)
1730 {
1731 struct evlist *evlist = session->evlist;
1732 struct perf_tool *tool = session->tool;
1733
1734 events_stats__inc(&evlist->stats, event->header.type);
1735
1736 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1737 return perf_session__process_user_event(session, event, 0, NULL);
1738
1739 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
1740 }
1741
1742 static void event_swap(union perf_event *event, bool sample_id_all)
1743 {
1744 perf_event__swap_op swap;
1745
1746 swap = perf_event__swap_ops[event->header.type];
1747 if (swap)
1748 swap(event, sample_id_all);
1749 }
1750
/*
 * Read one event at @file_offset without disturbing normal processing.
 * If the whole file is mapped (and no swap is needed) the event is
 * returned in place; otherwise it is read into @buf (which must hold at
 * least the full record).  Optionally parses the sample into @sample.
 * Returns 0 on success, -1 on any error (pipes, short reads, bad sizes).
 */
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	/* Fast path: point straight into the single mmap. */
	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	/* Sanity-check the size read from the file before trusting it. */
	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}
1806
/*
 * Iterate events in the byte range [@offset, @offset + @size), invoking
 * @cb on each.  Stops early and returns the first non-zero error from
 * peeking or from the callback.
 */
int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		/* AUXTRACE records are followed by raw AUX data: skip it. */
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}
1834
1835 static s64 perf_session__process_event(struct perf_session *session,
1836 union perf_event *event, u64 file_offset,
1837 const char *file_path)
1838 {
1839 struct evlist *evlist = session->evlist;
1840 struct perf_tool *tool = session->tool;
1841 int ret;
1842
1843 if (session->header.needs_swap)
1844 event_swap(event, evlist__sample_id_all(evlist));
1845
1846 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1847 return -EINVAL;
1848
1849 events_stats__inc(&evlist->stats, event->header.type);
1850
1851 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1852 return perf_session__process_user_event(session, event, file_offset, file_path);
1853
1854 if (tool->ordered_events) {
1855 u64 timestamp = -1ULL;
1856
1857 ret = evlist__parse_sample_timestamp(evlist, event, ×tamp);
1858 if (ret && ret != -1)
1859 return ret;
1860
1861 ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path);
1862 if (ret != -ETIME)
1863 return ret;
1864 }
1865
1866 return perf_session__deliver_event(session, event, tool, file_offset, file_path);
1867 }
1868
1869 void perf_event_header__bswap(struct perf_event_header *hdr)
1870 {
1871 hdr->type = bswap_32(hdr->type);
1872 hdr->misc = bswap_16(hdr->misc);
1873 hdr->size = bswap_16(hdr->size);
1874 }
1875
/* Find or create the thread for @pid on the session's host machine. */
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}
1880
1881 int perf_session__register_idle_thread(struct perf_session *session)
1882 {
1883 struct thread *thread = machine__idle_thread(&session->machines.host);
1884
1885
1886 thread__put(thread);
1887 return thread ? 0 : -1;
1888 }
1889
1890 static void
1891 perf_session__warn_order(const struct perf_session *session)
1892 {
1893 const struct ordered_events *oe = &session->ordered_events;
1894 struct evsel *evsel;
1895 bool should_warn = true;
1896
1897 evlist__for_each_entry(session->evlist, evsel) {
1898 if (evsel->core.attr.write_backward)
1899 should_warn = false;
1900 }
1901
1902 if (!should_warn)
1903 return;
1904 if (oe->nr_unordered_events != 0)
1905 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1906 }
1907
1908 static void perf_session__warn_about_errors(const struct perf_session *session)
1909 {
1910 const struct events_stats *stats = &session->evlist->stats;
1911
1912 if (session->tool->lost == perf_event__process_lost &&
1913 stats->nr_events[PERF_RECORD_LOST] != 0) {
1914 ui__warning("Processed %d events and lost %d chunks!\n\n"
1915 "Check IO/CPU overload!\n\n",
1916 stats->nr_events[0],
1917 stats->nr_events[PERF_RECORD_LOST]);
1918 }
1919
1920 if (session->tool->lost_samples == perf_event__process_lost_samples) {
1921 double drop_rate;
1922
1923 drop_rate = (double)stats->total_lost_samples /
1924 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1925 if (drop_rate > 0.05) {
1926 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1927 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1928 drop_rate * 100.0);
1929 }
1930 }
1931
1932 if (session->tool->aux == perf_event__process_aux &&
1933 stats->total_aux_lost != 0) {
1934 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1935 stats->total_aux_lost,
1936 stats->nr_events[PERF_RECORD_AUX]);
1937 }
1938
1939 if (session->tool->aux == perf_event__process_aux &&
1940 stats->total_aux_partial != 0) {
1941 bool vmm_exclusive = false;
1942
1943 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1944 &vmm_exclusive);
1945
1946 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1947 "Are you running a KVM guest in the background?%s\n\n",
1948 stats->total_aux_partial,
1949 stats->nr_events[PERF_RECORD_AUX],
1950 vmm_exclusive ?
1951 "\nReloading kvm_intel module with vmm_exclusive=0\n"
1952 "will reduce the gaps to only guest's timeslices." :
1953 "");
1954 }
1955
1956 if (session->tool->aux == perf_event__process_aux &&
1957 stats->total_aux_collision != 0) {
1958 ui__warning("AUX data detected collision %" PRIu64 " times out of %u!\n\n",
1959 stats->total_aux_collision,
1960 stats->nr_events[PERF_RECORD_AUX]);
1961 }
1962
1963 if (stats->nr_unknown_events != 0) {
1964 ui__warning("Found %u unknown events!\n\n"
1965 "Is this an older tool processing a perf.data "
1966 "file generated by a more recent tool?\n\n"
1967 "If that is not the case, consider "
1968 "reporting to linux-kernel@vger.kernel.org.\n\n",
1969 stats->nr_unknown_events);
1970 }
1971
1972 if (stats->nr_unknown_id != 0) {
1973 ui__warning("%u samples with id not present in the header\n",
1974 stats->nr_unknown_id);
1975 }
1976
1977 if (stats->nr_invalid_chains != 0) {
1978 ui__warning("Found invalid callchains!\n\n"
1979 "%u out of %u events were discarded for this reason.\n\n"
1980 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1981 stats->nr_invalid_chains,
1982 stats->nr_events[PERF_RECORD_SAMPLE]);
1983 }
1984
1985 if (stats->nr_unprocessable_samples != 0) {
1986 ui__warning("%u unprocessable samples recorded.\n"
1987 "Do you have a KVM guest running and not using 'perf kvm'?\n",
1988 stats->nr_unprocessable_samples);
1989 }
1990
1991 perf_session__warn_order(session);
1992
1993 events_stats__auxtrace_error_warn(stats);
1994
1995 if (stats->nr_proc_map_timeout != 0) {
1996 ui__warning("%d map information files for pre-existing threads were\n"
1997 "not processed, if there are samples for addresses they\n"
1998 "will not be resolved, you may find out which are these\n"
1999 "threads by running with -v and redirecting the output\n"
2000 "to a file.\n"
2001 "The time limit to process proc map is too short?\n"
2002 "Increase it by --proc-map-timeout\n",
2003 stats->nr_proc_map_timeout);
2004 }
2005 }
2006
/* Per-thread callback: flush any buffered call/return stack state. */
static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}
2012
/* Flush the thread-stack state of every thread on every machine. */
static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}
2019
/* Set asynchronously (e.g. from a signal handler) to stop event processing. */
volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);
2023
/*
 * Read and process events from a pipe (non-seekable perf.data stream).
 * Each record's header is read first, then the buffer is grown as needed
 * to hold the payload, and the whole record is dispatched through
 * perf_session__process_event().  Loops until EOF or session_done().
 */
static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	/* The read buffer is reused, so queued events must be copied. */
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = perf_data__read(session->data, event,
			      sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;	/* clean EOF between records */

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	/* Grow the buffer to fit the largest record seen so far. */
	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	/* Read the payload that follows the header, if any. */
	if (size - sizeof(struct perf_event_header)) {
		err = perf_data__read(session->data, p,
				      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	/* A positive return asks us to skip that many additional bytes. */
	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered events */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}
2128
/*
 * Peek at the event located at @head within @buf (a mapping of @mmap_size
 * bytes).  Returns the event if it fits entirely in the mapping, NULL if the
 * caller should remap and retry, or @error for invalid (fuzzed?) input.
 */
static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
               bool needs_swap, union perf_event *error)
{
        union perf_event *event;
        u16 event_size;

        /*
         * Ensure we have enough space remaining to read
         * the size of the event in the headers.
         */
        if (head + sizeof(event->header) > mmap_size)
                return NULL;

        event = (union perf_event *)(buf + head);
        if (needs_swap)
                perf_event_header__bswap(&event->header);

        event_size = event->header.size;
        if (head + event_size <= mmap_size)
                return event;

        /* We're not fetching the event so swap back again */
        if (needs_swap)
                perf_event_header__bswap(&event->header);

        /* Check if the event fits into the next mmapped buffer. */
        if (event_size <= mmap_size - head % page_size) {
                /* Remap buf and fetch again. */
                return NULL;
        }

        /* Invalid input. Event size should never exceed mmap_size. */
        pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
                 " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);

        return error;
}
2167
/* Fetch from a file mmap; an oversized event is a hard error (-EINVAL). */
static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
        return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}
2173
/* Fetch from a decompressed buffer; an oversized event just ends the walk (NULL). */
static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
        return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}
2179
/*
 * Walk the most recent decompressed buffer of the session's active decomp
 * state and feed each contained event through normal event processing.
 *
 * Returns 0 on success (including "no decompressed data"), -EINVAL on a
 * malformed or unprocessable event.
 */
static int __perf_session__process_decomp_events(struct perf_session *session)
{
        s64 skip;
        u64 size;
        struct decomp *decomp = session->active_decomp->decomp_last;

        if (!decomp)
                return 0;

        while (decomp->head < decomp->size && !session_done()) {
                union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
                                                             session->header.needs_swap);

                if (!event)
                        break;  /* remainder is a partial event; keep it for later */

                size = event->header.size;

                if (size < sizeof(struct perf_event_header) ||
                    (skip = perf_session__process_event(session, event, decomp->file_pos,
                                                        decomp->file_path)) < 0) {
                        pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                                decomp->file_pos + decomp->head, event->header.size, event->header.type);
                        return -EINVAL;
                }

                /* A positive return value asks us to skip that many extra bytes. */
                if (skip)
                        size += skip;

                decomp->head += size;
        }

        return 0;
}
2214
2215
2216
2217
2218
/*
 * On 64-bit we can mmap the whole data file in one go; on 32-bit the address
 * space is too small, so use a ring of smaller mappings instead.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
2226
struct reader;

/* Per-event callback invoked for every event pulled out of a reader's mmap. */
typedef s64 (*reader_cb_t)(struct perf_session *session,
                           union perf_event *event,
                           u64 file_offset,
                           const char *file_path);

/*
 * State for streaming one perf.data file (or one directory chunk) through a
 * ring of NUM_MMAPS mappings.
 */
struct reader {
        int fd;                         /* descriptor of the data file */
        const char *path;               /* file path, for error reporting */
        u64 data_size;                  /* bytes of event data in the file */
        u64 data_offset;                /* file offset where event data starts */
        reader_cb_t process;            /* callback for each fetched event */
        bool in_place_update;           /* mmap PROT_WRITE|MAP_SHARED so edits reach the file */
        char *mmaps[NUM_MMAPS];         /* ring of active mappings */
        size_t mmap_size;               /* size of each mapping */
        int mmap_idx;                   /* next slot to (re)use in the ring */
        char *mmap_cur;                 /* mapping currently being consumed */
        u64 file_pos;                   /* absolute file position of the next event */
        u64 file_offset;                /* file offset backing the current mapping */
        u64 head;                       /* offset of the next event within the mapping */
        u64 size;                       /* bytes consumed since the last reader switch */
        bool done;                      /* reader exhausted (dir-mode bookkeeping) */
        struct zstd_data zstd_data;     /* per-reader decompression context */
        struct decomp_data decomp_data; /* per-reader decompressed buffers */
};
2253
2254 static int
2255 reader__init(struct reader *rd, bool *one_mmap)
2256 {
2257 u64 data_size = rd->data_size;
2258 char **mmaps = rd->mmaps;
2259
2260 rd->head = rd->data_offset;
2261 data_size += rd->data_offset;
2262
2263 rd->mmap_size = MMAP_SIZE;
2264 if (rd->mmap_size > data_size) {
2265 rd->mmap_size = data_size;
2266 if (one_mmap)
2267 *one_mmap = true;
2268 }
2269
2270 memset(mmaps, 0, sizeof(rd->mmaps));
2271
2272 if (zstd_init(&rd->zstd_data, 0))
2273 return -1;
2274 rd->decomp_data.zstd_decomp = &rd->zstd_data;
2275
2276 return 0;
2277 }
2278
/* Release the reader's decompressed event buffers and its zstd context. */
static void
reader__release_decomp(struct reader *rd)
{
        perf_decomp__release_events(rd->decomp_data.decomp);
        zstd_fini(&rd->zstd_data);
}
2285
/*
 * (Re)map the next window of the data file into rd->mmap_cur, page-aligning
 * the requested position and recycling slots in the mmap ring.
 *
 * Returns 0 on success or -errno on mmap failure.
 */
static int
reader__mmap(struct reader *rd, struct perf_session *session)
{
        int mmap_prot, mmap_flags;
        char *buf, **mmaps = rd->mmaps;
        u64 page_offset;

        mmap_prot = PROT_READ;
        mmap_flags = MAP_SHARED;

        if (rd->in_place_update) {
                /* Writes must reach the file itself. */
                mmap_prot |= PROT_WRITE;
        } else if (session->header.needs_swap) {
                /* Byteswapping writes to the buffer but must not hit the file. */
                mmap_prot |= PROT_WRITE;
                mmap_flags = MAP_PRIVATE;
        }

        /* Recycle the oldest mapping in the ring. */
        if (mmaps[rd->mmap_idx]) {
                munmap(mmaps[rd->mmap_idx], rd->mmap_size);
                mmaps[rd->mmap_idx] = NULL;
        }

        /* mmap offsets must be page-aligned; keep the remainder in rd->head. */
        page_offset = page_size * (rd->head / page_size);
        rd->file_offset += page_offset;
        rd->head -= page_offset;

        buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd,
                   rd->file_offset);
        if (buf == MAP_FAILED) {
                pr_err("failed to mmap file\n");
                return -errno;
        }
        mmaps[rd->mmap_idx] = rd->mmap_cur = buf;
        /* NUM_MMAPS is a power of two, so masking wraps the ring index. */
        rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1);
        rd->file_pos = rd->file_offset + rd->head;
        if (session->one_mmap) {
                session->one_mmap_addr = buf;
                session->one_mmap_offset = rd->file_offset;
        }

        return 0;
}
2328
/* Return values of reader__read_event(). */
enum {
        READER_OK,      /* one event was consumed */
        READER_NODATA,  /* event doesn't fit the current mapping; remap needed */
};
2333
/*
 * Fetch one event from the reader's current mapping and run it through
 * rd->process().
 *
 * Returns READER_OK, READER_NODATA (caller must remap), or a negative
 * error code.
 */
static int
reader__read_event(struct reader *rd, struct perf_session *session,
                   struct ui_progress *prog)
{
        u64 size;
        int err = READER_OK;
        union perf_event *event;
        s64 skip;

        event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
                                   session->header.needs_swap);
        if (IS_ERR(event))
                return PTR_ERR(event);

        if (!event)
                return READER_NODATA;

        size = event->header.size;

        skip = -EINVAL; /* reported if the size sanity check below fails */

        if (size < sizeof(struct perf_event_header) ||
            (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
                       rd->file_offset + rd->head, event->header.size,
                       event->header.type, strerror(-skip));
                err = skip;
                goto out;
        }

        /* A positive return value asks us to skip that many extra bytes. */
        if (skip)
                size += skip;

        rd->size += size;
        rd->head += size;
        rd->file_pos += size;

        /* Drain any events produced by decompressing compressed records. */
        err = __perf_session__process_decomp_events(session);
        if (err)
                goto out;

        ui_progress__update(prog, size);

out:
        return err;
}
2380
/* True once the reader has consumed its entire data area. */
static inline bool
reader__eof(struct reader *rd)
{
        return (rd->file_pos >= rd->data_size + rd->data_offset);
}
2386
/*
 * Main loop for a single reader: map a window, consume events until the
 * window is exhausted, remap, and repeat until EOF, an error, or
 * session_done().
 */
static int
reader__process_events(struct reader *rd, struct perf_session *session,
                       struct ui_progress *prog)
{
        int err;

        err = reader__init(rd, &session->one_mmap);
        if (err)
                goto out;

        session->active_decomp = &rd->decomp_data;

remap:
        err = reader__mmap(rd, session);
        if (err)
                goto out;

more:
        err = reader__read_event(rd, session, prog);
        if (err < 0)
                goto out;
        else if (err == READER_NODATA)
                goto remap;

        if (session_done())
                goto out;

        if (!reader__eof(rd))
                goto more;

out:
        /* Restore the session-level decompression state. */
        session->active_decomp = &session->decomp_data;
        return err;
}
2421
/* reader_cb_t adapter around perf_session__process_event(). */
static s64 process_simple(struct perf_session *session,
                          union perf_event *event,
                          u64 file_offset,
                          const char *file_path)
{
        return perf_session__process_event(session, event, file_offset, file_path);
}
2429
/*
 * Process all events of a regular (seekable, single-file) perf.data via one
 * reader, then do the final ordered-events/auxtrace/thread-stack flushes.
 */
static int __perf_session__process_events(struct perf_session *session)
{
        struct reader rd = {
                .fd = perf_data__fd(session->data),
                .path = session->data->file.path,
                .data_size = session->header.data_size,
                .data_offset = session->header.data_offset,
                .process = process_simple,
                .in_place_update = session->data->in_place_update,
        };
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        struct ui_progress prog;
        int err;

        perf_tool__fill_defaults(tool);

        if (rd.data_size == 0)
                return -1;

        ui_progress__init_size(&prog, rd.data_size, "Processing events...");

        err = reader__process_events(&rd, session, &prog);
        if (err)
                goto out_err;

        /* do the final flush for ordered samples */
        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
        if (err)
                goto out_err;
        err = auxtrace__flush_events(session, tool);
        if (err)
                goto out_err;
        err = perf_session__flush_thread_stacks(session);
out_err:
        ui_progress__finish();
        if (!tool->no_warn)
                perf_session__warn_about_errors(session);
        /*
         * Processing may not be complete (error or early stop), so make
         * ordered_events usable again for any further processing rather
         * than leaving it in a drained state.
         */
        ordered_events__reinit(&session->ordered_events);
        auxtrace__free_events(session);
        reader__release_decomp(&rd);
        session->one_mmap = false;
        return err;
}
2477
2478
2479
2480
2481
2482
/*
 * In directory mode, consume up to 2 MB from each reader before moving to
 * the next one, so ordered-event sorting sees roughly interleaved data.
 */
#define READER_MAX_SIZE (2 * 1024 * 1024)
2484
2485
2486
2487
2488
2489
/*
 * Process events from a directory-format perf.data: one reader for the main
 * file plus one per non-empty directory file, serviced round-robin in
 * READER_MAX_SIZE chunks.
 */
static int __perf_session__process_dir_events(struct perf_session *session)
{
        struct perf_data *data = session->data;
        struct perf_tool *tool = session->tool;
        int i, ret, readers, nr_readers;
        struct ui_progress prog;
        u64 total_size = perf_data__size(session->data);
        struct reader *rd;

        perf_tool__fill_defaults(tool);

        ui_progress__init_size(&prog, total_size, "Sorting events...");

        /* One reader for the main file, plus one per non-empty dir file. */
        nr_readers = 1;
        for (i = 0; i < data->dir.nr; i++) {
                if (data->dir.files[i].size)
                        nr_readers++;
        }

        rd = zalloc(nr_readers * sizeof(struct reader));
        if (!rd)
                return -ENOMEM;

        rd[0] = (struct reader) {
                .fd = perf_data__fd(session->data),
                .path = session->data->file.path,
                .data_size = session->header.data_size,
                .data_offset = session->header.data_offset,
                .process = process_simple,
                .in_place_update = session->data->in_place_update,
        };
        ret = reader__init(&rd[0], NULL);
        if (ret)
                goto out_err;
        ret = reader__mmap(&rd[0], session);
        if (ret)
                goto out_err;
        readers = 1;

        for (i = 0; i < data->dir.nr; i++) {
                if (!data->dir.files[i].size)
                        continue;
                rd[readers] = (struct reader) {
                        .fd = data->dir.files[i].fd,
                        .path = data->dir.files[i].path,
                        .data_size = data->dir.files[i].size,
                        .data_offset = 0,
                        .process = process_simple,
                        .in_place_update = session->data->in_place_update,
                };
                ret = reader__init(&rd[readers], NULL);
                if (ret)
                        goto out_err;
                ret = reader__mmap(&rd[readers], session);
                if (ret)
                        goto out_err;
                readers++;
        }

        /* Round-robin over the readers until all report EOF. */
        i = 0;
        while (readers) {
                if (session_done())
                        break;

                if (rd[i].done) {
                        i = (i + 1) % nr_readers;
                        continue;
                }
                if (reader__eof(&rd[i])) {
                        rd[i].done = true;
                        readers--;
                        continue;
                }

                session->active_decomp = &rd[i].decomp_data;
                ret = reader__read_event(&rd[i], session, &prog);
                if (ret < 0) {
                        goto out_err;
                } else if (ret == READER_NODATA) {
                        ret = reader__mmap(&rd[i], session);
                        if (ret)
                                goto out_err;
                }

                /* Switch to the next reader after READER_MAX_SIZE bytes. */
                if (rd[i].size >= READER_MAX_SIZE) {
                        rd[i].size = 0;
                        i = (i + 1) % nr_readers;
                }
        }

        ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
        if (ret)
                goto out_err;

        ret = perf_session__flush_thread_stacks(session);
out_err:
        ui_progress__finish();

        if (!tool->no_warn)
                perf_session__warn_about_errors(session);

        /*
         * Processing may not be complete (error or early stop), so make
         * ordered_events usable again for any further processing.
         */
        ordered_events__reinit(&session->ordered_events);

        session->one_mmap = false;

        session->active_decomp = &session->decomp_data;
        for (i = 0; i < nr_readers; i++)
                reader__release_decomp(&rd[i]);
        zfree(&rd);

        return ret;
}
2606
/*
 * Entry point for event processing: dispatch to the pipe, directory, or
 * single-file implementation depending on the data source.
 */
int perf_session__process_events(struct perf_session *session)
{
        if (perf_session__register_idle_thread(session) < 0)
                return -ENOMEM;

        if (perf_data__is_pipe(session->data))
                return __perf_session__process_pipe_events(session);

        if (perf_data__is_dir(session->data) && session->data->dir.nr)
                return __perf_session__process_dir_events(session);

        return __perf_session__process_events(session);
}
2620
/*
 * Return true iff the session contains at least one tracepoint event;
 * otherwise print a hint naming the 'perf @msg' command and return false.
 */
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
        struct evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
                        return true;
        }

        pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
        return false;
}
2633
2634 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2635 {
2636 char *bracket;
2637 struct ref_reloc_sym *ref;
2638 struct kmap *kmap;
2639
2640 ref = zalloc(sizeof(struct ref_reloc_sym));
2641 if (ref == NULL)
2642 return -ENOMEM;
2643
2644 ref->name = strdup(symbol_name);
2645 if (ref->name == NULL) {
2646 free(ref);
2647 return -ENOMEM;
2648 }
2649
2650 bracket = strchr(ref->name, ']');
2651 if (bracket)
2652 *bracket = '\0';
2653
2654 ref->addr = addr;
2655
2656 kmap = map__kmap(map);
2657 if (kmap)
2658 kmap->ref_reloc_sym = ref;
2659
2660 return 0;
2661 }
2662
/* Print all DSOs of all machines in @session to @fp; returns bytes printed. */
size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
        return machines__fprintf_dsos(&session->machines, fp);
}
2667
/*
 * Print DSOs with build-ids, omitting those for which @skip(dso, parm)
 * returns true; returns bytes printed.
 */
size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
                                          bool (skip)(struct dso *dso, int parm), int parm)
{
        return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}
2673
2674 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
2675 bool skip_empty)
2676 {
2677 size_t ret;
2678 const char *msg = "";
2679
2680 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2681 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2682
2683 ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2684
2685 ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
2686 return ret;
2687 }
2688
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
        /*
         * FIXME: this only prints the host machine; arguably all machines
         * in the session (guests included) should be printed here.
         */
        return machine__fprintf(&session->machines.host, fp);
}
2697
/* Return the first evsel of attr.type == @type, or NULL if none exists. */
struct evsel *perf_session__find_first_evtype(struct perf_session *session,
                                              unsigned int type)
{
        struct evsel *pos;

        evlist__for_each_entry(session->evlist, pos) {
                if (pos->core.attr.type == type)
                        return pos;
        }
        return NULL;
}
2709
/*
 * Parse @cpu_list and set the corresponding bits in @cpu_bitmap, after
 * verifying that every event type present in the session recorded the CPU
 * (PERF_SAMPLE_CPU) - without it, filtering samples by CPU is impossible.
 *
 * Returns 0 on success, -1 on error (missing CPU info, bad list, or a CPU
 * beyond MAX_NR_CPUS).
 */
int perf_session__cpu_bitmap(struct perf_session *session,
                             const char *cpu_list, unsigned long *cpu_bitmap)
{
        int i, err = -1;
        struct perf_cpu_map *map;
        int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);

        /* Check the first evsel of each type for PERF_SAMPLE_CPU. */
        for (i = 0; i < PERF_TYPE_MAX; ++i) {
                struct evsel *evsel;

                evsel = perf_session__find_first_evtype(session, i);
                if (!evsel)
                        continue;

                if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
                        pr_err("File does not contain CPU events. "
                               "Remove -C option to proceed.\n");
                        return -1;
                }
        }

        map = perf_cpu_map__new(cpu_list);
        if (map == NULL) {
                pr_err("Invalid cpu_list\n");
                return -1;
        }

        for (i = 0; i < perf_cpu_map__nr(map); i++) {
                struct perf_cpu cpu = perf_cpu_map__cpu(map, i);

                if (cpu.cpu >= nr_cpus) {
                        pr_err("Requested CPU %d too large. "
                               "Consider raising MAX_NR_CPUS\n", cpu.cpu);
                        goto out_delete_map;
                }

                set_bit(cpu.cpu, cpu_bitmap);
        }

        err = 0;

out_delete_map:
        perf_cpu_map__put(map);
        return err;
}
2755
/* Print the perf.data header information banner to @fp (no-op on NULL args). */
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
                                bool full)
{
        if (session == NULL || fp == NULL)
                return;

        fprintf(fp, "# ========\n");
        perf_header__fprintf_info(session, fp, full);
        fprintf(fp, "# ========\n#\n");
}
2766
/*
 * Find or create the guest machine for @machine_pid, give it an idle thread
 * and point it at the guest's kallsyms file for kernel symbol resolution.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid)
{
        struct machine *machine = machines__findnew(&session->machines, machine_pid);
        struct thread *thread;

        if (!machine)
                return -ENOMEM;

        machine->single_address_space = session->machines.host.single_address_space;

        /* Creating the idle thread here avoids doing it lazily later. */
        thread = machine__idle_thread(machine);
        if (!thread)
                return -ENOMEM;
        thread__put(thread);

        machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid);

        return 0;
}
2786
/* Record on the host thread (@pid/@tid) which guest vcpu it runs. */
static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
                                       pid_t tid, int guest_cpu)
{
        struct machine *machine = &session->machines.host;
        struct thread *thread = machine__findnew_thread(machine, pid, tid);

        if (!thread)
                return -ENOMEM;
        thread->guest_cpu = guest_cpu;
        thread__put(thread);

        return 0;
}
2800
2801 int perf_event__process_id_index(struct perf_session *session,
2802 union perf_event *event)
2803 {
2804 struct evlist *evlist = session->evlist;
2805 struct perf_record_id_index *ie = &event->id_index;
2806 size_t sz = ie->header.size - sizeof(*ie);
2807 size_t i, nr, max_nr;
2808 size_t e1_sz = sizeof(struct id_index_entry);
2809 size_t e2_sz = sizeof(struct id_index_entry_2);
2810 size_t etot_sz = e1_sz + e2_sz;
2811 struct id_index_entry_2 *e2;
2812 pid_t last_pid = 0;
2813
2814 max_nr = sz / e1_sz;
2815 nr = ie->nr;
2816 if (nr > max_nr) {
2817 printf("Too big: nr %zu max_nr %zu\n", nr, max_nr);
2818 return -EINVAL;
2819 }
2820
2821 if (sz >= nr * etot_sz) {
2822 max_nr = sz / etot_sz;
2823 if (nr > max_nr) {
2824 printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr);
2825 return -EINVAL;
2826 }
2827 e2 = (void *)ie + sizeof(*ie) + nr * e1_sz;
2828 } else {
2829 e2 = NULL;
2830 }
2831
2832 if (dump_trace)
2833 fprintf(stdout, " nr: %zu\n", nr);
2834
2835 for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) {
2836 struct id_index_entry *e = &ie->entries[i];
2837 struct perf_sample_id *sid;
2838 int ret;
2839
2840 if (dump_trace) {
2841 fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2842 fprintf(stdout, " idx: %"PRI_lu64, e->idx);
2843 fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
2844 fprintf(stdout, " tid: %"PRI_ld64, e->tid);
2845 if (e2) {
2846 fprintf(stdout, " machine_pid: %"PRI_ld64, e2->machine_pid);
2847 fprintf(stdout, " vcpu: %"PRI_lu64"\n", e2->vcpu);
2848 } else {
2849 fprintf(stdout, "\n");
2850 }
2851 }
2852
2853 sid = evlist__id2sid(evlist, e->id);
2854 if (!sid)
2855 return -ENOENT;
2856
2857 sid->idx = e->idx;
2858 sid->cpu.cpu = e->cpu;
2859 sid->tid = e->tid;
2860
2861 if (!e2)
2862 continue;
2863
2864 sid->machine_pid = e2->machine_pid;
2865 sid->vcpu.cpu = e2->vcpu;
2866
2867 if (!sid->machine_pid)
2868 continue;
2869
2870 if (sid->machine_pid != last_pid) {
2871 ret = perf_session__register_guest(session, sid->machine_pid);
2872 if (ret)
2873 return ret;
2874 last_pid = sid->machine_pid;
2875 perf_guest = true;
2876 }
2877
2878 ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu);
2879 if (ret)
2880 return ret;
2881 }
2882 return 0;
2883 }