// SPDX-License-Identifier: GPL-2.0
/*
 * Arm Statistical Profiling Extensions (SPE) support
 * Copyright (c) 2017-2018, Arm Ltd.
 */

#include <byteswap.h>
#include <endian.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <unistd.h>

#include "auxtrace.h"
#include "color.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "session.h"
#include "symbol.h"
#include "thread.h"
#include "thread-stack.h"
#include "tsc.h"
#include "tool.h"
#include "util/synthetic-events.h"

#include "arm-spe.h"
#include "arm-spe-decoder/arm-spe-decoder.h"
#include "arm-spe-decoder/arm-spe-pkt-decoder.h"

#include "../../arch/arm64/include/asm/cputype.h"

#define MAX_TIMESTAMP (~0ULL)

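/*
 * Per-session decode state: the auxtrace queues holding the SPE data, the
 * synthesized event IDs, and the itrace options selecting which sample
 * types get generated.
 */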
struct arm_spe {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	u32 pmu_type;
	u64 midr;

	struct perf_tsc_conversion tc;

	u8 timeless_decoding;
	u8 data_queued;

	u64 sample_type;
	u8 sample_flc;
	u8 sample_llc;
	u8 sample_tlb;
	u8 sample_branch;
	u8 sample_remote_access;
	u8 sample_memory;
	u8 sample_instructions;
	u64 instructions_sample_period;

	u64 l1d_miss_id;
	u64 l1d_access_id;
	u64 llc_miss_id;
	u64 llc_access_id;
	u64 tlb_miss_id;
	u64 tlb_access_id;
	u64 branch_miss_id;
	u64 remote_access_id;
	u64 memory_id;
	u64 instructions_id;

	u64 kernel_start;

	unsigned long num_events;
	u8 use_ctx_pkt_for_pid;
};

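/* Per-queue decode state; auxtrace sets up one queue per CPU (or per thread). */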
struct arm_spe_queue {
	struct arm_spe *spe;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	union perf_event *event_buf;
	bool on_heap;
	bool done;
	pid_t pid;
	pid_t tid;
	int cpu;
	struct arm_spe_decoder *decoder;
	u64 time;
	u64 timestamp;
	struct thread *thread;
	u64 period_instructions;
};

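/* Hex-dump a block of raw SPE trace, one decoded packet per line. */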
static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
			 unsigned char *buf, size_t len)
{
	struct arm_spe_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[ARM_SPE_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... ARM SPE data: size %#zx bytes\n",
		      len);

	while (len) {
		ret = arm_spe_get_packet(buf, len, &packet);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = arm_spe_pkt_desc(&packet, desc,
					       ARM_SPE_PKT_DESC_MAX);
			if (!ret)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
			       size_t len)
{
	printf(".\n");
	arm_spe_dump(spe, buf, len);
}

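/*
 * Decoder callback: feed the decoder the next buffer of AUX trace data for
 * this queue, dropping the previous buffer once it has been consumed.
 */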
static int arm_spe_get_trace(struct arm_spe_buffer *b, void *data)
{
	struct arm_spe_queue *speq = data;
	struct auxtrace_buffer *buffer = speq->buffer;
	struct auxtrace_buffer *old_buffer = speq->old_buffer;
	struct auxtrace_queue *queue;

	queue = &speq->spe->queues.queue_array[speq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	/* If no more data, drop the previous buffer and return */
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	speq->buffer = buffer;

	/* If the buffer doesn't have data associated, try to load it */
	if (!buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(speq->spe->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	b->len = buffer->size;
	b->buf = buffer->data;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		speq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return arm_spe_get_trace(b, data);
	}

	return 0;
}

static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
						  unsigned int queue_nr)
{
	struct arm_spe_params params = { .get_trace = 0, };
	struct arm_spe_queue *speq;

	speq = zalloc(sizeof(*speq));
	if (!speq)
		return NULL;

	speq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!speq->event_buf)
		goto out_free;

	speq->spe = spe;
	speq->queue_nr = queue_nr;
	speq->pid = -1;
	speq->tid = -1;
	speq->cpu = -1;
	speq->period_instructions = 0;

	/* params set */
	params.get_trace = arm_spe_get_trace;
	params.data = speq;

	/* create new decoder */
	speq->decoder = arm_spe_decoder_new(&params);
	if (!speq->decoder)
		goto out_free;

	return speq;

out_free:
	zfree(&speq->event_buf);
	free(speq);

	return NULL;
}

static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
{
	return ip >= spe->kernel_start ?
		PERF_RECORD_MISC_KERNEL :
		PERF_RECORD_MISC_USER;
}

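/*
 * Resolve a queue's pid/tid, preferring the per-CPU tid tracked by the
 * machine (updated from CONTEXT packets or context-switch events) over the
 * queue's defaults.
 */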
static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
				    struct auxtrace_queue *queue)
{
	struct arm_spe_queue *speq = queue->priv;
	pid_t tid;

	tid = machine__get_current_tid(spe->machine, speq->cpu);
	if (tid != -1) {
		speq->tid = tid;
		thread__zput(speq->thread);
	} else
		speq->tid = queue->tid;

	if ((!speq->thread) && (speq->tid != -1)) {
		speq->thread = machine__find_thread(spe->machine, -1,
						    speq->tid);
	}

	if (speq->thread) {
		speq->pid = speq->thread->pid_;
		if (queue->cpu == -1)
			speq->cpu = speq->thread->cpu;
	}
}

static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
{
	struct arm_spe *spe = speq->spe;
	int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);

	if (err)
		return err;

	arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);

	return 0;
}

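/* Fill in the sample fields common to all samples synthesized from a record. */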
static void arm_spe_prep_sample(struct arm_spe *spe,
				struct arm_spe_queue *speq,
				union perf_event *event,
				struct perf_sample *sample)
{
	struct arm_spe_record *record = &speq->decoder->record;

	if (!spe->timeless_decoding)
		sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);

	sample->ip = record->from_ip;
	sample->cpumode = arm_spe_cpumode(spe, sample->ip);
	sample->pid = speq->pid;
	sample->tid = speq->tid;
	sample->period = 1;
	sample->cpu = speq->cpu;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = sample->cpumode;
	event->sample.header.size = sizeof(struct perf_event_header);
}

static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static inline int
arm_spe_deliver_synth_event(struct arm_spe *spe,
			    struct arm_spe_queue *speq __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample)
{
	int ret;

	if (spe->synth_opts.inject) {
		ret = arm_spe__inject_event(event, sample, spe->sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(spe->session, event, sample);
	if (ret)
		pr_err("ARM SPE: failed to deliver event, error %d\n", ret);

	return ret;
}

static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
				     u64 spe_events_id, u64 data_src)
{
	struct arm_spe *spe = speq->spe;
	struct arm_spe_record *record = &speq->decoder->record;
	union perf_event *event = speq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	arm_spe_prep_sample(spe, speq, event, &sample);

	sample.id = spe_events_id;
	sample.stream_id = spe_events_id;
	sample.addr = record->virt_addr;
	sample.phys_addr = record->phys_addr;
	sample.data_src = data_src;
	sample.weight = record->latency;

	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}

static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
					u64 spe_events_id)
{
	struct arm_spe *spe = speq->spe;
	struct arm_spe_record *record = &speq->decoder->record;
	union perf_event *event = speq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	arm_spe_prep_sample(spe, speq, event, &sample);

	sample.id = spe_events_id;
	sample.stream_id = spe_events_id;
	sample.addr = record->to_ip;
	sample.weight = record->latency;

	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}

static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
					     u64 spe_events_id, u64 data_src)
{
	struct arm_spe *spe = speq->spe;
	struct arm_spe_record *record = &speq->decoder->record;
	union perf_event *event = speq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	/*
	 * Handles perf instruction sampling period.
	 */
	speq->period_instructions++;
	if (speq->period_instructions < spe->instructions_sample_period)
		return 0;
	speq->period_instructions = 0;

	arm_spe_prep_sample(spe, speq, event, &sample);

	sample.id = spe_events_id;
	sample.stream_id = spe_events_id;
	sample.addr = record->virt_addr;
	sample.phys_addr = record->phys_addr;
	sample.data_src = data_src;
	sample.period = spe->instructions_sample_period;
	sample.weight = record->latency;

	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}

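/* Cores whose SPE implementation fills in the Neoverse data source packet. */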
static const struct midr_range neoverse_spe[] = {
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	{},
};

static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
						union perf_mem_data_src *data_src)
{
	/*
	 * Even though four levels of cache hierarchy are possible, no known
	 * production Neoverse systems currently include more than three
	 * levels, so for the time being we assume three exist. If a
	 * production system is built with four, this function would have to
	 * be changed to detect the number of levels for reporting.
	 */

	/*
	 * We have no data on the hit level or data source for stores in the
	 * Neoverse SPE records.
	 */
	if (record->op & ARM_SPE_ST) {
		data_src->mem_lvl = PERF_MEM_LVL_NA;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
		data_src->mem_snoop = PERF_MEM_SNOOP_NA;
		return;
	}

	switch (record->source) {
	case ARM_SPE_NV_L1D:
		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
		break;
	case ARM_SPE_NV_L2:
		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
		break;
	case ARM_SPE_NV_PEER_CORE:
		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
		break;
	/*
	 * We don't know if this is L1 or L2, but we do know it was a
	 * cache-to-cache transfer, so set SNOOPX_PEER.
	 */
	case ARM_SPE_NV_LOCAL_CLUSTER:
	case ARM_SPE_NV_PEER_CLUSTER:
		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
		break;
	/*
	 * System cache is assumed to be L3.
	 */
	case ARM_SPE_NV_SYS_CACHE:
		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
		data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
		break;
	/*
	 * We don't know what level it hit in, except that it came from the
	 * other socket.
	 */
	case ARM_SPE_NV_REMOTE:
		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
		break;
	case ARM_SPE_NV_DRAM:
		data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
		break;
	default:
		break;
	}
}

static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
					       union perf_mem_data_src *data_src)
{
	if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
		data_src->mem_lvl = PERF_MEM_LVL_L3;

		if (record->type & ARM_SPE_LLC_MISS)
			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
		else
			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
	} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
		data_src->mem_lvl = PERF_MEM_LVL_L1;

		if (record->type & ARM_SPE_L1D_MISS)
			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
		else
			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
	}

	if (record->type & ARM_SPE_REMOTE_ACCESS)
		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
}

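/*
 * Build the perf_mem_data_src encoding for a record: Neoverse cores provide
 * an explicit data source, other cores are classified from the generic
 * event type bits.
 */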
static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
{
	union perf_mem_data_src data_src = { 0 };
	bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);

	if (record->op == ARM_SPE_LD)
		data_src.mem_op = PERF_MEM_OP_LOAD;
	else if (record->op == ARM_SPE_ST)
		data_src.mem_op = PERF_MEM_OP_STORE;
	else
		return 0;

	if (is_neoverse)
		arm_spe__synth_data_source_neoverse(record, &data_src);
	else
		arm_spe__synth_data_source_generic(record, &data_src);

	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
		data_src.mem_dtlb = PERF_MEM_TLB_WK;

		if (record->type & ARM_SPE_TLB_MISS)
			data_src.mem_dtlb |= PERF_MEM_TLB_MISS;
		else
			data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
	}

	return data_src.val;
}

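/* Synthesize all the samples requested via --itrace for the current record. */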
static int arm_spe_sample(struct arm_spe_queue *speq)
{
	const struct arm_spe_record *record = &speq->decoder->record;
	struct arm_spe *spe = speq->spe;
	u64 data_src;
	int err;

	data_src = arm_spe__synth_data_source(record, spe->midr);

	if (spe->sample_flc) {
		if (record->type & ARM_SPE_L1D_MISS) {
			err = arm_spe__synth_mem_sample(speq, spe->l1d_miss_id,
							data_src);
			if (err)
				return err;
		}

		if (record->type & ARM_SPE_L1D_ACCESS) {
			err = arm_spe__synth_mem_sample(speq, spe->l1d_access_id,
							data_src);
			if (err)
				return err;
		}
	}

	if (spe->sample_llc) {
		if (record->type & ARM_SPE_LLC_MISS) {
			err = arm_spe__synth_mem_sample(speq, spe->llc_miss_id,
							data_src);
			if (err)
				return err;
		}

		if (record->type & ARM_SPE_LLC_ACCESS) {
			err = arm_spe__synth_mem_sample(speq, spe->llc_access_id,
							data_src);
			if (err)
				return err;
		}
	}

	if (spe->sample_tlb) {
		if (record->type & ARM_SPE_TLB_MISS) {
			err = arm_spe__synth_mem_sample(speq, spe->tlb_miss_id,
							data_src);
			if (err)
				return err;
		}

		if (record->type & ARM_SPE_TLB_ACCESS) {
			err = arm_spe__synth_mem_sample(speq, spe->tlb_access_id,
							data_src);
			if (err)
				return err;
		}
	}

	if (spe->sample_branch && (record->type & ARM_SPE_BRANCH_MISS)) {
		err = arm_spe__synth_branch_sample(speq, spe->branch_miss_id);
		if (err)
			return err;
	}

	if (spe->sample_remote_access &&
	    (record->type & ARM_SPE_REMOTE_ACCESS)) {
		err = arm_spe__synth_mem_sample(speq, spe->remote_access_id,
						data_src);
		if (err)
			return err;
	}

	/*
	 * When data_src is zero it means the record is not a memory
	 * operation, so skip synthesizing a memory sample in that case.
	 */
	if (spe->sample_memory && data_src) {
		err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
		if (err)
			return err;
	}

	if (spe->sample_instructions) {
		err = arm_spe__synth_instruction_sample(speq, spe->instructions_id, data_src);
		if (err)
			return err;
	}

	return 0;
}

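/*
 * Decode one queue's trace, synthesizing samples as records are produced.
 * In timed mode, return whenever the queue's timestamp passes *timestamp so
 * that all queues can be processed in global time order.
 */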
static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
{
	struct arm_spe *spe = speq->spe;
	struct arm_spe_record *record;
	int ret;

	if (!spe->kernel_start)
		spe->kernel_start = machine__kernel_start(spe->machine);

	while (1) {
		/*
		 * The usual flow would be to decode the packets first and
		 * then synthesize a sample from the resulting record; here
		 * the order is reversed and arm_spe_sample() is called
		 * before arm_spe_decode(), for two reasons:
		 *
		 * 1. arm_spe__setup_queue() has already decoded trace data
		 *    and produced a record, but deferred generating a sample
		 *    for it until now, so the pending record must be
		 *    consumed first.
		 * 2. After decoding, the record's timestamp must be compared
		 *    with the incoming perf event; if the record is later,
		 *    bail out and push the record onto the auxtrace heap so
		 *    its sample is synthesized on the next call. This keeps
		 *    SPE samples and other perf events in correct time
		 *    order.
		 */

		/*
		 * Update pid/tid info.
		 */
		record = &speq->decoder->record;
		if (!spe->timeless_decoding && record->context_id != (u64)-1) {
			ret = arm_spe_set_tid(speq, record->context_id);
			if (ret)
				return ret;

			spe->use_ctx_pkt_for_pid = true;
		}

		ret = arm_spe_sample(speq);
		if (ret)
			return ret;

		ret = arm_spe_decode(speq->decoder);
		if (!ret) {
			pr_debug("No data or all data has been processed.\n");
			return 1;
		}

		/*
		 * A decode error was hit; continue with the next chunk of
		 * trace data to find more records.
		 */
		if (ret < 0)
			continue;

		record = &speq->decoder->record;

		/* Update timestamp for the last record */
		if (record->timestamp > speq->timestamp)
			speq->timestamp = record->timestamp;

		/*
		 * If the timestamp of the queue has passed the timestamp of
		 * the coming perf event, bail out so the perf event can be
		 * processed ahead of the trace data.
		 */
		if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
			*timestamp = speq->timestamp;
			return 0;
		}
	}

	return 0;
}

static int arm_spe__setup_queue(struct arm_spe *spe,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct arm_spe_queue *speq = queue->priv;
	struct arm_spe_record *record;

	if (list_empty(&queue->head) || speq)
		return 0;

	speq = arm_spe__alloc_queue(spe, queue_nr);

	if (!speq)
		return -ENOMEM;

	queue->priv = speq;

	if (queue->cpu != -1)
		speq->cpu = queue->cpu;

	if (!speq->on_heap) {
		int ret;

		if (spe->timeless_decoding)
			return 0;

retry:
		ret = arm_spe_decode(speq->decoder);

		if (!ret)
			return 0;

		if (ret < 0)
			goto retry;

		record = &speq->decoder->record;

		speq->timestamp = record->timestamp;
		ret = auxtrace_heap__add(&spe->heap, queue_nr, speq->timestamp);
		if (ret)
			return ret;
		speq->on_heap = true;
	}

	return 0;
}

static int arm_spe__setup_queues(struct arm_spe *spe)
{
	unsigned int i;
	int ret;

	for (i = 0; i < spe->queues.nr_queues; i++) {
		ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);
		if (ret)
			return ret;
	}

	return 0;
}

static int arm_spe__update_queues(struct arm_spe *spe)
{
	if (spe->queues.new_data) {
		spe->queues.new_data = false;
		return arm_spe__setup_queues(spe);
	}

	return 0;
}

static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
{
	struct evsel *evsel;
	struct evlist *evlist = spe->session->evlist;
	bool timeless_decoding = true;

	/*
	 * Loop through the event list and check whether any event samples
	 * time: timeless decoding is used only when none does.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			timeless_decoding = false;
	}

	return timeless_decoding;
}

static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct arm_spe_queue *speq;

		if (!spe->heap.heap_cnt)
			return 0;

		if (spe->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = spe->heap.heap_array[0].queue_nr;
		queue = &spe->queues.queue_array[queue_nr];
		speq = queue->priv;

		auxtrace_heap__pop(&spe->heap);

		if (spe->heap.heap_cnt) {
			ts = spe->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		/*
		 * Without CONTEXT packets, the only way to match a sample
		 * with a pid/tid is via context-switch events, so resolve
		 * the queue's pid/tid before running the decoder.
		 */
		if (!spe->use_ctx_pkt_for_pid)
			arm_spe_set_pid_tid_cpu(spe, queue);

		ret = arm_spe_run_decoder(speq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&spe->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&spe->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			speq->on_heap = false;
		}
	}

	return 0;
}

static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
					   u64 time_)
{
	struct auxtrace_queues *queues = &spe->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &spe->queues.queue_array[i];
		struct arm_spe_queue *speq = queue->priv;

		if (speq && (tid == -1 || speq->tid == tid)) {
			speq->time = time_;
			arm_spe_set_pid_tid_cpu(spe, queue);
			arm_spe_run_decoder(speq, &ts);
		}
	}
	return 0;
}

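/*
 * Update the machine's per-CPU pid/tid from context-switch side-band
 * events; only switch-out records are used, as their next_prev_pid/tid
 * identify the task being scheduled in.
 */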
static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
				  struct perf_sample *sample)
{
	pid_t pid, tid;
	int cpu;

	if (!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT))
		return 0;

	pid = event->context_switch.next_prev_pid;
	tid = event->context_switch.next_prev_tid;
	cpu = sample->cpu;

	if (tid == -1)
		pr_warning("context_switch event has no tid\n");

	return machine__set_current_tid(spe->machine, cpu, pid, tid);
}

static int arm_spe_process_event(struct perf_session *session,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_tool *tool)
{
	int err = 0;
	u64 timestamp;
	struct arm_spe *spe = container_of(session->auxtrace,
					   struct arm_spe, auxtrace);

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("SPE trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && (sample->time != (u64) -1))
		timestamp = perf_time_to_tsc(sample->time, &spe->tc);
	else
		timestamp = 0;

	if (timestamp || spe->timeless_decoding) {
		err = arm_spe__update_queues(spe);
		if (err)
			return err;
	}

	if (spe->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = arm_spe_process_timeless_queues(spe,
					event->fork.tid,
					sample->time);
		}
	} else if (timestamp) {
		err = arm_spe_process_queues(spe, timestamp);
		if (err)
			return err;

		if (!spe->use_ctx_pkt_for_pid &&
		    (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE ||
		     event->header.type == PERF_RECORD_SWITCH))
			err = arm_spe_context_switch(spe, event, sample);
	}

	return err;
}

static int arm_spe_process_auxtrace_event(struct perf_session *session,
					  union perf_event *event,
					  struct perf_tool *tool __maybe_unused)
{
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
					   auxtrace);

	if (!spe->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&spe->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump the raw trace when "perf report -D" (dump_trace) is used */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				arm_spe_dump_event(spe, buffer->data,
						   buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

static int arm_spe_flush(struct perf_session *session __maybe_unused,
			 struct perf_tool *tool __maybe_unused)
{
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = arm_spe__update_queues(spe);
	if (ret < 0)
		return ret;

	if (spe->timeless_decoding)
		return arm_spe_process_timeless_queues(spe, -1,
						       MAX_TIMESTAMP - 1);

	ret = arm_spe_process_queues(spe, MAX_TIMESTAMP);
	if (ret)
		return ret;

	if (!spe->use_ctx_pkt_for_pid)
		ui__warning("Arm SPE CONTEXT packets not found in the traces.\n"
			    "Matching of TIDs to SPE events could be inaccurate.\n");

	return 0;
}

static void arm_spe_free_queue(void *priv)
{
	struct arm_spe_queue *speq = priv;

	if (!speq)
		return;
	thread__zput(speq->thread);
	arm_spe_decoder_free(speq->decoder);
	zfree(&speq->event_buf);
	free(speq);
}

static void arm_spe_free_events(struct perf_session *session)
{
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
					   auxtrace);
	struct auxtrace_queues *queues = &spe->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		arm_spe_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	auxtrace_queues__free(queues);
}

static void arm_spe_free(struct perf_session *session)
{
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
					   auxtrace);

	auxtrace_heap__free(&spe->heap);
	arm_spe_free_events(session);
	session->auxtrace = NULL;
	free(spe);
}

static bool arm_spe_evsel_is_auxtrace(struct perf_session *session,
				      struct evsel *evsel)
{
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, auxtrace);

	return evsel->core.attr.type == spe->pmu_type;
}

static const char * const arm_spe_info_fmts[] = {
	[ARM_SPE_PMU_TYPE]	= "  PMU Type           %"PRId64"\n",
};

static void arm_spe_print_info(__u64 *arr)
{
	if (!dump_trace)
		return;

	fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
}

struct arm_spe_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int arm_spe_event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
{
	struct arm_spe_synth *arm_spe_synth =
		container_of(tool, struct arm_spe_synth, dummy_tool);

	return perf_session__deliver_synth_event(arm_spe_synth->session,
						 event, NULL);
}

static int arm_spe_synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
{
	struct arm_spe_synth arm_spe_synth;

	memset(&arm_spe_synth, 0, sizeof(struct arm_spe_synth));
	arm_spe_synth.session = session;

	return perf_event__synthesize_attr(&arm_spe_synth.dummy_tool, attr, 1,
					   &id, arm_spe_event_synth);
}

static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
				   const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.id && evsel->core.id[0] == id) {
			if (evsel->name)
				zfree(&evsel->name);
			evsel->name = strdup(name);
			break;
		}
	}
}

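/*
 * Create the synthetic perf events requested via --itrace, inheriting the
 * sample attributes of the recorded SPE event.
 */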
static int
arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == spe->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with SPE trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type &
				(PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR);
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
			    PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR;
	if (spe->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	spe->sample_type = attr.sample_type;

	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->core.id[0] + 1000000000;

	if (!id)
		id = 1;

	if (spe->synth_opts.flc) {
		spe->sample_flc = true;

		/* Level 1 data cache miss */
		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->l1d_miss_id = id;
		arm_spe_set_event_name(evlist, id, "l1d-miss");
		id += 1;

		/* Level 1 data cache access */
		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->l1d_access_id = id;
		arm_spe_set_event_name(evlist, id, "l1d-access");
		id += 1;
	}

	if (spe->synth_opts.llc) {
		spe->sample_llc = true;

		/* Last level cache miss */
		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->llc_miss_id = id;
		arm_spe_set_event_name(evlist, id, "llc-miss");
		id += 1;

		/* Last level cache access */
		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->llc_access_id = id;
		arm_spe_set_event_name(evlist, id, "llc-access");
		id += 1;
	}

	if (spe->synth_opts.tlb) {
		spe->sample_tlb = true;

		/* TLB miss */
		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->tlb_miss_id = id;
		arm_spe_set_event_name(evlist, id, "tlb-miss");
		id += 1;

		/* TLB access */
		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->tlb_access_id = id;
		arm_spe_set_event_name(evlist, id, "tlb-access");
		id += 1;
	}

	if (spe->synth_opts.branches) {
		spe->sample_branch = true;

		/* Branch miss */
		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->branch_miss_id = id;
		arm_spe_set_event_name(evlist, id, "branch-miss");
		id += 1;
	}

	if (spe->synth_opts.remote_access) {
		spe->sample_remote_access = true;

		/* Remote access */
		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->remote_access_id = id;
		arm_spe_set_event_name(evlist, id, "remote-access");
		id += 1;
	}

	if (spe->synth_opts.mem) {
		spe->sample_memory = true;

		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->memory_id = id;
		arm_spe_set_event_name(evlist, id, "memory");
		id += 1;
	}

	if (spe->synth_opts.instructions) {
		if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
			pr_warning("Only instruction-based sampling period is currently supported by Arm SPE.\n");
			goto synth_instructions_out;
		}
		if (spe->synth_opts.period > 1)
			pr_warning("Arm SPE has a hardware-based sample period.\n"
				   "Additional instruction events will be discarded by --itrace\n");

		spe->sample_instructions = true;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = spe->synth_opts.period;
		spe->instructions_sample_period = attr.sample_period;
		err = arm_spe_synth_event(session, &attr, id);
		if (err)
			return err;
		spe->instructions_id = id;
		arm_spe_set_event_name(evlist, id, "instructions");
	}
synth_instructions_out:

	return 0;
}

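/*
 * Session entry point: parse the PERF_RECORD_AUXTRACE_INFO event written by
 * "perf record" and register the SPE decoder with the session.
 */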
int arm_spe_process_auxtrace_info(union perf_event *event,
				  struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
	struct perf_record_time_conv *tc = &session->time_conv;
	const char *cpuid = perf_env__cpuid(session->evlist->env);
	u64 midr = strtol(cpuid, NULL, 16);
	struct arm_spe *spe;
	int err;

	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
					min_sz)
		return -EINVAL;

	spe = zalloc(sizeof(struct arm_spe));
	if (!spe)
		return -ENOMEM;

	err = auxtrace_queues__init(&spe->queues);
	if (err)
		goto err_free;

	spe->session = session;
	spe->machine = &session->machines.host; /* No kvm support */
	spe->auxtrace_type = auxtrace_info->type;
	spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
	spe->midr = midr;

	spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);

	/*
	 * The synthesized event PERF_RECORD_TIME_CONV has already been
	 * handled and the hardware clock parameters are stored in the
	 * session context. Pass them on to the perf_tsc_conversion in
	 * "spe->tc", which is used later to convert between the clock
	 * counter and perf timestamps.
	 *
	 * For backward compatibility, copy the fields starting from
	 * "time_cycles" only if they are contained in the event.
	 */
	spe->tc.time_shift = tc->time_shift;
	spe->tc.time_mult = tc->time_mult;
	spe->tc.time_zero = tc->time_zero;

	if (event_contains(*tc, time_cycles)) {
		spe->tc.time_cycles = tc->time_cycles;
		spe->tc.time_mask = tc->time_mask;
		spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
		spe->tc.cap_user_time_short = tc->cap_user_time_short;
	}

	spe->auxtrace.process_event = arm_spe_process_event;
	spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
	spe->auxtrace.flush_events = arm_spe_flush;
	spe->auxtrace.free_events = arm_spe_free_events;
	spe->auxtrace.free = arm_spe_free;
	spe->auxtrace.evsel_is_auxtrace = arm_spe_evsel_is_auxtrace;
	session->auxtrace = &spe->auxtrace;

	arm_spe_print_info(&auxtrace_info->priv[0]);

	if (dump_trace)
		return 0;

	if (session->itrace_synth_opts && session->itrace_synth_opts->set)
		spe->synth_opts = *session->itrace_synth_opts;
	else
		itrace_synth_opts__set_default(&spe->synth_opts, false);

	err = arm_spe_synth_events(spe, session);
	if (err)
		goto err_free_queues;

	err = auxtrace_queues__process_index(&spe->queues, session);
	if (err)
		goto err_free_queues;

	if (spe->queues.populated)
		spe->data_queued = true;

	return 0;

err_free_queues:
	auxtrace_queues__free(&spe->queues);
	session->auxtrace = NULL;
err_free:
	free(spe);
	return err;
}