0001
0002
0003
0004
0005
0006
0007 #include <api/fs/fs.h>
0008 #include <linux/bits.h>
0009 #include <linux/bitops.h>
0010 #include <linux/compiler.h>
0011 #include <linux/coresight-pmu.h>
0012 #include <linux/kernel.h>
0013 #include <linux/log2.h>
0014 #include <linux/string.h>
0015 #include <linux/types.h>
0016 #include <linux/zalloc.h>
0017
0018 #include "cs-etm.h"
0019 #include "../../../util/debug.h"
0020 #include "../../../util/record.h"
0021 #include "../../../util/auxtrace.h"
0022 #include "../../../util/cpumap.h"
0023 #include "../../../util/event.h"
0024 #include "../../../util/evlist.h"
0025 #include "../../../util/evsel.h"
0026 #include "../../../util/perf_api_probe.h"
0027 #include "../../../util/evsel_config.h"
0028 #include "../../../util/pmu.h"
0029 #include "../../../util/cs-etm.h"
0030 #include <internal/lib.h> // page_size
0031 #include "../../../util/session.h"
0032
0033 #include <errno.h>
0034 #include <stdlib.h>
0035 #include <sys/stat.h>
0036
/*
 * Per-session state for CoreSight ETM AUX-trace recording.  Embedded in the
 * generic auxtrace_record (itr) so the callbacks can recover it with
 * container_of().
 */
struct cs_etm_recording {
	struct auxtrace_record	itr;		/* generic base; must stay first-class for container_of() */
	struct perf_pmu		*cs_etm_pmu;	/* the cs_etm PMU found at init time */
	struct evlist		*evlist;	/* cached in cs_etm_recording_options() */
	bool			snapshot_mode;	/* true when -S/snapshot mode was requested */
	size_t			snapshot_size;	/* user-requested snapshot size, 0 = auto */
};
0044
/*
 * Sysfs paths (relative to the PMU's per-cpu directory) of the read-only
 * ETMv3 configuration registers captured into the metadata header.
 */
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};
0049
/*
 * Sysfs paths of the read-only ETMv4 registers captured into the metadata
 * header.  The last entry (TRCDEVARCH) is only meaningful on ETE-capable
 * implementations but lives in the same table for convenience.
 */
static const char * const metadata_etmv4_ro[] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
	[CS_ETE_TRCDEVARCH]		= "mgmt/trcdevarch"
};
0058
0059 static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
0060 static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu);
0061
0062 static int cs_etm_set_context_id(struct auxtrace_record *itr,
0063 struct evsel *evsel, int cpu)
0064 {
0065 struct cs_etm_recording *ptr;
0066 struct perf_pmu *cs_etm_pmu;
0067 char path[PATH_MAX];
0068 int err = -EINVAL;
0069 u32 val;
0070 u64 contextid;
0071
0072 ptr = container_of(itr, struct cs_etm_recording, itr);
0073 cs_etm_pmu = ptr->cs_etm_pmu;
0074
0075 if (!cs_etm_is_etmv4(itr, cpu))
0076 goto out;
0077
0078
0079 snprintf(path, PATH_MAX, "cpu%d/%s",
0080 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
0081 err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
0082
0083
0084 if (err != 1) {
0085 pr_err("%s: can't read file %s\n",
0086 CORESIGHT_ETM_PMU_NAME, path);
0087 goto out;
0088 }
0089
0090
0091 contextid = evsel->core.attr.config &
0092 (BIT(ETM_OPT_CTXTID) | BIT(ETM_OPT_CTXTID2));
0093
0094
0095
0096
0097
0098
0099
0100
0101 if (!contextid)
0102 contextid = perf_pmu__format_bits(&cs_etm_pmu->format,
0103 "contextid");
0104
0105 if (contextid & BIT(ETM_OPT_CTXTID)) {
0106
0107
0108
0109
0110
0111
0112
0113 val = BMVAL(val, 5, 9);
0114 if (!val || val != 0x4) {
0115 pr_err("%s: CONTEXTIDR_EL1 isn't supported\n",
0116 CORESIGHT_ETM_PMU_NAME);
0117 err = -EINVAL;
0118 goto out;
0119 }
0120 }
0121
0122 if (contextid & BIT(ETM_OPT_CTXTID2)) {
0123
0124
0125
0126
0127
0128
0129
0130 if (!BMVAL(val, 29, 30) || BMVAL(val, 10, 14) < 4) {
0131 pr_err("%s: CONTEXTIDR_EL2 isn't supported\n",
0132 CORESIGHT_ETM_PMU_NAME);
0133 err = -EINVAL;
0134 goto out;
0135 }
0136 }
0137
0138
0139 evsel->core.attr.config |= contextid;
0140 err = 0;
0141
0142 out:
0143 return err;
0144 }
0145
/*
 * Validate and enable timestamp tracing for @evsel on @cpu.
 *
 * Reads TRCIDR0 through sysfs and checks its timestamp-size field before
 * setting ETM_OPT_TS in the event config.
 *
 * Returns 0 on success, -EINVAL if the CPU is not ETMv4, the sysfs read
 * fails, or the tracer implements no timestamp support.
 */
static int cs_etm_set_timestamp(struct auxtrace_record *itr,
				struct evsel *evsel, int cpu)
{
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;
	char path[PATH_MAX];
	int err = -EINVAL;
	u32 val;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	/* Timestamp option validation below is ETMv4 specific */
	if (!cs_etm_is_etmv4(itr, cpu))
		goto out;

	/* Get a handle on TRCIDR0 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* There was a problem reading the file, bailing out */
	if (err != 1) {
		pr_err("%s: can't read file %s\n",
		       CORESIGHT_ETM_PMU_NAME, path);
		goto out;
	}

	/*
	 * TRCIDR0 bits [28:24] hold the timestamp-size field; all-zero
	 * means timestamps are not implemented on this tracer (hedge:
	 * field layout per the ETMv4 spec — confirm against the
	 * architecture manual).
	 */
	val &= GENMASK(28, 24);
	if (!val) {
		err = -EINVAL;
		goto out;
	}

	/* All good, let the kernel know */
	evsel->core.attr.config |= (1 << ETM_OPT_TS);
	err = 0;

out:
	return err;
}
0193
0194 #define ETM_SET_OPT_CTXTID (1 << 0)
0195 #define ETM_SET_OPT_TS (1 << 1)
0196 #define ETM_SET_OPT_MASK (ETM_SET_OPT_CTXTID | ETM_SET_OPT_TS)
0197
0198 static int cs_etm_set_option(struct auxtrace_record *itr,
0199 struct evsel *evsel, u32 option)
0200 {
0201 int i, err = -EINVAL;
0202 struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
0203 struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
0204
0205
0206 for (i = 0; i < cpu__max_cpu().cpu; i++) {
0207 struct perf_cpu cpu = { .cpu = i, };
0208
0209 if (!perf_cpu_map__has(event_cpus, cpu) ||
0210 !perf_cpu_map__has(online_cpus, cpu))
0211 continue;
0212
0213 if (option & BIT(ETM_OPT_CTXTID)) {
0214 err = cs_etm_set_context_id(itr, evsel, i);
0215 if (err)
0216 goto out;
0217 }
0218 if (option & BIT(ETM_OPT_TS)) {
0219 err = cs_etm_set_timestamp(itr, evsel, i);
0220 if (err)
0221 goto out;
0222 }
0223 if (option & ~(BIT(ETM_OPT_CTXTID) | BIT(ETM_OPT_TS)))
0224
0225 goto out;
0226 }
0227
0228 err = 0;
0229 out:
0230 perf_cpu_map__put(online_cpus);
0231 return err;
0232 }
0233
0234 static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
0235 struct record_opts *opts,
0236 const char *str)
0237 {
0238 struct cs_etm_recording *ptr =
0239 container_of(itr, struct cs_etm_recording, itr);
0240 unsigned long long snapshot_size = 0;
0241 char *endptr;
0242
0243 if (str) {
0244 snapshot_size = strtoull(str, &endptr, 0);
0245 if (*endptr || snapshot_size > SIZE_MAX)
0246 return -1;
0247 }
0248
0249 opts->auxtrace_snapshot_mode = true;
0250 opts->auxtrace_snapshot_size = snapshot_size;
0251 ptr->snapshot_size = snapshot_size;
0252
0253 return 0;
0254 }
0255
/*
 * Resolve a user-specified sink name (the @config term on the event) into
 * the kernel's hash for that sink and store it in attr.config2.
 *
 * If config2 already carries a sink hash (e.g. set directly on the command
 * line) nothing is done.  If no sink term is present the function also
 * returns 0 and lets the kernel pick a default sink.
 */
static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
				struct evsel *evsel)
{
	char msg[BUFSIZ], path[PATH_MAX], *sink;
	struct evsel_config_term *term;
	int ret = -EINVAL;
	u32 hash;

	/* Sink hash already set, nothing to do */
	if (evsel->core.attr.config2 & GENMASK(31, 0))
		return 0;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type != EVSEL__CONFIG_TERM_DRV_CFG)
			continue;

		sink = term->val.str;
		snprintf(path, PATH_MAX, "sinks/%s", sink);

		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
		if (ret != 1) {
			/*
			 * NOTE(review): errno here reflects whatever the
			 * failed sysfs access left behind — presumably
			 * meaningful when the sink file doesn't exist, but
			 * not guaranteed by perf_pmu__scan_file's contract.
			 */
			pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
			       sink, evsel__name(evsel), errno,
			       str_error_r(errno, msg, sizeof(msg)));
			return ret;
		}

		evsel->core.attr.config2 |= hash;
		return 0;
	}

	/*
	 * No sink was provided on the command line - allow the CoreSight
	 * system to look for a default.
	 */
	return 0;
}
0292
/*
 * Configure record options for a CoreSight ETM session: locate the single
 * cs_etm event, attach the sink, size the AUX buffers (snapshot and
 * full-trace modes), enable per-CPU options, and add a tracking event.
 *
 * Returns 0 on success or when no cs_etm event is present; a negative
 * error code otherwise.
 */
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct evlist *evlist,
				    struct record_opts *opts)
{
	int ret;
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evsel *evsel, *cs_etm_evsel = NULL;
	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool privileged = perf_event_paranoid_check(-1);
	int err = 0;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	/* Record context switches unless the user opted out */
	if (!record_opts__no_switch_events(opts) &&
	    perf_can_record_switch_events())
		opts->record_switch_events = true;

	/* Find the cs_etm event; there may be at most one */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			evsel->needs_auxtrace_mmap = true;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* no need to continue if at least one event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
	if (ret)
		return ret;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* we are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size given: pick defaults.  Privileged users get a
		 * larger AUX buffer; unprivileged ones also get a bigger
		 * regular mmap if it is still at its UINT_MAX sentinel.
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
						opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * Snapshot size can't be bigger than the AUX area; default
		 * it to the full buffer when unset.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * Something went wrong somewhere - this shouldn't happen.
		 * Derive the AUX buffer size from the snapshot size,
		 * rounded up to a power-of-two page count.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the AUX area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* Buffer sizes weren't specified with '-m,xyz' so give some defaults */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}

	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * When a system-wide or per-cpu session is requested, enable
	 * context ID and timestamp tracing so samples can be correlated
	 * with the right CPU/task.
	 */
	if (!perf_cpu_map__empty(cpus)) {
		evsel__set_sample_bit(cs_etm_evsel, CPU);

		err = cs_etm_set_option(itr, cs_etm_evsel,
					BIT(ETM_OPT_CTXTID) | BIT(ETM_OPT_TS));
		if (err)
			goto out;
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct evsel *tracking_evsel;

		err = parse_event(evlist, "dummy:u");
		if (err)
			goto out;

		tracking_evsel = evlist__last(evlist);
		evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->core.attr.freq = 0;
		tracking_evsel->core.attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!perf_cpu_map__empty(cpus))
			evsel__set_sample_bit(tracking_evsel, TIME);
	}

out:
	return err;
}
0459
0460 static u64 cs_etm_get_config(struct auxtrace_record *itr)
0461 {
0462 u64 config = 0;
0463 struct cs_etm_recording *ptr =
0464 container_of(itr, struct cs_etm_recording, itr);
0465 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
0466 struct evlist *evlist = ptr->evlist;
0467 struct evsel *evsel;
0468
0469 evlist__for_each_entry(evlist, evsel) {
0470 if (evsel->core.attr.type == cs_etm_pmu->type) {
0471
0472
0473
0474
0475
0476
0477
0478
0479 config = evsel->core.attr.config;
0480 break;
0481 }
0482 }
0483
0484 return config;
0485 }
0486
0487 #ifndef BIT
0488 #define BIT(N) (1UL << (N))
0489 #endif
0490
0491 static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
0492 {
0493 u64 config = 0;
0494 u64 config_opts = 0;
0495
0496
0497
0498
0499
0500
0501
0502 config_opts = cs_etm_get_config(itr);
0503 if (config_opts & BIT(ETM_OPT_CYCACC))
0504 config |= BIT(ETM4_CFG_BIT_CYCACC);
0505 if (config_opts & BIT(ETM_OPT_CTXTID))
0506 config |= BIT(ETM4_CFG_BIT_CTXTID);
0507 if (config_opts & BIT(ETM_OPT_TS))
0508 config |= BIT(ETM4_CFG_BIT_TS);
0509 if (config_opts & BIT(ETM_OPT_RETSTK))
0510 config |= BIT(ETM4_CFG_BIT_RETSTK);
0511 if (config_opts & BIT(ETM_OPT_CTXTID2))
0512 config |= BIT(ETM4_CFG_BIT_VMID) |
0513 BIT(ETM4_CFG_BIT_VMID_OPT);
0514 if (config_opts & BIT(ETM_OPT_BRANCH_BROADCAST))
0515 config |= BIT(ETM4_CFG_BIT_BB);
0516
0517 return config;
0518 }
0519
0520 static size_t
0521 cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
0522 struct evlist *evlist __maybe_unused)
0523 {
0524 int i;
0525 int etmv3 = 0, etmv4 = 0, ete = 0;
0526 struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
0527 struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
0528
0529
0530 if (!perf_cpu_map__empty(event_cpus)) {
0531 for (i = 0; i < cpu__max_cpu().cpu; i++) {
0532 struct perf_cpu cpu = { .cpu = i, };
0533
0534 if (!perf_cpu_map__has(event_cpus, cpu) ||
0535 !perf_cpu_map__has(online_cpus, cpu))
0536 continue;
0537
0538 if (cs_etm_is_ete(itr, i))
0539 ete++;
0540 else if (cs_etm_is_etmv4(itr, i))
0541 etmv4++;
0542 else
0543 etmv3++;
0544 }
0545 } else {
0546
0547 for (i = 0; i < cpu__max_cpu().cpu; i++) {
0548 struct perf_cpu cpu = { .cpu = i, };
0549
0550 if (!perf_cpu_map__has(online_cpus, cpu))
0551 continue;
0552
0553 if (cs_etm_is_ete(itr, i))
0554 ete++;
0555 else if (cs_etm_is_etmv4(itr, i))
0556 etmv4++;
0557 else
0558 etmv3++;
0559 }
0560 }
0561
0562 perf_cpu_map__put(online_cpus);
0563
0564 return (CS_ETM_HEADER_SIZE +
0565 (ete * CS_ETE_PRIV_SIZE) +
0566 (etmv4 * CS_ETMV4_PRIV_SIZE) +
0567 (etmv3 * CS_ETMV3_PRIV_SIZE));
0568 }
0569
0570 static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
0571 {
0572 bool ret = false;
0573 char path[PATH_MAX];
0574 int scan;
0575 unsigned int val;
0576 struct cs_etm_recording *ptr =
0577 container_of(itr, struct cs_etm_recording, itr);
0578 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
0579
0580
0581 snprintf(path, PATH_MAX, "cpu%d/%s",
0582 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
0583 scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
0584
0585
0586 if (scan == 1)
0587 ret = true;
0588
0589 return ret;
0590 }
0591
0592 static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
0593 {
0594 char pmu_path[PATH_MAX];
0595 int scan;
0596 unsigned int val = 0;
0597
0598
0599 snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
0600
0601 scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
0602 if (scan != 1)
0603 pr_err("%s: error reading: %s\n", __func__, pmu_path);
0604
0605 return val;
0606 }
0607
/* TRCDEVARCH register field accessors: ARCHPART is bits [11:0] */
#define TRCDEVARCH_ARCHPART_SHIFT 0
#define TRCDEVARCH_ARCHPART_MASK  GENMASK(11, 0)
#define TRCDEVARCH_ARCHPART(x)    (((x) & TRCDEVARCH_ARCHPART_MASK) >> TRCDEVARCH_ARCHPART_SHIFT)

/* ARCHVER is bits [15:12] */
#define TRCDEVARCH_ARCHVER_SHIFT 12
#define TRCDEVARCH_ARCHVER_MASK  GENMASK(15, 12)
#define TRCDEVARCH_ARCHVER(x)    (((x) & TRCDEVARCH_ARCHVER_MASK) >> TRCDEVARCH_ARCHVER_SHIFT)

/*
 * Report whether @cpu's tracer is an ETE (Embedded Trace Extension)
 * implementation: TRCDEVARCH must advertise architecture version 5 with
 * part number 0xA13 (hedge: values per the ETE identification scheme —
 * confirm against the Arm ETE specification).
 */
static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu)
{
	struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	int trcdevarch = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETE_TRCDEVARCH]);

	/*
	 * ETE if ARCHVER is 5 (ARCHVER is 4 for ETM) and ARCHPART is 0xA13.
	 * See ETM_DEVARCH_ETE_ARCH in coresight-etm4x.h
	 */
	return TRCDEVARCH_ARCHVER(trcdevarch) == 5 && TRCDEVARCH_ARCHPART(trcdevarch) == 0xA13;
}
0628
/*
 * Fill the common ETMv4 part of a per-CPU metadata block: the computed
 * TRCCONFIGR value, the trace ID assigned to @cpu, and the raw read-only
 * identification registers from sysfs.  @data must have room for at least
 * CS_ETMV4_PRIV_MAX entries.
 */
static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, int cpu)
{
	struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Get trace configuration register */
	data[CS_ETMV4_TRCCONFIGR] = cs_etmv4_get_config(itr);
	/* Get traceID from the framework */
	data[CS_ETMV4_TRCTRACEIDR] = coresight_get_trace_id(cpu);
	/* Get read-only information from sysFS */
	data[CS_ETMV4_TRCIDR0] = cs_etm_get_ro(cs_etm_pmu, cpu,
					       metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	data[CS_ETMV4_TRCIDR1] = cs_etm_get_ro(cs_etm_pmu, cpu,
					       metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
	data[CS_ETMV4_TRCIDR2] = cs_etm_get_ro(cs_etm_pmu, cpu,
					       metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
	data[CS_ETMV4_TRCIDR8] = cs_etm_get_ro(cs_etm_pmu, cpu,
					       metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
	data[CS_ETMV4_TRCAUTHSTATUS] = cs_etm_get_ro(cs_etm_pmu, cpu,
						     metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS]);
}
0650
/*
 * Write the per-CPU metadata block for @cpu into info->priv at *offset and
 * advance *offset past it.  The block's magic number and layout depend on
 * the tracer generation (ETE, ETMv4 or ETMv3) detected on that CPU.
 */
static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct perf_record_auxtrace_info *info)
{
	u32 increment, nr_trc_params;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_ete(itr, cpu)) {
		magic = __perf_cs_ete_magic;
		/* ETE shares the ETMv4 fields and adds TRCDEVARCH */
		cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);
		info->priv[*offset + CS_ETE_TRCDEVARCH] =
					cs_etm_get_ro(cs_etm_pmu, cpu,
						      metadata_etmv4_ro[CS_ETE_TRCDEVARCH]);

		/* How much space was used */
		increment = CS_ETE_PRIV_MAX;
		nr_trc_params = CS_ETE_PRIV_MAX - CS_ETM_COMMON_BLK_MAX_V1;
	} else if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
		nr_trc_params = CS_ETMV4_PRIV_MAX - CS_ETMV4_TRCCONFIGR;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
		nr_trc_params = CS_ETM_PRIV_MAX - CS_ETM_ETMCR;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	info->priv[*offset + CS_ETM_NR_TRC_PARAMS] = nr_trc_params;
	/* Where the next CPU entry should start from */
	*offset += increment;
}
0707
0708 static int cs_etm_info_fill(struct auxtrace_record *itr,
0709 struct perf_session *session,
0710 struct perf_record_auxtrace_info *info,
0711 size_t priv_size)
0712 {
0713 int i;
0714 u32 offset;
0715 u64 nr_cpu, type;
0716 struct perf_cpu_map *cpu_map;
0717 struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
0718 struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
0719 struct cs_etm_recording *ptr =
0720 container_of(itr, struct cs_etm_recording, itr);
0721 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
0722
0723 if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
0724 return -EINVAL;
0725
0726 if (!session->evlist->core.nr_mmaps)
0727 return -EINVAL;
0728
0729
0730 if (perf_cpu_map__empty(event_cpus)) {
0731 cpu_map = online_cpus;
0732 } else {
0733
0734 for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) {
0735 struct perf_cpu cpu = { .cpu = i, };
0736
0737 if (perf_cpu_map__has(event_cpus, cpu) &&
0738 !perf_cpu_map__has(online_cpus, cpu))
0739 return -EINVAL;
0740 }
0741
0742 cpu_map = event_cpus;
0743 }
0744
0745 nr_cpu = perf_cpu_map__nr(cpu_map);
0746
0747 type = cs_etm_pmu->type;
0748
0749
0750 info->type = PERF_AUXTRACE_CS_ETM;
0751 info->priv[CS_HEADER_VERSION] = CS_HEADER_CURRENT_VERSION;
0752 info->priv[CS_PMU_TYPE_CPUS] = type << 32;
0753 info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
0754 info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
0755
0756 offset = CS_ETM_SNAPSHOT + 1;
0757
0758 for (i = 0; i < cpu__max_cpu().cpu && offset < priv_size; i++) {
0759 struct perf_cpu cpu = { .cpu = i, };
0760
0761 if (perf_cpu_map__has(cpu_map, cpu))
0762 cs_etm_get_metadata(i, &offset, itr, info);
0763 }
0764
0765 perf_cpu_map__put(online_cpus);
0766
0767 return 0;
0768 }
0769
0770 static int cs_etm_snapshot_start(struct auxtrace_record *itr)
0771 {
0772 struct cs_etm_recording *ptr =
0773 container_of(itr, struct cs_etm_recording, itr);
0774 struct evsel *evsel;
0775
0776 evlist__for_each_entry(ptr->evlist, evsel) {
0777 if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
0778 return evsel__disable(evsel);
0779 }
0780 return -EINVAL;
0781 }
0782
0783 static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
0784 {
0785 struct cs_etm_recording *ptr =
0786 container_of(itr, struct cs_etm_recording, itr);
0787 struct evsel *evsel;
0788
0789 evlist__for_each_entry(ptr->evlist, evsel) {
0790 if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
0791 return evsel__enable(evsel);
0792 }
0793 return -EINVAL;
0794 }
0795
0796 static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
0797 {
0798 return (((u64) rand() << 0) & 0x00000000FFFFFFFFull) |
0799 (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
0800 }
0801
0802 static void cs_etm_recording_free(struct auxtrace_record *itr)
0803 {
0804 struct cs_etm_recording *ptr =
0805 container_of(itr, struct cs_etm_recording, itr);
0806
0807 free(ptr);
0808 }
0809
/*
 * Allocate and initialise the CoreSight ETM auxtrace_record, wiring up all
 * recording callbacks.  On failure returns NULL and stores -EINVAL (no
 * cs_etm PMU present) or -ENOMEM in *err; on success stores 0.
 *
 * Ownership: the returned record is freed via its ->free callback
 * (cs_etm_recording_free).
 */
struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu			= cs_etm_pmu;
	ptr->itr.pmu			= cs_etm_pmu;
	ptr->itr.parse_snapshot_options	= cs_etm_parse_snapshot_options;
	ptr->itr.recording_options	= cs_etm_recording_options;
	ptr->itr.info_priv_size		= cs_etm_info_priv_size;
	ptr->itr.info_fill		= cs_etm_info_fill;
	ptr->itr.snapshot_start		= cs_etm_snapshot_start;
	ptr->itr.snapshot_finish	= cs_etm_snapshot_finish;
	ptr->itr.reference		= cs_etm_reference;
	ptr->itr.free			= cs_etm_recording_free;
	ptr->itr.read_finish		= auxtrace_record__read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}