Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright(C) 2015 Linaro Limited. All rights reserved.
0004  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
0005  */
0006 
0007 #include <stdbool.h>
0008 #include <linux/coresight-pmu.h>
0009 #include <linux/zalloc.h>
0010 
0011 #include "../../../util/auxtrace.h"
0012 #include "../../../util/debug.h"
0013 #include "../../../util/evlist.h"
0014 #include "../../../util/pmu.h"
0015 #include "cs-etm.h"
0016 #include "arm-spe.h"
0017 
0018 static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
0019 {
0020     struct perf_pmu **arm_spe_pmus = NULL;
0021     int ret, i, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
0022     /* arm_spe_xxxxxxxxx\0 */
0023     char arm_spe_pmu_name[sizeof(ARM_SPE_PMU_NAME) + 10];
0024 
0025     arm_spe_pmus = zalloc(sizeof(struct perf_pmu *) * nr_cpus);
0026     if (!arm_spe_pmus) {
0027         pr_err("spes alloc failed\n");
0028         *err = -ENOMEM;
0029         return NULL;
0030     }
0031 
0032     for (i = 0; i < nr_cpus; i++) {
0033         ret = sprintf(arm_spe_pmu_name, "%s%d", ARM_SPE_PMU_NAME, i);
0034         if (ret < 0) {
0035             pr_err("sprintf failed\n");
0036             *err = -ENOMEM;
0037             return NULL;
0038         }
0039 
0040         arm_spe_pmus[*nr_spes] = perf_pmu__find(arm_spe_pmu_name);
0041         if (arm_spe_pmus[*nr_spes]) {
0042             pr_debug2("%s %d: arm_spe_pmu %d type %d name %s\n",
0043                  __func__, __LINE__, *nr_spes,
0044                  arm_spe_pmus[*nr_spes]->type,
0045                  arm_spe_pmus[*nr_spes]->name);
0046             (*nr_spes)++;
0047         }
0048     }
0049 
0050     return arm_spe_pmus;
0051 }
0052 
0053 struct auxtrace_record
0054 *auxtrace_record__init(struct evlist *evlist, int *err)
0055 {
0056     struct perf_pmu *cs_etm_pmu;
0057     struct evsel *evsel;
0058     bool found_etm = false;
0059     struct perf_pmu *found_spe = NULL;
0060     struct perf_pmu **arm_spe_pmus = NULL;
0061     int nr_spes = 0;
0062     int i = 0;
0063 
0064     if (!evlist)
0065         return NULL;
0066 
0067     cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
0068     arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
0069 
0070     evlist__for_each_entry(evlist, evsel) {
0071         if (cs_etm_pmu &&
0072             evsel->core.attr.type == cs_etm_pmu->type)
0073             found_etm = true;
0074 
0075         if (!nr_spes || found_spe)
0076             continue;
0077 
0078         for (i = 0; i < nr_spes; i++) {
0079             if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
0080                 found_spe = arm_spe_pmus[i];
0081                 break;
0082             }
0083         }
0084     }
0085     free(arm_spe_pmus);
0086 
0087     if (found_etm && found_spe) {
0088         pr_err("Concurrent ARM Coresight ETM and SPE operation not currently supported\n");
0089         *err = -EOPNOTSUPP;
0090         return NULL;
0091     }
0092 
0093     if (found_etm)
0094         return cs_etm_record_init(err);
0095 
0096 #if defined(__aarch64__)
0097     if (found_spe)
0098         return arm_spe_recording_init(err, found_spe);
0099 #endif
0100 
0101     /*
0102      * Clear 'err' even if we haven't found an event - that way perf
0103      * record can still be used even if tracers aren't present.  The NULL
0104      * return value will take care of telling the infrastructure HW tracing
0105      * isn't available.
0106      */
0107     *err = 0;
0108     return NULL;
0109 }
0110 
0111 #if defined(__arm__)
/*
 * Read the 64-bit AUX area head pointer from the perf mmap page on 32-bit
 * ARM, where a plain C load of a u64 would not be single-copy atomic.  LDRD
 * loads both 32-bit halves (%0 and its high half %H0) in one instruction,
 * so a concurrently-updating kernel value is never observed torn.
 *
 * The "Qo" input operand makes the compiler treat pc->aux_head as read by
 * the asm, preventing it from reordering/eliding the access.
 *
 * NOTE(review): no barrier here; presumably ordering of the subsequent AUX
 * data reads against this load is handled by the caller — confirm against
 * the generic auxtrace code.
 */
u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
    struct perf_event_mmap_page *pc = mm->userpg;
    u64 result;

    __asm__ __volatile__(
"   ldrd    %0, %H0, [%1]"
    : "=&r" (result)
    : "r" (&pc->aux_head), "Qo" (pc->aux_head)
    );

    return result;
}
0125 
/*
 * Publish the 64-bit AUX area tail pointer on 32-bit ARM.  STRD stores both
 * 32-bit halves (%2 and its high half %H2) of 'tail' in a single
 * instruction, so the kernel never observes a torn value.
 *
 * The "=Qo" output operand tells the compiler pc->aux_tail is written by
 * the asm, keeping the store visible to it without reordering.
 *
 * Always returns 0 (kept for interface symmetry; there is no failure mode).
 */
int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
    struct perf_event_mmap_page *pc = mm->userpg;

    /* Ensure all reads are done before we write the tail out */
    smp_mb();

    __asm__ __volatile__(
"   strd    %2, %H2, [%1]"
    : "=Qo" (pc->aux_tail)
    : "r" (&pc->aux_tail), "r" (tail)
    );

    return 0;
}
0141 #endif