// SPDX-License-Identifier: GPL-2.0-only
#include <errno.h>
#include <inttypes.h>
#include "cpumap.h"
#include "evlist.h"
#include "evsel.h"
#include "../perf.h"
#include "util/pmu-hybrid.h"
#include "util/evlist-hybrid.h"
#include "debug.h"
#include <unistd.h>
#include <stdlib.h>
#include <linux/err.h>
#include <linux/string.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>

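/*
 * Add a default "cycles" event per hybrid PMU (e.g. cpu_core and
 * cpu_atom). The PMU type is encoded into the high bits of the event
 * config, and each evsel is bound to the cpus of its PMU.
 */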
int evlist__add_default_hybrid(struct evlist *evlist, bool precise)
{
    struct evsel *evsel;
    struct perf_pmu *pmu;
    __u64 config;
    struct perf_cpu_map *cpus;

    perf_pmu__for_each_hybrid_pmu(pmu) {
        config = PERF_COUNT_HW_CPU_CYCLES |
             ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT);
        evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
                      config);
        if (!evsel)
            return -ENOMEM;

        cpus = perf_cpu_map__get(pmu->cpus);
        evsel->core.cpus = cpus;
        evsel->core.own_cpus = perf_cpu_map__get(cpus);
        evsel->pmu_name = strdup(pmu->name);
        evlist__add(evlist, evsel);
    }

    return 0;
}

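/*
 * Return true if the members of an event group do not all come from the
 * same hybrid PMU.
 */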
static bool group_hybrid_conflict(struct evsel *leader)
{
    struct evsel *pos, *prev = NULL;

    for_each_group_evsel(pos, leader) {
        if (!evsel__is_hybrid(pos))
            continue;

        if (prev && strcmp(prev->pmu_name, pos->pmu_name))
            return true;

        prev = pos;
    }

    return false;
}

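/*
 * Warn once if any group in the evlist mixes events from different
 * hybrid PMUs.
 */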
void evlist__warn_hybrid_group(struct evlist *evlist)
{
    struct evsel *evsel;

    evlist__for_each_entry(evlist, evsel) {
        if (evsel__is_group_leader(evsel) &&
            evsel->core.nr_members > 1 &&
            group_hybrid_conflict(evsel)) {
            pr_warning("WARNING: events in group from "
                   "different hybrid PMUs!\n");
            return;
        }
    }
}

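/* Return true if any event in the evlist belongs to a hybrid PMU. */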
bool evlist__has_hybrid(struct evlist *evlist)
{
    struct evsel *evsel;

    evlist__for_each_entry(evlist, evsel) {
        if (evsel->pmu_name &&
            perf_pmu__is_hybrid(evsel->pmu_name)) {
            return true;
        }
    }

    return false;
}

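/*
 * Restrict each hybrid evsel to the cpus requested in cpu_list. Evsels
 * whose PMU shares no cpus with cpu_list are removed from the evlist.
 * Returns -1 if every hybrid event had to be removed (or on error),
 * 0 otherwise.
 */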
int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
{
    struct perf_cpu_map *cpus;
    struct evsel *evsel, *tmp;
    struct perf_pmu *pmu;
    int ret = 0, unmatched_count = 0, events_nr = 0;

    if (!perf_pmu__has_hybrid() || !cpu_list)
        return 0;

    cpus = perf_cpu_map__new(cpu_list);
    if (!cpus)
        return -1;

    /*
     * The evsels are created with the hybrid PMU's cpus, so we now need
     * to check each evsel's cpus against cpu_list and adjust them where
     * the two conflict. For example, if an evsel's cpus are cpu0-7 but
     * the cpu_list is cpu6-8, the evsel's cpus must be narrowed to
     * cpu6-7; the maps are then propagated in evlist__create_maps().
     */
    evlist__for_each_entry_safe(evlist, tmp, evsel) {
        struct perf_cpu_map *matched_cpus, *unmatched_cpus;
        char buf1[128], buf2[128];

        pmu = perf_pmu__find_hybrid_pmu(evsel->pmu_name);
        if (!pmu)
            continue;

        ret = perf_pmu__cpus_match(pmu, cpus, &matched_cpus,
                       &unmatched_cpus);
        if (ret)
            goto out;

        events_nr++;

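        /*
         * If cpu_list only partially matches this PMU's cpus, narrow
         * the evsel's cpu maps to the matched subset and warn about
         * any requested cpus that are skipped.
         */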
        if (perf_cpu_map__nr(matched_cpus) > 0 &&
            (perf_cpu_map__nr(unmatched_cpus) > 0 ||
             perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(cpus) ||
             perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(pmu->cpus))) {
            perf_cpu_map__put(evsel->core.cpus);
            perf_cpu_map__put(evsel->core.own_cpus);
            evsel->core.cpus = perf_cpu_map__get(matched_cpus);
            evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);

            if (perf_cpu_map__nr(unmatched_cpus) > 0) {
                cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
                pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
                       buf1, pmu->name, evsel->name);
            }
        }

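        /*
         * None of the requested cpus belongs to this PMU: drop the
         * evsel and tell the user which cpu range would be valid.
         */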
        if (perf_cpu_map__nr(matched_cpus) == 0) {
            evlist__remove(evlist, evsel);
            evsel__delete(evsel);

            cpu_map__snprint(cpus, buf1, sizeof(buf1));
            cpu_map__snprint(pmu->cpus, buf2, sizeof(buf2));
            pr_warning("WARNING: %s isn't a '%s', please use a CPU list in the '%s' range (%s)\n",
                   buf1, pmu->name, pmu->name, buf2);
            unmatched_count++;
        }

        perf_cpu_map__put(matched_cpus);
        perf_cpu_map__put(unmatched_cpus);
    }
    if (events_nr)
        ret = (unmatched_count == events_nr) ? -1 : 0;
out:
    perf_cpu_map__put(cpus);
    return ret;
}