#include <stdio.h>
#include "api/fs/fs.h"
#include "util/pmu.h"
#include "util/topdown.h"
#include "util/evlist.h"
#include "util/debug.h"
#include "util/pmu-hybrid.h"
#include "topdown.h"
#include "evsel.h"

#define TOPDOWN_L1_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}"
#define TOPDOWN_L1_EVENTS_CORE "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/}"
#define TOPDOWN_L2_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}"
#define TOPDOWN_L2_EVENTS_CORE "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/,cpu_core/topdown-heavy-ops/,cpu_core/topdown-br-mispredict/,cpu_core/topdown-fetch-lat/,cpu_core/topdown-mem-bound/}"
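
/* Check whether there is a PMU which supports the perf metrics. */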
bool topdown_sys_has_perf_metrics(void)
{
	static bool has_perf_metrics;
	static bool cached;
	struct perf_pmu *pmu;

	if (cached)
		return has_perf_metrics;
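
	/*
	 * The perf metrics feature is a core PMU feature.
	 * The PERF_TYPE_RAW type is the type of a core PMU.
	 * The slots event is only available when the core PMU
	 * supports the perf metrics feature.
	 */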
	pmu = perf_pmu__find_by_type(PERF_TYPE_RAW);
	if (pmu && pmu_have_event(pmu->name, "slots"))
		has_perf_metrics = true;

	cached = true;
	return has_perf_metrics;
}
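
/*
 * Check whether we can use a group for the topdown events.
 * Without a group the results may be wrong due to multiplexing.
 * The group cannot be scheduled reliably while the NMI watchdog
 * occupies a counter, so fall back (and warn) when it is enabled.
 */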
bool arch_topdown_check_group(bool *warn)
{
	int n;

	if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0)
		return false;
	if (n > 0) {
		*warn = true;
		return false;
	}
	return true;
}

void arch_topdown_group_warn(void)
{
	fprintf(stderr,
		"nmi_watchdog enabled with topdown. May give wrong results.\n"
		"Disable with echo 0 > /proc/sys/kernel/nmi_watchdog\n");
}

#define TOPDOWN_SLOTS 0x0400
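
/*
 * Check whether a topdown group supports sample-read.
 *
 * Only the topdown metric events support sample-read; the slots
 * event must be the leader of the group.
 */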
bool arch_topdown_sample_read(struct evsel *leader)
{
	if (!evsel__sys_has_perf_metrics(leader))
		return false;

	if (leader->core.attr.config == TOPDOWN_SLOTS)
		return true;

	return false;
}
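
/*
 * Pick the PMU whose topdown events should be used. On a hybrid
 * system, default to the cpu_core PMU when no hybrid PMU name has
 * been set.
 */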
const char *arch_get_topdown_pmu_name(struct evlist *evlist, bool warn)
{
	const char *pmu_name;

	if (!perf_pmu__has_hybrid())
		return "cpu";

	if (!evlist->hybrid_pmu_name) {
		if (warn)
			pr_warning("WARNING: default to use cpu_core topdown events\n");
		evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu("core");
	}

	pmu_name = evlist->hybrid_pmu_name;

	return pmu_name;
}
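
/*
 * Add the default topdown events to the evlist: the L2 metrics when
 * the PMU exposes them (topdown-heavy-ops is present), otherwise the
 * L1 metrics. Nothing is added when perf metrics are not supported.
 */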
int topdown_parse_events(struct evlist *evlist)
{
	const char *topdown_events;
	const char *pmu_name;

	if (!topdown_sys_has_perf_metrics())
		return 0;

	pmu_name = arch_get_topdown_pmu_name(evlist, false);

	if (pmu_have_event(pmu_name, "topdown-heavy-ops")) {
		if (!strcmp(pmu_name, "cpu_core"))
			topdown_events = TOPDOWN_L2_EVENTS_CORE;
		else
			topdown_events = TOPDOWN_L2_EVENTS;
	} else {
		if (!strcmp(pmu_name, "cpu_core"))
			topdown_events = TOPDOWN_L1_EVENTS_CORE;
		else
			topdown_events = TOPDOWN_L1_EVENTS;
	}

	return parse_event(evlist, topdown_events);
}