/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <internal/evsel.h>
#include <perf/evsel.h>
#include "symbol_conf.h"
#include <internal/cpumap.h>
#include <perf/cpumap.h>

struct bpf_object;
struct cgroup;
struct perf_counts;
struct perf_stat_evsel;
union perf_event;
struct bpf_counter_ops;
struct target;
struct hashmap;
struct bperf_leader_bpf;
struct bperf_follower_bpf;
struct perf_pmu;

typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);

enum perf_tool_event {
    PERF_TOOL_NONE      = 0,
    PERF_TOOL_DURATION_TIME = 1,
    PERF_TOOL_USER_TIME = 2,
    PERF_TOOL_SYSTEM_TIME = 3,

    PERF_TOOL_MAX,
};

const char *perf_tool_event__to_str(enum perf_tool_event ev);
enum perf_tool_event perf_tool_event__from_str(const char *str);

#define perf_tool_event__for_each_event(ev)     \
    for ((ev) = PERF_TOOL_DURATION_TIME; (ev) < PERF_TOOL_MAX; ev++)

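/*
 * A minimal usage sketch for the iterator above, assuming <stdio.h> for
 * printf(): walk every tool event and print its string name.
 *
 *     enum perf_tool_event ev;
 *
 *     perf_tool_event__for_each_event(ev)
 *         printf("%s\n", perf_tool_event__to_str(ev));
 */
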
/** struct evsel - event selector
 *
 * @evlist: evlist this evsel is in, if it is in one.
 * @core: libperf evsel object
 * @name: Can be set to retain the original event name passed by the user,
 *        so that when showing results in tools such as 'perf stat', we
 *        show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
 *          struct perf_record_sample
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
 *          is used there is an id sample appended to non-sample events
 * @priv:   Tool-specific private data.
 */
struct evsel {
    struct perf_evsel   core;
    struct evlist       *evlist;
    off_t           id_offset;
    int         id_pos;
    int         is_pos;
    unsigned int        sample_size;

    /*
     * These fields can be set in the parse-events code or similar.
     * Please check evsel__clone() to copy them properly so that
     * they can be released properly.
     */
    struct {
        char            *name;
        char            *group_name;
        const char      *pmu_name;
        struct tep_event    *tp_format;
        char            *filter;
        unsigned long       max_events;
        double          scale;
        const char      *unit;
        struct cgroup       *cgrp;
        const char      *metric_id;
        enum perf_tool_event    tool_event;
        /* parse modifier helper */
        int         exclude_GH;
        int         sample_read;
        bool            snapshot;
        bool            per_pkg;
        bool            percore;
        bool            precise_max;
        bool            use_uncore_alias;
        bool            is_libpfm_event;
        bool            auto_merge_stats;
        bool            collect_stat;
        bool            weak_group;
        bool            bpf_counter;
        bool            use_config_name;
        int         bpf_fd;
        struct bpf_object   *bpf_obj;
        struct list_head    config_terms;
    };

    /*
     * metric fields are similar, but need more care as they can have
     * references to other metrics (other evsels).
     */
    const char *        metric_expr;
    const char *        metric_name;
    struct evsel        **metric_events;
    struct evsel        *metric_leader;

    void            *handler;
    struct perf_counts  *counts;
    struct perf_counts  *prev_raw_counts;
    unsigned long       nr_events_printed;
    struct perf_stat_evsel  *stats;
    void            *priv;
    u64         db_id;
    bool            uniquified_name;
    bool            supported;
    bool            needs_swap;
    bool            disabled;
    bool            no_aux_samples;
    bool            immediate;
    bool            tracking;
    bool            ignore_missing_thread;
    bool            forced_leader;
    bool            cmdline_group_boundary;
    bool            merged_stat;
    bool            reset_group;
    bool            errored;
    bool            needs_auxtrace_mmap;
    struct hashmap      *per_pkg_mask;
    int         err;
    struct {
        evsel__sb_cb_t  *cb;
        void        *data;
    } side_band;
    /*
     * For reporting purposes, an evsel sample can have a callchain
     * synthesized from AUX area data. Keep track of synthesized sample
     * types here. Note, the recorded sample_type cannot be changed because
     * it is needed to continue to parse events.
     * See also evsel__has_callchain().
     */
    __u64           synth_sample_type;

    /*
     * bpf_counter_ops serves two use cases:
     *   1. perf-stat -b          counting events used by BPF programs
     *   2. perf-stat --use-bpf   use BPF programs to aggregate counts
     */
    struct bpf_counter_ops  *bpf_counter_ops;

    /* for perf-stat -b */
    struct list_head    bpf_counter_list;

    /* for perf-stat --use-bpf */
    int         bperf_leader_prog_fd;
    int         bperf_leader_link_fd;
    union {
        struct bperf_leader_bpf *leader_skel;
        struct bperf_follower_bpf *follower_skel;
    };
    unsigned long       open_flags;
    int         precise_ip_original;

    /* for missing_features */
    struct perf_pmu     *pmu;
};

struct perf_missing_features {
    bool sample_id_all;
    bool exclude_guest;
    bool mmap2;
    bool cloexec;
    bool clockid;
    bool clockid_wrong;
    bool lbr_flags;
    bool write_backward;
    bool group_read;
    bool ksymbol;
    bool bpf;
    bool aux_output;
    bool branch_hw_idx;
    bool cgroup;
    bool data_page_size;
    bool code_page_size;
    bool weight_struct;
};

extern struct perf_missing_features perf_missing_features;

struct perf_cpu_map;
struct thread_map;
struct record_opts;

static inline struct perf_cpu_map *evsel__cpus(struct evsel *evsel)
{
    return perf_evsel__cpus(&evsel->core);
}

static inline int evsel__nr_cpus(struct evsel *evsel)
{
    return perf_cpu_map__nr(evsel__cpus(evsel));
}

void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
               struct perf_counts_values *count);

int evsel__object_config(size_t object_size,
             int (*init)(struct evsel *evsel),
             void (*fini)(struct evsel *evsel));

struct perf_pmu *evsel__find_pmu(struct evsel *evsel);
bool evsel__is_aux_event(struct evsel *evsel);

struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct evsel *evsel__new(struct perf_event_attr *attr)
{
    return evsel__new_idx(attr, 0);
}

struct evsel *evsel__clone(struct evsel *orig);
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx);

int copy_config_terms(struct list_head *dst, struct list_head *src);
void free_config_terms(struct list_head *config_terms);

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
static inline struct evsel *evsel__newtp(const char *sys, const char *name)
{
    return evsel__newtp_idx(sys, name, 0);
}

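/*
 * Minimal usage sketch for evsel__newtp(): the returned pointer encodes
 * errors via <linux/err.h>, so check it with IS_ERR()/PTR_ERR() rather than
 * comparing against NULL ("sched"/"sched_switch" is just an illustrative
 * tracepoint):
 *
 *     struct evsel *evsel = evsel__newtp("sched", "sched_switch");
 *
 *     if (IS_ERR(evsel))
 *         return PTR_ERR(evsel);
 *     ...
 *     evsel__delete(evsel);
 */
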
struct evsel *evsel__new_cycles(bool precise, __u32 type, __u64 config);

struct tep_event *event_format__new(const char *sys, const char *name);

void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
void evsel__exit(struct evsel *evsel);
void evsel__delete(struct evsel *evsel);

struct callchain_param;

void evsel__config(struct evsel *evsel, struct record_opts *opts,
           struct callchain_param *callchain);
void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
                 struct callchain_param *callchain);

int __evsel__sample_size(u64 sample_type);
void evsel__calc_id_pos(struct evsel *evsel);

bool evsel__is_cache_op_valid(u8 type, u8 op);

static inline bool evsel__is_bpf(struct evsel *evsel)
{
    return evsel->bpf_counter_ops != NULL;
}

#define EVSEL__MAX_ALIASES 8

extern const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
extern const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES];
extern const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
extern const char *const evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *const evsel__sw_names[PERF_COUNT_SW_MAX];
extern char *evsel__bpf_counter_events;
bool evsel__match_bpf_counter_events(const char *name);
int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size);

int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
const char *evsel__name(struct evsel *evsel);
const char *evsel__metric_id(const struct evsel *evsel);

static inline bool evsel__is_tool(const struct evsel *evsel)
{
    return evsel->tool_event != PERF_TOOL_NONE;
}

const char *evsel__group_name(struct evsel *evsel);
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);

void __evsel__set_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);
void __evsel__reset_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);

#define evsel__set_sample_bit(evsel, bit) \
    __evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define evsel__reset_sample_bit(evsel, bit) \
    __evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)

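/*
 * Usage sketch for the sample-bit helpers above: the macros paste the
 * PERF_SAMPLE_ prefix onto the bit name, so e.g.
 *
 *     evsel__set_sample_bit(evsel, CALLCHAIN);
 *
 * expands to __evsel__set_sample_bit(evsel, PERF_SAMPLE_CALLCHAIN), adding
 * PERF_SAMPLE_CALLCHAIN to the event's sample_type.
 */
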
void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);

void arch_evsel__set_sample_weight(struct evsel *evsel);
void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr);
void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr);

int evsel__set_filter(struct evsel *evsel, const char *filter);
int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
int evsel__append_addr_filter(struct evsel *evsel, const char *filter);
int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx);
int evsel__enable(struct evsel *evsel);
int evsel__disable(struct evsel *evsel);
int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx);

int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx);
int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads);
int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
        struct perf_thread_map *threads);
void evsel__close(struct evsel *evsel);
int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
        struct perf_thread_map *threads);
bool evsel__detect_missing_features(struct evsel *evsel);

enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX };
bool evsel__increase_rlimit(enum rlimit_action *set_rlimit);

bool evsel__precise_ip_fallback(struct evsel *evsel);

struct perf_sample;

void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name);
u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name);

static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
    return evsel__rawptr(evsel, sample, name);
}

struct tep_format_field;

u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);

struct tep_format_field *evsel__field(struct evsel *evsel, const char *name);

#define evsel__match(evsel, t, c)       \
    (evsel->core.attr.type == PERF_TYPE_##t &&  \
     evsel->core.attr.config == PERF_COUNT_##c)

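/*
 * Usage sketch for evsel__match(): the macro pastes the PERF_TYPE_ and
 * PERF_COUNT_ prefixes, so
 *
 *     evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)
 *
 * is true when evsel->core.attr.type == PERF_TYPE_HARDWARE and
 * evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES.
 */
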
static inline bool evsel__match2(struct evsel *e1, struct evsel *e2)
{
    return (e1->core.attr.type == e2->core.attr.type) &&
           (e1->core.attr.config == e2->core.attr.config);
}

int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread);

int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale);

/**
 * evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel: event selector to read value
 * @cpu_map_idx: CPU of interest
 * @thread: thread of interest
 */
static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread)
{
    return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, false);
}

/**
 * evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel: event selector to read value
 * @cpu_map_idx: CPU of interest
 * @thread: thread of interest
 */
static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu_map_idx, int thread)
{
    return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, true);
}

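/*
 * Minimal read sketch, assuming 'evsel' was already opened (e.g. with
 * evsel__open_per_cpu()) and enabled: read the counter for the first CPU in
 * the map and the first thread; on success the value is updated in
 * evsel->counts.
 *
 *     int err = evsel__read_on_cpu(evsel, 0, 0);
 *
 *     if (err < 0)
 *         return err;
 */
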
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
            struct perf_sample *sample);

int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
                  u64 *timestamp);

u16 evsel__id_hdr_size(struct evsel *evsel);

static inline struct evsel *evsel__next(struct evsel *evsel)
{
    return list_entry(evsel->core.node.next, struct evsel, core.node);
}

static inline struct evsel *evsel__prev(struct evsel *evsel)
{
    return list_entry(evsel->core.node.prev, struct evsel, core.node);
}

/**
 * evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel: evsel selector to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
static inline bool evsel__is_group_leader(const struct evsel *evsel)
{
    return evsel->core.leader == &evsel->core;
}

/**
 * evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel: evsel selector to be tested
 *
 * Return %true iff event group view is enabled and @evsel is an actual group
 * leader which has other members in the group
 */
static inline bool evsel__is_group_event(struct evsel *evsel)
{
    if (!symbol_conf.event_group)
        return false;

    return evsel__is_group_leader(evsel) && evsel->core.nr_members > 1;
}

bool evsel__is_function_event(struct evsel *evsel);

static inline bool evsel__is_bpf_output(struct evsel *evsel)
{
    return evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
}

static inline bool evsel__is_clock(struct evsel *evsel)
{
    return evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
           evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}

bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize);
int evsel__open_strerror(struct evsel *evsel, struct target *target,
             int err, char *msg, size_t size);

static inline int evsel__group_idx(struct evsel *evsel)
{
    return evsel->core.idx - evsel->core.leader->idx;
}

/* Iterates group WITHOUT the leader. */
#define for_each_group_member(_evsel, _leader)                  \
for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
     (_evsel) && (_evsel)->core.leader == (&_leader->core);                 \
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))

/* Iterates group WITH the leader. */
#define for_each_group_evsel(_evsel, _leader)                   \
for ((_evsel) = _leader;                            \
     (_evsel) && (_evsel)->core.leader == (&_leader->core);                 \
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))

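/*
 * Group iteration sketch, assuming 'leader' is a group leader evsel:
 * for_each_group_member() visits only the other members, while
 * for_each_group_evsel() also visits the leader itself.
 *
 *     struct evsel *pos;
 *
 *     for_each_group_member(pos, leader)
 *         pr_debug("member: %s\n", evsel__name(pos));
 *
 *     for_each_group_evsel(pos, leader)
 *         pr_debug("event:  %s\n", evsel__name(pos));
 */
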
static inline bool evsel__has_branch_callstack(const struct evsel *evsel)
{
    return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}

static inline bool evsel__has_branch_hw_idx(const struct evsel *evsel)
{
    return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
}

static inline bool evsel__has_callchain(const struct evsel *evsel)
{
    /*
     * For reporting purposes, an evsel sample can have a recorded callchain
     * or a callchain synthesized from AUX area data.
     */
    return evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN ||
           evsel->synth_sample_type & PERF_SAMPLE_CALLCHAIN;
}

static inline bool evsel__has_br_stack(const struct evsel *evsel)
{
    /*
     * For reporting purposes, an evsel sample can have a recorded branch
     * stack or a branch stack synthesized from AUX area data.
     */
    return evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK ||
           evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool evsel__is_dummy_event(struct evsel *evsel)
{
    return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
           (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}

struct perf_env *evsel__env(struct evsel *evsel);

int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);

void evsel__zero_per_pkg(struct evsel *evsel);
bool evsel__is_hybrid(struct evsel *evsel);
struct evsel *evsel__leader(struct evsel *evsel);
bool evsel__has_leader(struct evsel *evsel, struct evsel *leader);
bool evsel__is_leader(struct evsel *evsel);
void evsel__set_leader(struct evsel *evsel, struct evsel *leader);
int evsel__source_count(const struct evsel *evsel);
void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader);

bool arch_evsel__must_be_in_group(const struct evsel *evsel);

/*
 * Macro to swap the bit-field position and size.
 * Used when:
 * - we don't need to swap the entire u64 &&
 * - the u64 has variable bit-field sizes &&
 * - the values are presented in a host endian which is different
 *   from the source endian of the perf.data file
 */
#define bitfield_swap(src, pos, size)   \
    ((((src) >> (pos)) & ((1ull << (size)) - 1)) << (63 - ((pos) + (size) - 1)))

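/*
 * Worked example: bitfield_swap(src, 3, 1) extracts the 1-bit field at bit
 * position 3 of 'src' ((src >> 3) & 0x1) and shifts it left by
 * 63 - (3 + 1 - 1) = 60, i.e. the field is mirrored to bit position 60 at
 * the other end of the u64.
 */
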
u64 evsel__bitfield_swap_branch_flags(u64 value);
#endif /* __PERF_EVSEL_H */