/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE   = 0,
        PERF_TYPE_SOFTWARE   = 1,
        PERF_TYPE_TRACEPOINT = 2,
        PERF_TYPE_HW_CACHE   = 3,
        PERF_TYPE_RAW        = 4,
        PERF_TYPE_BREAKPOINT = 5,

        PERF_TYPE_MAX,       /* non-ABI */
};

/*
 * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
 *
 * PERF_TYPE_HARDWARE:  0xEEEEEEEE000000AA
 *                      AA: hardware event ID
 *                      EEEEEEEE: PMU type ID
 * PERF_TYPE_HW_CACHE:  0xEEEEEEEE00DDCCBB
 *                      BB: hardware cache ID
 *                      CC: hardware cache op ID
 *                      DD: hardware cache op result ID
 *                      EEEEEEEE: PMU type ID
 *
 * If the PMU type ID is 0, PERF_TYPE_RAW semantics apply.
 */
#define PERF_PMU_TYPE_SHIFT 32
#define PERF_HW_EVENT_MASK  0xffffffff

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES              = 0,
        PERF_COUNT_HW_INSTRUCTIONS            = 1,
        PERF_COUNT_HW_CACHE_REFERENCES        = 2,
        PERF_COUNT_HW_CACHE_MISSES            = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS     = 4,
        PERF_COUNT_HW_BRANCH_MISSES           = 5,
        PERF_COUNT_HW_BUS_CYCLES              = 6,
        PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
        PERF_COUNT_HW_STALLED_CYCLES_BACKEND  = 8,
        PERF_COUNT_HW_REF_CPU_CYCLES          = 9,

        PERF_COUNT_HW_MAX,                    /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D  = 0,
        PERF_COUNT_HW_CACHE_L1I  = 1,
        PERF_COUNT_HW_CACHE_LL   = 2,
        PERF_COUNT_HW_CACHE_DTLB = 3,
        PERF_COUNT_HW_CACHE_ITLB = 4,
        PERF_COUNT_HW_CACHE_BPU  = 5,
        PERF_COUNT_HW_CACHE_NODE = 6,

        PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ     = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE    = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,     /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS   = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,   /* non-ABI */
};

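/*
 * Illustrative (non-normative) sketch: composing attr.config for a
 * PERF_TYPE_HW_CACHE event from the three enums above, following the
 * 0xEEEEEEEE00DDCCBB layout. Counting L1 data-cache read misses:
 *
 *      attr.type   = PERF_TYPE_HW_CACHE;
 *      attr.config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *                    (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * Leaving the upper 32 bits (the PMU type ID) zero selects the default
 * PMU as described above.
 */
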
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events count various physical
 * and software events of the kernel (and allow profiling them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK        = 0,
        PERF_COUNT_SW_TASK_CLOCK       = 1,
        PERF_COUNT_SW_PAGE_FAULTS      = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS   = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN  = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ  = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
        PERF_COUNT_SW_EMULATION_FAULTS = 8,
        PERF_COUNT_SW_DUMMY            = 9,
        PERF_COUNT_SW_BPF_OUTPUT       = 10,
        PERF_COUNT_SW_CGROUP_SWITCHES  = 11,

        PERF_COUNT_SW_MAX,             /* non-ABI */
};

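/*
 * A minimal counting sketch (illustrative; error handling omitted, and
 * syscall(2) is used directly because glibc provides no wrapper for
 * perf_event_open):
 *
 *      #include <sys/ioctl.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      struct perf_event_attr attr = {
 *              .type     = PERF_TYPE_SOFTWARE,
 *              .size     = sizeof(attr),
 *              .config   = PERF_COUNT_SW_TASK_CLOCK,
 *              .disabled = 1,
 *      };
 *      int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      ... workload ...
 *      __u64 count;
 *      read(fd, &count, sizeof(count));
 *
 * pid == 0, cpu == -1 measures the calling task on any CPU.
 */
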
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP             = 1U << 0,
        PERF_SAMPLE_TID            = 1U << 1,
        PERF_SAMPLE_TIME           = 1U << 2,
        PERF_SAMPLE_ADDR           = 1U << 3,
        PERF_SAMPLE_READ           = 1U << 4,
        PERF_SAMPLE_CALLCHAIN      = 1U << 5,
        PERF_SAMPLE_ID             = 1U << 6,
        PERF_SAMPLE_CPU            = 1U << 7,
        PERF_SAMPLE_PERIOD         = 1U << 8,
        PERF_SAMPLE_STREAM_ID      = 1U << 9,
        PERF_SAMPLE_RAW            = 1U << 10,
        PERF_SAMPLE_BRANCH_STACK   = 1U << 11,
        PERF_SAMPLE_REGS_USER      = 1U << 12,
        PERF_SAMPLE_STACK_USER     = 1U << 13,
        PERF_SAMPLE_WEIGHT         = 1U << 14,
        PERF_SAMPLE_DATA_SRC       = 1U << 15,
        PERF_SAMPLE_IDENTIFIER     = 1U << 16,
        PERF_SAMPLE_TRANSACTION    = 1U << 17,
        PERF_SAMPLE_REGS_INTR      = 1U << 18,
        PERF_SAMPLE_PHYS_ADDR      = 1U << 19,
        PERF_SAMPLE_AUX            = 1U << 20,
        PERF_SAMPLE_CGROUP         = 1U << 21,
        PERF_SAMPLE_DATA_PAGE_SIZE = 1U << 22,
        PERF_SAMPLE_CODE_PAGE_SIZE = 1U << 23,
        PERF_SAMPLE_WEIGHT_STRUCT  = 1U << 24,

        PERF_SAMPLE_MAX            = 1U << 25,      /* non-ABI */

        __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};

#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
        PERF_SAMPLE_BRANCH_USER_SHIFT       = 0,  /* user branches */
        PERF_SAMPLE_BRANCH_KERNEL_SHIFT     = 1,  /* kernel branches */
        PERF_SAMPLE_BRANCH_HV_SHIFT         = 2,  /* hypervisor branches */

        PERF_SAMPLE_BRANCH_ANY_SHIFT        = 3,  /* any branch types */
        PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT   = 4,  /* any call branch */
        PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5,  /* any return branch */
        PERF_SAMPLE_BRANCH_IND_CALL_SHIFT   = 6,  /* indirect calls */
        PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT   = 7,  /* transaction aborts */
        PERF_SAMPLE_BRANCH_IN_TX_SHIFT      = 8,  /* in transaction */
        PERF_SAMPLE_BRANCH_NO_TX_SHIFT      = 9,  /* not in transaction */
        PERF_SAMPLE_BRANCH_COND_SHIFT       = 10, /* conditional branches */

        PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
        PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT   = 12, /* indirect jumps */
        PERF_SAMPLE_BRANCH_CALL_SHIFT       = 13, /* direct call */

        PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT   = 14, /* no flags */
        PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT  = 15, /* no cycles */

        PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT  = 16, /* save branch type */

        PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT   = 17, /* save low level index of raw branch records */

        PERF_SAMPLE_BRANCH_MAX_SHIFT        /* non-ABI */
};

enum perf_branch_sample_type {
        PERF_SAMPLE_BRANCH_USER       = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
        PERF_SAMPLE_BRANCH_KERNEL     = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
        PERF_SAMPLE_BRANCH_HV         = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

        PERF_SAMPLE_BRANCH_ANY        = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_CALL   = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
        PERF_SAMPLE_BRANCH_IND_CALL   = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ABORT_TX   = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
        PERF_SAMPLE_BRANCH_IN_TX     = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
        PERF_SAMPLE_BRANCH_NO_TX      = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
        PERF_SAMPLE_BRANCH_COND       = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

        PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
        PERF_SAMPLE_BRANCH_IND_JUMP   = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
        PERF_SAMPLE_BRANCH_CALL       = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

        PERF_SAMPLE_BRANCH_NO_FLAGS   = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
        PERF_SAMPLE_BRANCH_NO_CYCLES  = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

        PERF_SAMPLE_BRANCH_TYPE_SAVE  = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

        PERF_SAMPLE_BRANCH_HW_INDEX   = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

        PERF_SAMPLE_BRANCH_MAX        = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

/*
 * Common flow change classification
 */
enum {
        PERF_BR_UNKNOWN   = 0,  /* unknown */
        PERF_BR_COND      = 1,  /* conditional */
        PERF_BR_UNCOND    = 2,  /* unconditional */
        PERF_BR_IND       = 3,  /* indirect */
        PERF_BR_CALL      = 4,  /* function call */
        PERF_BR_IND_CALL  = 5,  /* indirect function call */
        PERF_BR_RET       = 6,  /* function return */
        PERF_BR_SYSCALL   = 7,  /* syscall */
        PERF_BR_SYSRET    = 8,  /* syscall return */
        PERF_BR_COND_CALL = 9,  /* conditional function call */
        PERF_BR_COND_RET  = 10, /* conditional function return */
        PERF_BR_ERET      = 11, /* exception return */
        PERF_BR_IRQ       = 12, /* irq */
        PERF_BR_MAX,
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER|\
         PERF_SAMPLE_BRANCH_KERNEL|\
         PERF_SAMPLE_BRANCH_HV)

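/*
 * Illustrative sketch (assumes a PMU with branch-stack support, e.g. LBR):
 * sampling any taken user-level branch requires both the sample flag and
 * a branch_sample_type selection:
 *
 *      attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *      attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *                                 PERF_SAMPLE_BRANCH_ANY;
 *
 * Each sample then carries the lbr[] array described under
 * PERF_RECORD_SAMPLE below.
 */
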
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
        PERF_SAMPLE_REGS_ABI_NONE = 0,
        PERF_SAMPLE_REGS_ABI_32   = 1,
        PERF_SAMPLE_REGS_ABI_64   = 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
        PERF_TXN_ELISION        = (1 << 0), /* From elision */
        PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
        PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
        PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
        PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
        PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
        PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
        PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

        PERF_TXN_MAX            = (1 << 8), /* non-ABI */

        /* bits 32..63 are reserved for the abort code */

        PERF_TXN_ABORT_MASK  = (0xffffffffULL << 32),
        PERF_TXN_ABORT_SHIFT = 32,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64 value;
 *        { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64 id;           } && PERF_FORMAT_ID
 *        { u64 lost;         } && PERF_FORMAT_LOST
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64 nr;
 *        { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64 value;
 *          { u64 id;         } && PERF_FORMAT_ID
 *          { u64 lost;       } && PERF_FORMAT_LOST
 *        } cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
        PERF_FORMAT_ID                 = 1U << 2,
        PERF_FORMAT_GROUP              = 1U << 3,
        PERF_FORMAT_LOST               = 1U << 4,

        PERF_FORMAT_MAX                = 1U << 5, /* non-ABI */
};

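/*
 * Consumption sketch for the PERF_FORMAT_GROUP layout above (illustrative;
 * assumes the group leader was opened with
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID; MAX_COUNTERS and use()
 * are caller-chosen placeholders):
 *
 *      struct {
 *              __u64 nr;
 *              struct { __u64 value, id; } cntr[MAX_COUNTERS];
 *      } buf;
 *
 *      read(group_fd, &buf, sizeof(buf));
 *      for (__u64 i = 0; i < buf.nr; i++)
 *              use(buf.cntr[i].id, buf.cntr[i].value);
 *
 * A single read() on the group leader returns the counts of all group
 * members.
 */
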
#define PERF_ATTR_SIZE_VER0  64 /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1  72 /* add: config2 */
#define PERF_ATTR_SIZE_VER2  80 /* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3  96 /* add: sample_regs_user */
                                /* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *                    should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32 type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32 size;

        /*
         * Type specific configuration information.
         */
        __u64 config;

        union {
                __u64 sample_period;
                __u64 sample_freq;
        };

        __u64 sample_type;
        __u64 read_format;

        __u64 disabled       : 1, /* off by default         */
              inherit        : 1, /* children inherit it    */
              pinned         : 1, /* must always be on PMU  */
              exclusive      : 1, /* only group on PMU      */
              exclude_user   : 1, /* don't count user       */
              exclude_kernel : 1, /* ditto kernel           */
              exclude_hv     : 1, /* ditto hypervisor       */
              exclude_idle   : 1, /* don't count when idle  */
              mmap           : 1, /* include mmap data      */
              comm           : 1, /* include comm data      */
              freq           : 1, /* use freq, not period   */
              inherit_stat   : 1, /* per task counts        */
              enable_on_exec : 1, /* next exec enables      */
              task           : 1, /* trace fork/exit        */
              watermark      : 1, /* wakeup_watermark       */
              /*
               * precise_ip:
               *
               *  0 - SAMPLE_IP can have arbitrary skid
               *  1 - SAMPLE_IP must have constant skid
               *  2 - SAMPLE_IP requested to have 0 skid
               *  3 - SAMPLE_IP must have 0 skid
               *
               *  See also PERF_RECORD_MISC_EXACT_IP
               */
              precise_ip     : 2, /* skid constraint        */
              mmap_data      : 1, /* non-exec mmap data     */
              sample_id_all  : 1, /* sample_type all events */

              exclude_host   : 1, /* don't count in host    */
              exclude_guest  : 1, /* don't count in guest   */

              exclude_callchain_kernel : 1, /* exclude kernel callchains */
              exclude_callchain_user   : 1, /* exclude user callchains */
              mmap2          : 1, /* include mmap with inode data */
              comm_exec      : 1, /* flag comm events that are due to an exec */
              use_clockid    : 1, /* use @clockid for time fields */
              context_switch : 1, /* context switch data */
              write_backward : 1, /* Write ring buffer from end to beginning */
              namespaces     : 1, /* include namespaces data */
              ksymbol        : 1, /* include ksymbol events */
              bpf_event      : 1, /* include bpf events */
              aux_output     : 1, /* generate AUX records instead of events */
              cgroup         : 1, /* include cgroup events */
              text_poke      : 1, /* include text poke events */
              build_id       : 1, /* use build id in mmap2 events */
              inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */
              remove_on_exec : 1, /* event is removed from task on exec */
              sigtrap        : 1, /* send synchronous SIGTRAP on event */
              __reserved_1   : 26;

        union {
                __u32 wakeup_events;    /* wakeup every n events */
                __u32 wakeup_watermark; /* bytes before wakeup   */
        };

        __u32 bp_type;
        union {
                __u64 bp_addr;
                __u64 kprobe_func; /* for perf_kprobe */
                __u64 uprobe_path; /* for perf_uprobe */
                __u64 config1;     /* extension of config */
        };
        union {
                __u64 bp_len;
                __u64 kprobe_addr;  /* when kprobe_func == NULL */
                __u64 probe_offset; /* for perf_[k,u]probe */
                __u64 config2;      /* extension of config1 */
        };
        __u64 branch_sample_type; /* enum perf_branch_sample_type */

        /*
         * Defines set of user regs to dump on samples.
         * See asm/perf_regs.h for details.
         */
        __u64 sample_regs_user;

        /*
         * Defines size of the user stack to dump on samples.
         */
        __u32 sample_stack_user;

        __s32 clockid;
        /*
         * Defines set of regs to dump for each sample
         * state captured on:
         *  - precise = 0: PMU interrupt
         *  - precise > 0: sampled instruction
         *
         * See asm/perf_regs.h for details.
         */
        __u64 sample_regs_intr;

        /*
         * Wakeup watermark for AUX area
         */
        __u32 aux_watermark;
        __u16 sample_max_stack;
        __u16 __reserved_2;
        __u32 aux_sample_size;
        __u32 __reserved_3;

        /*
         * User provided data if sigtrap=1, passed back to user via
         * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
         * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
         * truncated accordingly on 32 bit architectures.
         */
        __u64 sig_data;
};

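/*
 * Sampling-rate sketch (illustrative): the sample_period/sample_freq
 * union above is interpreted according to the freq bit:
 *
 *      attr.freq          = 0;
 *      attr.sample_period = 100000;    # one sample every 100000 events
 * or:
 *      attr.freq          = 1;
 *      attr.sample_freq   = 4000;      # kernel adjusts the period to
 *                                      # target ~4000 samples/sec
 */
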
/*
 * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
 * to query bpf programs attached to the same perf tracepoint
 * as the given perf event.
 */
struct perf_event_query_bpf {
        /*
         * Length of the below ids array
         */
        __u32 ids_len;
        /*
         * Set by the kernel to indicate the number of
         * available programs
         */
        __u32 prog_cnt;
        /*
         * User provided buffer to store program ids
         */
        __u32 ids[];
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE            _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE           _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH           _IO ('$', 2)
#define PERF_EVENT_IOC_RESET             _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD            _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT        _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER        _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID                _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF           _IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT      _IOW('$', 9, __u32)
#define PERF_EVENT_IOC_QUERY_BPF         _IOWR('$', 10, struct perf_event_query_bpf *)
#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP = 1U << 0,
};

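/*
 * Typical ioctl sequence (illustrative; fd is the perf event fd of a
 * group leader):
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *      ... workload ...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 *
 * With PERF_IOC_FLAG_GROUP the operation applies to all events in the
 * group; with a zero argument it applies to the single event only.
 */
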
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32 version;        /* version number of this structure */
        __u32 compat_version; /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq, time_mult, time_shift, index, width;
         *   u64 count, enabled, running;
         *   u64 cyc, time_offset;
         *   s64 pmc = 0;
         *
         *   do {
         *     seq = pc->lock;
         *     barrier()
         *
         *     enabled = pc->time_enabled;
         *     running = pc->time_running;
         *
         *     if (pc->cap_user_time && enabled != running) {
         *       cyc = rdtsc();
         *       time_offset = pc->time_offset;
         *       time_mult   = pc->time_mult;
         *       time_shift  = pc->time_shift;
         *     }
         *
         *     index = pc->index;
         *     count = pc->offset;
         *     if (pc->cap_user_rdpmc && index) {
         *       width = pc->pmc_width;
         *       pmc = rdpmc(index - 1);
         *     }
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32 lock;         /* seqlock for synchronization */
        __u32 index;        /* hardware event identifier */
        __s64 offset;       /* add to hardware event value */
        __u64 time_enabled; /* time event active */
        __u64 time_running; /* time event on cpu */
        union {
                __u64 capabilities;
                struct {
                        __u64 cap_bit0               : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
                              cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */

                              cap_user_rdpmc         : 1, /* The RDPMC instruction can be used to read counts */
                              cap_user_time          : 1, /* The time_{shift,mult,offset} fields are used */
                              cap_user_time_zero     : 1, /* The time_zero field is used */
                              cap_user_time_short    : 1, /* the time_{cycle,mask} fields are used */
                              cap_____res            : 58;
                };
        };

        /*
         * If cap_user_rdpmc this field provides the bit-width of the value
         * read using the rdpmc() or equivalent instruction. This can be used
         * to sign extend the result like:
         *
         *   pmc <<= 64 - width;
         *   pmc >>= 64 - width; // signed shift right
         *   count += pmc;
         */
        __u16 pmc_width;

        /*
         * If cap_user_time the below fields can be used to compute the time
         * delta since time_enabled (in ns) using rdtsc or similar.
         *
         *   u64 quot, rem;
         *   u64 delta;
         *
         *   quot  = (cyc >> time_shift);
         *   rem   = cyc & (((u64)1 << time_shift) - 1);
         *   delta = time_offset + quot * time_mult +
         *           ((rem * time_mult) >> time_shift);
         *
         * Where time_offset, time_mult, time_shift and cyc are read in the
         * seqcount loop described above. This delta can then be added to
         * enabled and possibly running (if index), improving the scaling:
         *
         *   enabled += delta;
         *   if (index)
         *     running += delta;
         *
         *   quot  = count / running;
         *   rem   = count % running;
         *   count = quot * enabled + (rem * enabled) / running;
         */
        __u16 time_shift;
        __u32 time_mult;
        __u64 time_offset;
        /*
         * If cap_user_time_zero, the hardware clock (e.g. TSC) can be
         * calculated from sample timestamps.
         *
         *   time = timestamp - time_zero;
         *   quot = time / time_mult;
         *   rem  = time % time_mult;
         *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
         *
         * And vice versa:
         *
         *   quot      = cyc >> time_shift;
         *   rem       = cyc & (((u64)1 << time_shift) - 1);
         *   timestamp = time_zero + quot * time_mult +
         *               ((rem * time_mult) >> time_shift);
         */
        __u64 time_zero;

        __u32 size;         /* Header size up to __reserved[] fields. */
        __u32 __reserved_1;

        /*
         * If cap_user_time_short, the hardware clock is less than 64bit wide
         * and we must compute the 'cyc' value, as used by cap_user_time, as:
         *
         *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
         *
         * NOTE: this form is explicitly chosen such that cap_user_time_short
         *       is a correction on top of cap_user_time, and code that doesn't
         *       know about cap_user_time_short still works under the
         *       assumption the counter doesn't wrap.
         */
        __u64 time_cycles;
        __u64 time_mask;

        /*
         * Hole for extension of the self monitor capabilities
         */

        __u8 __reserved[116*8]; /* align to 1k. */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an smp_rmb()
         * after reading this value.
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data, after issuing
         * an smp_mb() to separate the data read from the ->data_tail store.
         * In this case the kernel will not overwrite unread data.
         *
         * See perf_output_put_handle() for the data ordering.
         *
         * data_{offset,size} indicate the location and size of the perf record
         * buffer within the mmapped area.
         */
        __u64 data_head;   /* head in the data section */
        __u64 data_tail;   /* user-space written tail */
        __u64 data_offset; /* where the buffer starts */
        __u64 data_size;   /* data buffer size */

        /*
         * AUX area is defined by aux_{offset,size} fields that should be set
         * by the userspace, so that
         *
         *   aux_offset >= data_offset + data_size
         *
         * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
         *
         * Ring buffer pointers aux_{head,tail} have the same semantics as
         * data_{head,tail} and same ordering rules apply.
         */
        __u64 aux_head;
        __u64 aux_tail;
        __u64 aux_offset;
        __u64 aux_size;
};

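/*
 * Mapping sketch (illustrative): the metadata page above is page 0 of the
 * event mmap; the data buffer follows and must span 2^n pages:
 *
 *      size_t page = sysconf(_SC_PAGESIZE);
 *      size_t len  = page + 8 * page;  # 1 metadata page + 2^3 data pages
 *      struct perf_event_mmap_page *pc =
 *              mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * pc->data_offset and pc->data_size then locate the record buffer within
 * this mapping.
 */
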
/*
 * The current state of perf_event_header::misc bits usage:
 * ('|' used bit, '-' unused bit)
 *
 *  012         CDEF
 *  |||---------||||
 *
 *  Where:
 *    0-2     CPUMODE_MASK
 *
 *    C       PROC_MAP_PARSE_TIMEOUT
 *    D       MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
 *    E       MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
 *    F       (reserved)
 */

#define PERF_RECORD_MISC_CPUMODE_MASK    (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL         (1 << 0)
#define PERF_RECORD_MISC_USER            (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR      (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL    (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER      (5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
 * Following PERF_RECORD_MISC_* are used on different
 * events, so can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA  (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC  (1 << 13)
#define PERF_RECORD_MISC_FORK_EXEC  (1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
/*
 * These PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that thread was preempted in TASK_RUNNING state.
 *
 * PERF_RECORD_MISC_MMAP_BUILD_ID:
 *   Indicates that mmap2 event carries build id data.
 */
#define PERF_RECORD_MISC_EXACT_IP           (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
#define PERF_RECORD_MISC_MMAP_BUILD_ID      (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)

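/*
 * Decoding sketch (illustrative): the low three bits select the CPU mode,
 * the high bits are per-record flags, e.g.:
 *
 *      int cpumode = header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 *      if (cpumode == PERF_RECORD_MISC_KERNEL)
 *              ... sample taken in kernel mode ...
 *      if (header.misc & PERF_RECORD_MISC_EXACT_IP)
 *              ... PERF_SAMPLE_IP is precise (PERF_RECORD_SAMPLE only) ...
 */
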
struct perf_event_header {
        __u32 type;
        __u16 misc;
        __u16 size;
};

struct perf_ns_link_info {
        __u64 dev;
        __u64 ino;
};

enum {
        NET_NS_INDEX    = 0,
        UTS_NS_INDEX    = 1,
        IPC_NS_INDEX    = 2,
        PID_NS_INDEX    = 3,
        USER_NS_INDEX   = 4,
        MNT_NS_INDEX    = 5,
        CGROUP_NS_INDEX = 6,

        NR_NAMESPACES, /* number of available namespaces */
};

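/*
 * Consumption sketch (illustrative; pc, base and process() are
 * placeholders: pc is the mapped metadata page, base = (char *)pc +
 * pc->data_offset, process() a caller-supplied handler). Records are
 * contiguous, sized by header.size, and may wrap at pc->data_size, which
 * this sketch glosses over:
 *
 *      __u64 head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
 *      __u64 tail = pc->data_tail;
 *
 *      while (tail < head) {
 *              struct perf_event_header *eh =
 *                      (void *)(base + (tail % pc->data_size));
 *              process(eh);
 *              tail += eh->size;
 *      }
 *      __atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
 *
 * The acquire/release pair provides the smp_rmb()/smp_mb() ordering
 * required by the data_head/data_tail comments above.
 */
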
enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * have the sample_type selected fields related to where/when
         * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
         * IDENTIFIER), described in PERF_RECORD_SAMPLE below. They are
         * stashed just after the perf_event_header and the fields already
         * present for the record type, i.e. at the end of the payload. That
         * way a newer perf.data file will be supported by older perf tools,
         * with these new optional fields being ignored.
         *
         * struct sample_id {
         *      { u32 pid, tid;  }   && PERF_SAMPLE_TID
         *      { u64 time;      }   && PERF_SAMPLE_TIME
         *      { u64 id;        }   && PERF_SAMPLE_ID
         *      { u64 stream_id; }   && PERF_SAMPLE_STREAM_ID
         *      { u32 cpu, res;  }   && PERF_SAMPLE_CPU
         *      { u64 id;        }   && PERF_SAMPLE_IDENTIFIER
         * } && perf_event_attr::sample_id_all
         *
         * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
         * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
         * relative to header.size.
         */

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u32 pid, tid;
         *      u64 addr;
         *      u64 len;
         *      u64 pgoff;
         *      char filename[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_MMAP = 1,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u64 id;
         *      u64 lost;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_LOST = 2,

        /*
         * struct {
         *      struct perf_event_header header;
         *
         *      u32 pid, tid;
         *      char comm[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_COMM = 3,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid, ppid;
         *      u32 tid, ptid;
         *      u64 time;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_EXIT = 4,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u64 time;
         *      u64 id;
         *      u64 stream_id;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_THROTTLE   = 5,
        PERF_RECORD_UNTHROTTLE = 6,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid, ppid;
         *      u32 tid, ptid;
         *      u64 time;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_FORK = 7,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid, tid;
         *
         *      struct read_format values;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_READ = 8,

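        /*
         * Illustrative note: for the record types above, the sample_id
         * trailer (present when perf_event_attr.sample_id_all is set) sits
         * at the end of the record, so a parser locates it backwards from
         * header.size. E.g. with sample_type = PERF_SAMPLE_TID |
         * PERF_SAMPLE_TIME it is the last 16 bytes (the struct name is a
         * placeholder; the layout follows the sample_id description above):
         *
         *      struct sid_tid_time { __u32 pid, tid; __u64 time; } *sid =
         *              (void *)((char *)eh + eh->size - sizeof(*sid));
         */
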
        /*
         * struct {
         *      struct perf_event_header header;
         *
         *      #
         *      # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
         *      # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
         *      # is fixed relative to header.
         *      #
         *
         *      { u64 id;        }   && PERF_SAMPLE_IDENTIFIER
         *      { u64 ip;        }   && PERF_SAMPLE_IP
         *      { u32 pid, tid;  }   && PERF_SAMPLE_TID
         *      { u64 time;      }   && PERF_SAMPLE_TIME
         *      { u64 addr;      }   && PERF_SAMPLE_ADDR
         *      { u64 id;        }   && PERF_SAMPLE_ID
         *      { u64 stream_id; }   && PERF_SAMPLE_STREAM_ID
         *      { u32 cpu, res;  }   && PERF_SAMPLE_CPU
         *      { u64 period;    }   && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format values; } && PERF_SAMPLE_READ
         *
         *      { u64 nr,
         *        u64 ips[nr]; }     && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt the
         *      # stability of its content, it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32 size;
         *        char data[size]; } && PERF_SAMPLE_RAW
         *
         *      { u64 nr;
         *        { u64 hw_idx; }    && PERF_SAMPLE_BRANCH_HW_INDEX
         *        { u64 from, to, flags } lbr[nr];
         *      }                    && PERF_SAMPLE_BRANCH_STACK
         *
         *      { u64 abi; # enum perf_sample_regs_abi
         *        u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
         *
         *      { u64 size;
         *        char data[size];
         *        u64 dyn_size; }    && PERF_SAMPLE_STACK_USER
         *
         *      { union perf_sample_weight
         *        {
         *              u64 full;    && PERF_SAMPLE_WEIGHT
         *      #if defined(__LITTLE_ENDIAN_BITFIELD)
         *              struct {
         *                      u32 var1_dw;
         *                      u16 var2_w;
         *                      u16 var3_w;
         *              }            && PERF_SAMPLE_WEIGHT_STRUCT
         *      #elif defined(__BIG_ENDIAN_BITFIELD)
         *              struct {
         *                      u16 var3_w;
         *                      u16 var2_w;
         *                      u32 var1_dw;
         *              }            && PERF_SAMPLE_WEIGHT_STRUCT
         *      #endif
         *        }
         *      }
         *      { u64 data_src;    } && PERF_SAMPLE_DATA_SRC
         *      { u64 transaction; } && PERF_SAMPLE_TRANSACTION
         *      { u64 abi; # enum perf_sample_regs_abi
         *        u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
         *      { u64 phys_addr;   } && PERF_SAMPLE_PHYS_ADDR
         *      { u64 size;
         *        char data[size]; } && PERF_SAMPLE_AUX
         *      { u64 data_page_size; } && PERF_SAMPLE_DATA_PAGE_SIZE
         *      { u64 code_page_size; } && PERF_SAMPLE_CODE_PAGE_SIZE
         * };
         */
        PERF_RECORD_SAMPLE = 9,

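        /*
         * Parsing sketch (illustrative): with
         * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME
         * | PERF_SAMPLE_PERIOD the body decodes as consecutive fields in the
         * order listed above:
         *
         *      struct {
         *              struct perf_event_header header;
         *              __u64 ip;
         *              __u32 pid, tid;
         *              __u64 time;
         *              __u64 period;
         *      } *s = (void *)eh;
         *
         * Fields not requested in sample_type are simply absent, so the
         * offsets depend on the attr the event was opened with.
         */
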
        /*
         * The MMAP2 records are an augmented version of MMAP; they add
         * maj, min, ino numbers to be used to uniquely identify each mapping.
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u32 pid, tid;
         *      u64 addr;
         *      u64 len;
         *      u64 pgoff;
         *      union {
         *              struct {
         *                      u32 maj;
         *                      u32 min;
         *                      u64 ino;
         *                      u64 ino_generation;
         *              };
         *              struct {
         *                      u8 build_id_size;
         *                      u8 __reserved_1;
         *                      u16 __reserved_2;
         *                      u8 build_id[20];
         *              };
         *      };
         *      u32 prot, flags;
         *      char filename[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_MMAP2 = 10,

        /*
         * Records that new data landed in the AUX buffer part.
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u64 aux_offset;
         *      u64 aux_size;
         *      u64 flags;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_AUX = 11,

        /*
         * Indicates that instruction trace has started
         *
         * struct {
         *      struct perf_event_header header;
         *      u32 pid;
         *      u32 tid;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_ITRACE_START = 12,

        /*
         * Records the dropped/lost sample number.
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u64 lost;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_LOST_SAMPLES = 13,

        /*
         * Records a context switch in or out (flagged by
         * PERF_RECORD_MISC_SWITCH_OUT). See also
         * PERF_RECORD_SWITCH_CPU_WIDE.
         *
         * struct {
         *      struct perf_event_header header;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_SWITCH = 14,

        /*
         * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
         * next_prev_tid that are the next (switching out) or previous
         * (switching in) pid/tid.
         *
         * struct {
         *      struct perf_event_header header;
         *      u32 next_prev_pid;
         *      u32 next_prev_tid;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_SWITCH_CPU_WIDE = 15,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid;
         *      u32 tid;
         *      u64 nr_namespaces;
         *      { u64 dev, inode; } [nr_namespaces];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_NAMESPACES = 16,

        /*
         * Record ksymbol register/unregister events:
         *
         * struct {
         *      struct perf_event_header header;
         *      u64 addr;
         *      u32 len;
         *      u16 ksym_type;
         *      u16 flags;
         *      char name[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_KSYMBOL = 17,

        /*
         * Record bpf events:
         *  enum perf_bpf_event_type {
         *      PERF_BPF_EVENT_UNKNOWN     = 0,
         *      PERF_BPF_EVENT_PROG_LOAD   = 1,
         *      PERF_BPF_EVENT_PROG_UNLOAD = 2,
         *  };
         *
         * struct {
         *      struct perf_event_header header;
         *      u16 type;
         *      u16 flags;
         *      u32 id;
         *      u8 tag[BPF_TAG_SIZE];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_BPF_EVENT = 18,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u64 id;
         *      char path[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_CGROUP = 19,

        /*
         * Records changes to kernel text, i.e. self-modified code. 'old_len'
         * is the number of old bytes, 'new_len' is the number of new bytes.
         * Either 'old_len' or 'new_len' may be zero to indicate, for example,
         * the addition or removal of a trampoline. 'bytes' contains the old
         * bytes followed immediately by the new bytes.
         *
         * struct {
         *      struct perf_event_header header;
         *      u64 addr;
         *      u16 old_len;
         *      u16 new_len;
         *      u8 bytes[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_TEXT_POKE = 20,

        /*
         * Data written to the AUX area by hardware due to aux_output may need
         * to be matched to the event by an architecture-specific hardware ID.
         * This records the hardware ID, but requires sample_id to provide the
         * event ID, e.g. Intel PT uses this record to disambiguate PEBS-via-PT
         * records from multiple events.
         *
         * struct {
         *      struct perf_event_header header;
         *      u64 hw_id;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_AUX_OUTPUT_HW_ID = 21,

        PERF_RECORD_MAX, /* non-ABI */
};

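/*
 * Dispatch sketch (illustrative):
 *
 *      switch (eh->type) {
 *      case PERF_RECORD_SAMPLE: ...; break;
 *      case PERF_RECORD_MMAP2:  ...; break;
 *      case PERF_RECORD_LOST:   ...; break;
 *      default:                 break; # skip via header.size
 *      }
 *
 * Unknown record types can be skipped safely because every record carries
 * its total size in perf_event_header::size.
 */
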
enum perf_record_ksymbol_type {
        PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
        PERF_RECORD_KSYMBOL_TYPE_BPF     = 1,
        /*
         * Out of line code such as kprobe-replaced instructions or optimized
         * kprobes or ftrace trampolines.
         */
        PERF_RECORD_KSYMBOL_TYPE_OOL     = 2,
        PERF_RECORD_KSYMBOL_TYPE_MAX     /* non-ABI */
};

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0)

enum perf_bpf_event_type {
        PERF_BPF_EVENT_UNKNOWN     = 0,
        PERF_BPF_EVENT_PROG_LOAD   = 1,
        PERF_BPF_EVENT_PROG_UNLOAD = 2,
        PERF_BPF_EVENT_MAX,        /* non-ABI */
};

#define PERF_MAX_STACK_DEPTH        127
#define PERF_MAX_CONTEXTS_PER_STACK   8

enum perf_callchain_context {
        PERF_CONTEXT_HV           = (__u64)-32,
        PERF_CONTEXT_KERNEL       = (__u64)-128,
        PERF_CONTEXT_USER         = (__u64)-512,

        PERF_CONTEXT_GUEST        = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER   = (__u64)-2560,

        PERF_CONTEXT_MAX          = (__u64)-4095,
};

/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED            0x01   /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE            0x02   /* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL              0x04   /* record contains gaps */
#define PERF_AUX_FLAG_COLLISION            0x08   /* sample collided with another */
#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */

/* CoreSight PMU AUX buffer formats */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */
#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW       0x0100 /* Raw format of the source */

#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT   (1UL << 1)
#define PERF_FLAG_PID_CGROUP  (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC  (1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
        __u64 val;
        struct {
                __u64 mem_op:5,      /* type of opcode */
                      mem_lvl:14,    /* memory hierarchy level */
                      mem_snoop:5,   /* snoop mode */
                      mem_lock:2,    /* lock instr */
                      mem_dtlb:7,    /* tlb access */
                      mem_lvl_num:4, /* memory hierarchy level number */
                      mem_remote:1,  /* remote */
                      mem_snoopx:2,  /* snoop mode, ext */
                      mem_blk:3,     /* access blocked */
                      mem_hops:3,    /* hop level */
                      mem_rsvd:18;
        };
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
        __u64 val;
        struct {
                __u64 mem_rsvd:18,
                      mem_hops:3,    /* hop level */
                      mem_blk:3,     /* access blocked */
                      mem_snoopx:2,  /* snoop mode, ext */
                      mem_remote:1,  /* remote */
                      mem_lvl_num:4, /* memory hierarchy level number */
                      mem_dtlb:7,    /* tlb access */
                      mem_lock:2,    /* lock instr */
                      mem_snoop:5,   /* snoop mode */
                      mem_lvl:14,    /* memory hierarchy level */
                      mem_op:5;      /* type of opcode */
        };
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch, code) */
#define PERF_MEM_OP_NA     0x01 /* not available */
#define PERF_MEM_OP_LOAD   0x02 /* load instruction */
#define PERF_MEM_OP_STORE  0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
#define PERF_MEM_OP_EXEC   0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT  0

/*
 * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
 * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
 * This namespace is kept in order not to break defined ABIs.
 *
 * memory hierarchy (memory level, hit or miss)
 */
#define PERF_MEM_LVL_NA       0x01   /* not available */
#define PERF_MEM_LVL_HIT      0x02   /* hit level */
#define PERF_MEM_LVL_MISS     0x04   /* miss level */
#define PERF_MEM_LVL_L1       0x08   /* L1 */
#define PERF_MEM_LVL_LFB      0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2       0x20   /* L2 */
#define PERF_MEM_LVL_L3       0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM  0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1 0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2 0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1 0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2 0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO       0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC      0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT    5

#define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */
#define PERF_MEM_REMOTE_SHIFT  37

#define PERF_MEM_LVLNUM_L1        0x01 /* L1 */
#define PERF_MEM_LVLNUM_L2        0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3        0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4        0x04 /* L4 */
/* 5-0xa available */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB       0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM       0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM      0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA        0x0f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT     33

/* snoop mode */
#define PERF_MEM_SNOOP_NA    0x01 /* not available */
#define PERF_MEM_SNOOP_NONE  0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT   0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS  0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM  0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT 19

#define PERF_MEM_SNOOPX_FWD   0x01 /* forward */
/* 1 free */
#define PERF_MEM_SNOOPX_SHIFT 38

/* locked instruction */
#define PERF_MEM_LOCK_NA     0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT  24

/* TLB access */
#define PERF_MEM_TLB_NA    0x01 /* not available */
#define PERF_MEM_TLB_HIT   0x02 /* hit level */
#define PERF_MEM_TLB_MISS  0x04 /* miss level */
#define PERF_MEM_TLB_L1    0x08 /* L1 */
#define PERF_MEM_TLB_L2    0x10 /* L2 */
#define PERF_MEM_TLB_WK    0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS    0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT 26

/* Access blocked */
#define PERF_MEM_BLK_NA    0x01 /* not available */
#define PERF_MEM_BLK_DATA  0x02 /* data could not be forwarded */
#define PERF_MEM_BLK_ADDR  0x04 /* address conflict */
#define PERF_MEM_BLK_SHIFT 40

/* hop level */
#define PERF_MEM_HOPS_0     0x01 /* remote core, same node */
#define PERF_MEM_HOPS_1     0x02 /* remote node, same socket */
#define PERF_MEM_HOPS_2     0x03 /* remote socket, same board */
#define PERF_MEM_HOPS_3     0x04 /* remote board */
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT 43

#define PERF_MEM_S(a, s) \
        (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. If it is not supported,
 * mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 */
struct perf_branch_entry {
        __u64 from;
        __u64 to;
        __u64 mispred   : 1,  /* target mispredicted */
              predicted : 1,  /* target predicted */
              in_tx     : 1,  /* in transaction */
              abort     : 1,  /* transaction abort */
              cycles    : 16, /* cycle count to last branch */
              type      : 4,  /* branch type */
              reserved  : 40;
};

union perf_sample_weight {
        __u64 full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
        struct {
                __u32 var1_dw;
                __u16 var2_w;
                __u16 var3_w;
        };
#elif defined(__BIG_ENDIAN_BITFIELD)
        struct {
                __u16 var3_w;
                __u16 var2_w;
                __u32 var1_dw;
        };
#else
#error "Unknown endianness"
#endif
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */