/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

#define PERF_GUEST_ACTIVE	0x01
#define PERF_GUEST_USER		0x02

struct perf_guest_info_callbacks {
	unsigned int			(*state)(void);
	unsigned long			(*get_ip)(void);
	unsigned int			(*handle_intel_pt_intr)(void);
};
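
/*
 * Illustrative sketch (not part of this header): a hypervisor module
 * would fill these callbacks in roughly like this; the my_guest_*()
 * helpers are hypothetical, only the signatures and the PERF_GUEST_*
 * bits come from the definitions above.
 *
 *	static unsigned int my_guest_state(void)
 *	{
 *		unsigned int state = 0;
 *
 *		if (my_guest_is_running())		// hypothetical
 *			state |= PERF_GUEST_ACTIVE;
 *		if (my_guest_in_user_mode())		// hypothetical
 *			state |= PERF_GUEST_USER;
 *		return state;
 *	}
 *
 *	static struct perf_guest_info_callbacks my_cbs = {
 *		.state	= my_guest_state,
 *		.get_ip	= my_guest_get_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_cbs);
 */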

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
#include <linux/static_call.h>
#include <asm/local.h>
struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[];	/* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;
	u32				max_stack;
	u32				nr;
	short				contexts;
	bool				contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};
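
/*
 * Illustrative sketch (an assumption, not from this header): in-kernel
 * users typically emit a single-fragment raw record; with ->copy left
 * NULL the core copies ->size bytes straight out of ->data.
 *
 *	struct perf_raw_record raw = {
 *		.frag = {
 *			.copy	= NULL,
 *			.data	= buf,		// caller-owned buffer
 *			.size	= buf_size,
 *		},
 *	};
 *
 *	perf_sample_data_init(&data, 0, 0);
 *	data.raw = &raw;
 */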

/*
 * branch stack layout:
 *  nr: number of taken branches stored in branch buffers
 *  hw_idx: The low level index of raw branch records
 *          for the most recent branch.
 *          -1ULL means invalid/unknown.
 *
 * Note that nr can vary from sample to sample
 * depending on how many taken branches were there
 * before the sampling period expired.
 *
 * The layout of each entry is defined by struct perf_branch_entry in
 * include/uapi/linux/perf_event.h.
 */
struct perf_branch_stack {
	__u64				nr;
	__u64				hw_idx;
	struct perf_branch_entry	entries[];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/*
 * hw_perf_event::flag values
 *
 * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
 * usage.
 */
#define PERF_EVENT_FLAG_ARCH		0x0000ffff
#define PERF_EVENT_FLAG_USER_READ_CNT	0x80000000

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
		struct { /* amd_iommu */
			u8	iommu_bank;
			u8	iommu_cntr;
			u16	padding;
			u64	conf;
			u64	conf1;
		};
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	union {
		struct { /* Sampling */
			/*
			 * The period we started this sample with.
			 */
			u64				last_period;

			/*
			 * However much is left of the current period;
			 * note that this is a full 64bit value and
			 * allows for generation of periods longer
			 * than hardware might allow.
			 */
			local64_t			period_left;
		};
		struct { /* Topdown events counting for context switch */
			u64				saved_metric;
			u64				saved_slots;
		};
	};

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif /* CONFIG_PERF_EVENTS */
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD	0x1	/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ	0x2	/* txn to read event group from PMU */

/*
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x0001
#define PERF_PMU_CAP_NO_NMI			0x0002
#define PERF_PMU_CAP_AUX_NO_SG			0x0004
#define PERF_PMU_CAP_EXTENDED_REGS		0x0008
#define PERF_PMU_CAP_EXCLUSIVE			0x0010
#define PERF_PMU_CAP_ITRACE			0x0020
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x0040
#define PERF_PMU_CAP_NO_EXCLUDE			0x0080
#define PERF_PMU_CAP_AUX_OUTPUT			0x0100
#define PERF_PMU_CAP_EXTENDED_HW_TYPE		0x0200

struct perf_output_handle;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const struct attribute_group	**attr_update;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int __percpu			*pmu_disable_count;
	struct perf_cpu_context __percpu *pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for ensuring consistency wrt schedulability.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */

	/*
	 * Flags for ->add()/->del()/ ->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event, this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the
	 * event is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
	 *  ->stop() that must deal with already being stopped without
	 *  PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the
	 * event is on -- will be called from NMI context when the PMU
	 * generates a PMI.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add group
	 * events as a whole and perform one schedulability test. If the
	 * test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this
	 * event; if no implementation is provided it defaults to
	 * event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);

	/*
	 * Kmem cache of PMU specific data
	 */
	struct kmem_cache		*task_ctx_cache;

	/*
	 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
	 * can be synchronized using this function. See Intel LBR callstack support
	 * implementation and Perf core context switch handling callbacks for usage
	 * examples.
	 */
	void (*swap_task_ctx)		(struct perf_event_context *prev,
					 struct perf_event_context *next);
					/* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(struct perf_event *event, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Take a snapshot of the AUX buffer without touching the event
	 * state, so that preempting ->start()/->stop() callbacks does
	 * not interfere with their logic. Called in PMI context.
	 *
	 * Returns the size of AUX data copied to the output handle.
	 *
	 * Optional.
	 */
	long (*snapshot_aux)		(struct perf_event *event,
					 struct perf_output_handle *handle,
					 unsigned long size);

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, there is no
	 * guarantee that the list is not modified while the callback runs.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Check if event can be used for aux_output purposes for
	 * events of this PMU.
	 *
	 * Runs from perf_event_open(). Should return 0 for "no match"
	 * or non-zero for "match".
	 */
	int (*aux_output_match)		(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period)		(struct perf_event *event, u64 value);
};
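
/*
 * Illustrative sketch (an assumption, not from this file) of a minimal
 * counting PMU built on the callbacks above; every my_pmu_*() and
 * my_hw_*() name is hypothetical:
 *
 *	static void my_pmu_read(struct perf_event *event)
 *	{
 *		u64 prev = local64_read(&event->hw.prev_count);
 *		u64 now  = my_hw_read_counter();	// hypothetical
 *
 *		local64_set(&event->hw.prev_count, now);
 *		local64_add(now - prev, &event->count);
 *	}
 *
 *	static int my_pmu_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != my_pmu.type)
 *			return -ENOENT;		// not for this PMU
 *		return 0;
 *	}
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 *		.event_init	= my_pmu_event_init,
 *		.add		= my_pmu_add,
 *		.del		= my_pmu_del,
 *		.start		= my_pmu_start,
 *		.stop		= my_pmu_stop,
 *		.read		= my_pmu_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 */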

enum perf_addr_filter_action_t {
	PERF_ADDR_FILTER_ACTION_STOP = 0,
	PERF_ADDR_FILTER_ACTION_START,
	PERF_ADDR_FILTER_ACTION_FILTER,
};

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @path:	object file's path for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size (size==0 means single address trigger)
 * @action:	filter/start/stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head		entry;
	struct path			path;
	unsigned long			offset;
	unsigned long			size;
	enum perf_addr_filter_action_t	action;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	unsigned int		nr_file_filters;
};

struct perf_addr_filter_range {
	unsigned long		start;
	unsigned long		size;
};

/**
 * enum perf_event_state - the states of an event:
 */
enum perf_event_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and groups caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
 * cannot be a group leader. If an event with this flag is detached from the
 * group it is scheduled out and moved into an unrecoverable ERROR state.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
#define PERF_EV_CAP_SIBLING		BIT(2)

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08
#define PERF_ATTACH_ITRACE	0x10
#define PERF_ATTACH_SCHED_CB	0x20
#define PERF_ATTACH_CHILD	0x40

struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

#define for_each_sibling_event(sibling, event)			\
	if ((event)->group_leader == (event))			\
		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
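
/*
 * Illustrative sketch: iterate a group leader's siblings; per the
 * leader check above, the loop body never runs for a non-leader.
 *
 *	struct perf_event *sibling;
 *	int n = 0;
 *
 *	for_each_sibling_event(sibling, leader)
 *		n++;			// ends up == leader->nr_siblings
 */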

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either sufficies for read.
	 */
	struct list_head		sibling_list;
	struct list_head		active_list;
	/*
	 * Node on the pinned or flexible tree located at the event context;
	 */
	struct rb_node			group_node;
	u64				group_index;
	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group in tact which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int				event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_state		state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 */
	u64				total_time_enabled;
	u64				total_time_running;
	u64				tstamp;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct perf_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	unsigned long			pending_addr;	/* SIGTRAP */
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	struct perf_addr_filter_range	*addr_filter_ranges;
	unsigned long			addr_filters_gen;

	/* for aux_output events */
	struct perf_event		*aux_event;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	atomic64_t			lost_samples;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t		orig_overflow_handler;
	struct bpf_prog			*prog;
	u64				bpf_cookie;
#endif

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup event is attach to */
#endif

#ifdef CONFIG_SECURITY
	void				*security;
#endif
	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};

struct perf_event_groups {
	struct rb_root	tree;
	u64		index;
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the lock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct perf_event_groups	pinned_groups;
	struct perf_event_groups	flexible_groups;
	struct list_head		event_list;

	struct list_head		pinned_active;
	struct list_head		flexible_active;

	int				nr_events;
	int				nr_active;
	int				nr_user;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	/*
	 * Set when nr_events != nr_active, except tolerant to events not
	 * necessary to be active due to scheduling constraints, such as cgroups.
	 */
	int				rotate_necessary;
	refcount_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;
	u64				timeoffset;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	/* cgroup evts */
#endif
	void				*task_ctx_data;	/* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
	struct list_head		cgrp_cpuctx_entry;
#endif

	struct list_head		sched_cb_entry;
	int				sched_cb_usage;

	int				online;
	/*
	 * Per-CPU storage for iterators used in visit_groups_merge. The default
	 * storage is of size 2 to hold the CPU and any CPU event iterators.
	 */
	int				heap_size;
	struct perf_event		**heap;
	struct perf_event		*heap_default[2];
};

struct perf_output_handle {
	struct perf_event		*event;
	struct perf_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	u64				aux_flags;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

struct bpf_perf_event_data_kern {
	bpf_user_pt_regs_t		*regs;
	struct perf_sample_data	*data;
	struct perf_event		*event;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
	u64				timeoffset;
	int				active;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);

extern void perf_pmu_resched(struct pmu *pmu);

extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
int perf_event_read_local(struct perf_event *event, u64 *value,
			  u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
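
/*
 * Illustrative sketch (an assumption, not from this header): creating
 * and reading an in-kernel cycle counter pinned to CPU 0.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *event;
 *	u64 value, enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *
 *	value = perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */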

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	union perf_sample_weight	weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	} tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	} cpu_entry;
	struct perf_callchain_entry	*callchain;
	u64				aux_size;

	struct perf_regs		regs_user;
	struct perf_regs		regs_intr;
	u64				stack_user_size;

	u64				phys_addr;
	u64				cgroup;
	u64				data_page_size;
	u64				code_page_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)  |\
		     PERF_MEM_S(SNOOP, NA)|\
		     PERF_MEM_S(LOCK, NA) |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight.full = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}
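
/*
 * Illustrative sketch of the usual PMI-handler pattern (an assumption;
 * my_pmu_stop() is hypothetical):
 *
 *	struct perf_sample_data data;
 *	struct pt_regs *regs = get_irq_regs();
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		my_pmu_stop(event, 0);	// core asked us to stop the event
 */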

/*
 * Clear all bitfields in the perf_branch_entry.
 * The to and from fields are not cleared because they are
 * systematically modified by caller.
 */
static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
{
	br->mispred = 0;
	br->predicted = 0;
	br->in_tx = 0;
	br->abort = 0;
	br->cycles = 0;
	br->type = 0;
	br->reserved = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern int perf_event_output(struct perf_event *event,
			     struct perf_sample_data *data,
			     struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool event_has_any_exclude_flag(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;

	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv ||
	       attr->exclude_guest || attr->exclude_host;
}

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

/*
 * Return 1 for event in sw context, 0 for event in hw context
 */
static inline int in_software_context(struct perf_event *event)
{
	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
}

static inline int is_exclusive_pmu(struct pmu *pmu)
{
	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * When generating a perf sample in-line, instead of from an interrupt /
 * exception, we lack a pt_regs. This is typically used from software events
 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
 *
 * We typically don't need a full set, but (for x86) do require:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - sp for PERF_SAMPLE_CALLCHAIN
 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
 *
 * NOTE: assumes @regs is otherwise already 0 filled; this is important for
 * things like PERF_SAMPLE_REGS_INTR.
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}
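
/*
 * Illustrative sketch: this is how e.g. the page-fault path emits a
 * software event (compare PERF_COUNT_SW_PAGE_FAULTS in mm/memory.c):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */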

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler; it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

	perf_fetch_caller_regs(regs);
	___perf_sw_event(event_id, nr, regs, addr);
}

extern struct static_key_false perf_sched_events;

static __always_inline bool __perf_sw_enabled(int swevt)
{
	return static_key_false(&perf_swevent_enabled[swevt]);
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
	    task->sched_migrated) {
		__perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
		__perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

#ifdef CONFIG_CGROUP_PERF
	if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) &&
	    perf_cgroup_from_task(prev, NULL) !=
	    perf_cgroup_from_task(next, NULL))
		__perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0);
#endif

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
			       bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
				 enum perf_bpf_event_type type,
				 u16 flags);

#ifdef CONFIG_GUEST_PERF_EVENTS
extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;

DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);

static inline unsigned int perf_guest_state(void)
{
	return static_call(__perf_guest_state)();
}
static inline unsigned long perf_guest_get_ip(void)
{
	return static_call(__perf_guest_get_ip)();
}
static inline unsigned int perf_guest_handle_intel_pt_intr(void)
{
	return static_call(__perf_guest_handle_intel_pt_intr)();
}
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
#else
static inline unsigned int perf_guest_state(void)		  { return 0; }
static inline unsigned long perf_guest_get_ip(void)		  { return 0; }
static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
#endif /* CONFIG_GUEST_PERF_EVENTS */

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
extern void perf_event_text_poke(const void *addr,
				 const void *old_bytes, size_t old_len,
				 const void *new_bytes, size_t new_len);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
extern void put_callchain_entry(int rctx);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1;
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1;
	}
}
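
/*
 * Illustrative sketch (an assumption): an architecture's kernel
 * callchain walker feeds each return address to perf_callchain_store()
 * until it reports that the entry is full.
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long pc = instruction_pointer(regs);
 *
 *		if (perf_callchain_store(entry, pc))
 *			return;
 *		// then walk stack frames with the arch unwinder,
 *		// storing each frame's pc the same way
 *	}
 */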

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

int perf_proc_update_handler(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos);

/* Access to perf_event_open(2) syscall. */
#define PERF_SECURITY_OPEN		0

/* Finer grained perf_event_open(2) access control. */
#define PERF_SECURITY_CPU		1
#define PERF_SECURITY_KERNEL		2
#define PERF_SECURITY_TRACEPOINT	3

static inline int perf_is_paranoid(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline int perf_allow_kernel(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
		return -EACCES;

	return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
}

static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
		return -EACCES;

	return security_perf_event_open(attr, PERF_SECURITY_CPU);
}

static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
		return -EPERM;

	return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = &event->addr_filters;

	if (event->parent)
		ifh = &event->parent->addr_filters;

	return ifh;
}

extern void perf_event_addr_filters_sync(struct perf_event *event);
extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_sample_data *data,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				     struct perf_sample_data *data,
				     struct perf_event *event,
				     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_sample_data *data,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
				 struct perf_output_handle *handle,
				 unsigned long from, unsigned long to);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
									{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child,
				       u64 clone_flags)			{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
static inline const struct perf_event *perf_get_event(struct file *file)
{
	return ERR_PTR(-EINVAL);
}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline int perf_event_read_local(struct perf_event *event, u64 *value,
					u64 *enabled, u64 *running)
{
	return -EINVAL;
}
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }

typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
				      bool unregister, const char *sym)	{ }
static inline void perf_event_bpf_event(struct bpf_prog *prog,
					enum perf_bpf_event_type type,
					u16 flags)			{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_text_poke(const void *addr,
					const void *old_bytes,
					size_t old_len,
					const void *new_bytes,
					size_t new_len)			{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
static inline int perf_event_period(struct perf_event *event, u64 value)
{
	return -EINVAL;
}
static inline u64 perf_event_pause(struct perf_event *event, bool reset)
{
	return 0;
}
#endif /* CONFIG_PERF_EVENTS */

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
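
/*
 * Illustrative sketch (an assumption, modeled on in-kernel users such
 * as the throttle-record path): emitting a small custom record through
 * the output machinery declared above.
 *
 *	struct {
 *		struct perf_event_header	header;
 *		u64				payload;
 *	} rec = {
 *		.header = {
 *			.type = PERF_RECORD_SAMPLE,	// illustrative type
 *			.size = sizeof(rec),
 *		},
 *		.payload = 42,
 *	};
 *	struct perf_output_handle handle;
 *	struct perf_sample_data sample;
 *
 *	perf_event_header__init_id(&rec.header, &sample, event);
 *	if (perf_output_begin(&handle, &sample, event, rec.header.size))
 *		return;
 *	perf_output_put(&handle, rec);
 *	perf_event__output_id_sample(event, &handle, &sample);
 *	perf_output_end(&handle);
 */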

struct perf_pmu_events_attr {
	struct device_attribute	attr;
	u64			id;
	const char		*event_str;
};

struct perf_pmu_events_ht_attr {
	struct device_attribute	attr;
	u64			id;
	const char		*event_str_ht;
	const char		*event_str_noht;
};

struct perf_pmu_events_hybrid_attr {
	struct device_attribute	attr;
	u64			id;
	const char		*event_str;
	u64			pmu_type;
};

struct perf_pmu_format_hybrid_attr {
	struct device_attribute	attr;
	u64			pmu_type;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   = _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						\
	.event_str	= _str,						\
};

#define PMU_EVENT_ATTR_ID(_name, _show, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, _show, NULL),		\
		  .id = _id, }						\
	})[0].attr.attr)

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
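
/*
 * Illustrative sketch: a PMU driver typically describes its config
 * layout with PMU_FORMAT_ATTR and exposes it through an attribute
 * group (my_format_attrs is a hypothetical name):
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 */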

#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#endif

extern void __weak arch_perf_update_userpage(struct perf_event *event,
					     struct perf_event_mmap_page *userpg,
					     u64 now);

#ifdef CONFIG_MMU
extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
#endif

/*
 * Snapshot branch stack on software events.
 *
 * Branch stack can be very useful in understanding software events. For
 * example, when a long function, e.g. sys_perf_event_open, returns an
 * errno, it is not obvious why the function failed. Branch stack could
 * provide very helpful information in this type of scenarios.
 *
 * On software event, it is necessary to stop the hardware branch recorder
 * fast. Otherwise, the hardware register/buffer will be flushed with
 * entries of the triggering event. Therefore, static call is used to
 * stop the hardware recorder.
 */

/*
 * cnt is the number of entries allocated for entries.
 * Return number of entries copied to.
 */
typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
					   unsigned int cnt);
DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);

#ifndef PERF_NEEDS_LOPWR_CB
static inline void perf_lopwr_cb(bool mode)
{
}
#endif

#endif /* _LINUX_PERF_EVENT_H */