// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

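/*
 * Size of one callchain scratch entry: the perf_callchain_entry header
 * plus one __u64 slot per stack entry and per context marker, both of
 * which are capped by the sysctls above.
 */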
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

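/*
 * One recursion counter per context (task, softirq, hardirq, NMI) so a
 * callchain capture cannot re-enter itself on the same CPU.
 */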
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

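/*
 * Weak default stubs: architectures with perf callchain support
 * override these with real kernel/user stack walkers.
 */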
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

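/* RCU callback: by now no NMI can still hold a reference to the old buffers. */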
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

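/*
 * Unpublish the buffers first, then free them after a grace period so
 * in-flight users (e.g. from NMI context) of the old pointer stay valid.
 */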
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * routine.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	/* kfree(NULL) is a no-op, so the partially-filled array is fine. */
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

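/*
 * Take a reference on the callchain buffers, allocating them for the
 * first user. Returns -EOVERFLOW when the event asks for a deeper
 * stack than the global sysctl allows.
 */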
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If requesting per event more than the global cap,
	 * return a different error to help userspace figure
	 * this out.
	 *
	 * And also do it here so that we have &callchain_mutex held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count == 1)
		err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

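/* Drop a reference; the last user schedules the buffers for release. */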
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

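/*
 * Claim this CPU's scratch entry for the current recursion context;
 * returns NULL if we are already recursing or no buffers exist. A
 * successful call must be paired with put_callchain_entry(*rctx).
 */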
struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries) {
		/* No buffers allocated: release the recursion slot again. */
		put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx);
		return NULL;
	}

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

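/*
 * Capture a callchain into a per-CPU entry. When both @kernel and @user
 * are requested, each half is preceded by a PERF_CONTEXT_* marker (if
 * @add_mark) so tools can tell them apart.
 *
 * A typical caller (sketch only, not verbatim core.c) looks like:
 *
 *	entry = get_perf_callchain(regs, 0,
 *				   !attr->exclude_callchain_kernel,
 *				   !attr->exclude_callchain_user,
 *				   attr->sample_max_stack,
 *				   crosstask, true);
 */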
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (!entry)
		return NULL;

	ctx.entry = entry;
	ctx.max_stack = max_stack;
	ctx.nr = entry->nr = init_nr;
	ctx.contexts = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			/*
			 * We came in from the kernel: unwind from the task's
			 * saved user registers. Kernel threads have no mm and
			 * thus no user context to unwind.
			 */
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			perf_callchain_user(&ctx, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	/* Parse into a local copy so a rejected write leaves *value intact. */
	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Resizing is only safe while no events are using the buffers. */
	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}