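// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */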
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
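
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 */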
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
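		/*
		 * Some BPF program is already running on this CPU, so don't
		 * call into another BPF program (same or different), and
		 * don't send a kprobe event into the ring buffer; just
		 * return zero.
		 */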
		ret = 0;
		goto out;
	}
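
	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, callers use bpf_prog_array_valid() as a
	 * heuristic to check whether call->prog_array is empty. The proper
	 * rcu_dereference() happens here under the RCU read lock; if
	 * prog_array turned NULL in the meantime, bpf_prog_run_array()
	 * handles it gracefully.
	 */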
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;
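
	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user code keys a
	 * hash map with `dst`, semantically identical strings could then
	 * occupy multiple entries in the map.
	 */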
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;
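
	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*(). Thus, memory
	 * is explicitly cleared only in the error case, so that improper users
	 * ignoring the return code altogether don't copy garbage; otherwise
	 * the length of the string is returned, which can be used for
	 * bpf_perf_event_output() et al.
	 */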
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
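	/*
	 * Ensure we're in a user context which is safe for the helper to
	 * run; this helper has no business in a kthread or in a task that
	 * is exiting. nmi_uaccess_okay() ensures the probe is not run in a
	 * context (e.g. NMI during a temporary mm switch) where user space
	 * accesses are not safe.
	 */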
	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	u32 *bin_args;
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
				  MAX_TRACE_PRINTK_VARARGS);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
};

static void __set_printk_clr_event(void)
{
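	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program
	 * calling bpf_trace_printk() the user has expressed a
	 * willingness to see these prints, so try to enable them
	 * anyway.
	 */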
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
	   u32, data_len)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func		= bpf_trace_vprintk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, bin_args);

	bpf_bprintf_cleanup();

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
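	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */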
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}
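
/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output().
 */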
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func		= bpf_task_pt_regs,
	.gpl_only	= true,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;
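
	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */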
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
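		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */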
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;
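
		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */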
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};

#define BTF_F_ALL	(BTF_F_COMPACT | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
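	/* This helper call is inlined by verifier. */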
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func		= bpf_get_func_ip_tracing,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct kprobe *kp = kprobe_running();

	return kp ? (uintptr_t)kp->addr : 0;
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func		= bpf_get_func_ip_kprobe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func		= bpf_get_func_ip_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func		= bpf_get_attach_cookie_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func		= bpf_get_attach_cookie_trace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func		= bpf_get_attach_cookie_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func		= bpf_get_attach_cookie_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func		= bpf_get_branch_snapshot,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
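	/* This helper call is inlined by verifier. */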
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func		= get_func_arg,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
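	/* This helper call is inlined by verifier. */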
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func		= get_func_ret,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
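	/* This helper call is inlined by verifier. */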
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func		= get_func_arg_cnt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : bpf_get_probe_write_proto();
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
	case BPF_FUNC_copy_from_user_task:
		return prog->aux->sleepable ? &bpf_copy_from_user_task_proto : NULL;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_task_storage_get:
		return &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		return &bpf_task_storage_delete_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_tracing;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_find_vma:
		return &bpf_find_vma_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	case BPF_FUNC_get_func_ip:
		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
			&bpf_get_func_ip_proto_kprobe_multi :
			&bpf_get_func_ip_proto_kprobe;
	case BPF_FUNC_get_attach_cookie:
		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
			&bpf_get_attach_cookie_proto_kmulti :
			&bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}
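
/* bpf+kprobe programs can access fields of 'struct pt_regs' */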
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
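	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */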
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
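
	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */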
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
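
	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */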
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_pe;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}
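
/*
 * Per-CPU scratch pt_regs for raw tracepoint programs, kept separate from the
 * bpf_pt_regs used by bpf_event_output() to avoid recursive reuse. Like the
 * sample data above, three slots cover nesting from normal, irq and nmi
 * context.
 */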
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;
extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;

	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
	case BPF_FUNC_skc_to_unix_sock:
		return &bpf_skc_to_unix_sock_proto;
	case BPF_FUNC_skc_to_mptcp_sock:
		return &bpf_skc_to_mptcp_sock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_tracing_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_tracing_proto;
	case BPF_FUNC_sock_from_file:
		return &bpf_sock_from_file_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_ptr_cookie_proto;
	case BPF_FUNC_xdp_get_buff_len:
		return &bpf_xdp_get_buff_len_trace_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_seq_printf_btf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_btf_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	case BPF_FUNC_get_func_arg:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
	case BPF_FUNC_get_func_ret:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
	case BPF_FUNC_get_func_arg_cnt:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
	case BPF_FUNC_get_attach_cookie:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
	default:
		fn = raw_tp_prog_func_proto(func_id, prog);
		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
			fn = bpf_iter_get_func_proto(func_id, prog);
		return fn;
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_ctx_access(off, size, type);
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto		= tracing_prog_func_proto,
	.is_valid_access	= tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog,
			       u64 bpf_cookie)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;
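
	/*
	 * Kprobe override only works if the program is attached to the
	 * function entry, and only if the probed function supports error
	 * injection.
	 */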
1914 if (prog->kprobe_override &&
1915 (!trace_kprobe_on_func_entry(event->tp_event) ||
1916 !trace_kprobe_error_injectable(event->tp_event)))
1917 return -EINVAL;
1918
1919 mutex_lock(&bpf_event_mutex);
1920
1921 if (event->prog)
1922 goto unlock;
1923
1924 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1925 if (old_array &&
1926 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1927 ret = -E2BIG;
1928 goto unlock;
1929 }
1930
1931 ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
1932 if (ret < 0)
1933 goto unlock;
1934
1935
        event->prog = prog;
        event->bpf_cookie = bpf_cookie;
        rcu_assign_pointer(event->tp_event->prog_array, new_array);
        bpf_prog_array_free_sleepable(old_array);

unlock:
        mutex_unlock(&bpf_event_mutex);
        return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
        struct bpf_prog_array *old_array;
        struct bpf_prog_array *new_array;
        int ret;

        mutex_lock(&bpf_event_mutex);

        if (!event->prog)
                goto unlock;

        old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
        ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
        if (ret == -ENOENT)
                goto unlock;
        if (ret < 0) {
                bpf_prog_array_delete_safe(old_array, event->prog);
        } else {
                rcu_assign_pointer(event->tp_event->prog_array, new_array);
                bpf_prog_array_free_sleepable(old_array);
        }

        bpf_prog_put(event->prog);
        event->prog = NULL;

unlock:
        mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
        struct perf_event_query_bpf __user *uquery = info;
        struct perf_event_query_bpf query = {};
        struct bpf_prog_array *progs;
        u32 *ids, prog_cnt, ids_len;
        int ret;

        if (!perfmon_capable())
                return -EPERM;
        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                return -EINVAL;
        if (copy_from_user(&query, uquery, sizeof(query)))
                return -EFAULT;

        ids_len = query.ids_len;
        if (ids_len > BPF_TRACE_MAX_PROGS)
                return -E2BIG;
        ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
        if (!ids)
                return -ENOMEM;

        /*
         * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
         * is required when user only wants to check for uquery->prog_cnt.
         * There is no need to check for it since the case is handled
         * gracefully in bpf_prog_array_copy_info.
         */
        mutex_lock(&bpf_event_mutex);
        progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
        ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
        mutex_unlock(&bpf_event_mutex);

        if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
            copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
                ret = -EFAULT;

        kfree(ids);
        return ret;
}
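
/*
 * For illustration only (hypothetical userspace side, not part of this
 * file): the array above is queried through the PERF_EVENT_IOC_QUERY_BPF
 * ioctl on the perf event fd, roughly:
 *
 *      struct perf_event_query_bpf *query;
 *
 *      query = calloc(1, sizeof(*query) + 64 * sizeof(__u32));
 *      query->ids_len = 64;
 *      if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
 *              printf("%u progs attached\n", query->prog_cnt);
 */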

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
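/*
 * The two symbols above are linker-generated bounds of the raw
 * tracepoint map section, which carries one bpf_raw_event_map entry per
 * raw tracepoint built into the kernel image; tracepoints living in
 * modules are found via bpf_get_raw_tracepoint_module() instead.
 */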

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
        struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

        for (; btp < __stop__bpf_raw_tp; btp++) {
                if (!strcmp(btp->tp->name, name))
                        return btp;
        }

        return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
        struct module *mod;

        preempt_disable();
        mod = __module_address((unsigned long)btp);
        module_put(mod);
        preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
        cant_sleep();
        rcu_read_lock();
        (void) bpf_prog_run(prog, args);
        rcu_read_unlock();
}

#define UNPACK(...) __VA_ARGS__
#define REPEAT_1(FN, DL, X, ...) FN(X)
#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X) u64 arg##X
#define COPY(X) args[X] = arg##X

#define __DL_COM (,)
#define __DL_SEM (;)

#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x) \
        void bpf_trace_run##x(struct bpf_prog *prog, \
                              REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
        { \
                u64 args[x]; \
                REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
                __bpf_trace_run(prog, args); \
        } \
        EXPORT_SYMBOL_GPL(bpf_trace_run##x)
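/*
 * For illustration only (expansion sketch, not compiled):
 * BPF_TRACE_DEFN_x(2) below expands to roughly
 *
 *      void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *      {
 *              u64 args[2];
 *              args[0] = arg0;
 *              args[1] = arg1;
 *              __bpf_trace_run(prog, args);
 *      }
 *      EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */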
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
        struct tracepoint *tp = btp->tp;

        /*
         * check that program doesn't access arguments beyond what's
         * available in this tracepoint
         */
        if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
                return -EINVAL;

        if (prog->aux->max_tp_access > btp->writable_size)
                return -EINVAL;

        return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
                                                   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
        return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
        return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
                            u32 *fd_type, const char **buf,
                            u64 *probe_offset, u64 *probe_addr)
{
        bool is_tracepoint, is_syscall_tp;
        struct bpf_prog *prog;
        int flags, err = 0;

        prog = event->prog;
        if (!prog)
                return -ENOENT;

        /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
        if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
                return -EOPNOTSUPP;

        *prog_id = prog->aux->id;
        flags = event->tp_event->flags;
        is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
        is_syscall_tp = is_syscall_trace_event(event->tp_event);

        if (is_tracepoint || is_syscall_tp) {
                *buf = is_tracepoint ? event->tp_event->tp->name
                                     : event->tp_event->name;
                *fd_type = BPF_FD_TYPE_TRACEPOINT;
                *probe_offset = 0x0;
                *probe_addr = 0x0;
        } else {
                /* kprobe/uprobe */
                err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
                if (flags & TRACE_EVENT_FL_KPROBE)
                        err = bpf_get_kprobe_info(event, fd_type, buf,
                                                  probe_offset, probe_addr,
                                                  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
                if (flags & TRACE_EVENT_FL_UPROBE)
                        err = bpf_get_uprobe_info(event, fd_type, buf,
                                                  probe_offset,
                                                  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
        }

        return err;
}

static int __init send_signal_irq_work_init(void)
{
        int cpu;
        struct send_signal_irq_work *work;

        for_each_possible_cpu(cpu) {
                work = per_cpu_ptr(&send_signal_work, cpu);
                init_irq_work(&work->irq_work, do_bpf_send_signal);
        }
        return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
                            void *module)
{
        struct bpf_trace_module *btm, *tmp;
        struct module *mod = module;
        int ret = 0;

        if (mod->num_bpf_raw_events == 0 ||
            (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
                goto out;

        mutex_lock(&bpf_module_mutex);

        switch (op) {
        case MODULE_STATE_COMING:
                btm = kzalloc(sizeof(*btm), GFP_KERNEL);
                if (btm) {
                        btm->module = module;
                        list_add(&btm->list, &bpf_trace_modules);
                } else {
                        ret = -ENOMEM;
                }
                break;
        case MODULE_STATE_GOING:
                list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
                        if (btm->module == module) {
                                list_del(&btm->list);
                                kfree(btm);
                                break;
                        }
                }
                break;
        }

        mutex_unlock(&bpf_module_mutex);

out:
        return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
        .notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
        register_module_notifier(&bpf_module_nb);
        return 0;
}

fs_initcall(bpf_event_init);
#endif

#ifdef CONFIG_FPROBE
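/*
 * addrs and cookies below are parallel arrays with cnt entries each;
 * when cookies are supplied, both arrays get sorted by address at
 * attach time so that a probe's cookie can be found by a binary search
 * on its entry IP (see bpf_kprobe_multi_cookie()).
 */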
struct bpf_kprobe_multi_link {
        struct bpf_link link;
        struct fprobe fp;
        unsigned long *addrs;
        u64 *cookies;
        u32 cnt;
};

struct bpf_kprobe_multi_run_ctx {
        struct bpf_run_ctx run_ctx;
        struct bpf_kprobe_multi_link *link;
        unsigned long entry_ip;
};

struct user_syms {
        const char **syms;
        char *buf;
};

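/*
 * Copy cnt symbol names in from user space: buf stores the
 * NUL-terminated strings back to back, and syms[] points at the start
 * of each one.
 */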
static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
{
        unsigned long __user usymbol;
        const char **syms = NULL;
        char *buf = NULL, *p;
        int err = -ENOMEM;
        unsigned int i;

        syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
        if (!syms)
                goto error;

        buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
        if (!buf)
                goto error;

        for (p = buf, i = 0; i < cnt; i++) {
                if (__get_user(usymbol, usyms + i)) {
                        err = -EFAULT;
                        goto error;
                }
                err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
                if (err == KSYM_NAME_LEN)
                        err = -E2BIG;
                if (err < 0)
                        goto error;
                syms[i] = p;
                p += err + 1;
        }

        us->syms = syms;
        us->buf = buf;
        return 0;

error:
        if (err) {
                kvfree(syms);
                kvfree(buf);
        }
        return err;
}

static void free_user_syms(struct user_syms *us)
{
        kvfree(us->syms);
        kvfree(us->buf);
}

static void bpf_kprobe_multi_link_release(struct bpf_link *link)
{
        struct bpf_kprobe_multi_link *kmulti_link;

        kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
        unregister_fprobe(&kmulti_link->fp);
}

static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
{
        struct bpf_kprobe_multi_link *kmulti_link;

        kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
        kvfree(kmulti_link->addrs);
        kvfree(kmulti_link->cookies);
        kfree(kmulti_link);
}

static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
        .release = bpf_kprobe_multi_link_release,
        .dealloc = bpf_kprobe_multi_link_dealloc,
};

static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
{
        const struct bpf_kprobe_multi_link *link = priv;
        unsigned long *addr_a = a, *addr_b = b;
        u64 *cookie_a, *cookie_b;

        cookie_a = link->cookies + (addr_a - link->addrs);
        cookie_b = link->cookies + (addr_b - link->addrs);

        /* swap addr_a/addr_b and cookie_a/cookie_b values */
        swap(*addr_a, *addr_b);
        swap(*cookie_a, *cookie_b);
}

static int __bpf_kprobe_multi_cookie_cmp(const void *a, const void *b)
{
        const unsigned long *addr_a = a, *addr_b = b;

        if (*addr_a == *addr_b)
                return 0;
        return *addr_a < *addr_b ? -1 : 1;
}

static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
{
        return __bpf_kprobe_multi_cookie_cmp(a, b);
}

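/*
 * Backs the bpf_get_attach_cookie() helper for kprobe.multi programs:
 * look up the current entry IP in the sorted addrs array and return
 * the cookie stored at the matching index.
 */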
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
        struct bpf_kprobe_multi_run_ctx *run_ctx;
        struct bpf_kprobe_multi_link *link;
        u64 *cookie, entry_ip;
        unsigned long *addr;

        if (WARN_ON_ONCE(!ctx))
                return 0;
        run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
        link = run_ctx->link;
        if (!link->cookies)
                return 0;
        entry_ip = run_ctx->entry_ip;
        addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
                       __bpf_kprobe_multi_cookie_cmp);
        if (!addr)
                return 0;
        cookie = link->cookies + (addr - link->addrs);
        return *cookie;
}

static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
        struct bpf_kprobe_multi_run_ctx *run_ctx;

        run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
        return run_ctx->entry_ip;
}

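/*
 * Run the link's program against the probed registers. The per-CPU
 * bpf_prog_active counter rejects reentrant invocations (e.g. a probe
 * firing while another BPF program is already running on this CPU),
 * and the run_ctx is published through current->bpf_ctx so the helpers
 * above can recover the link and entry IP.
 */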
static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
                           unsigned long entry_ip, struct pt_regs *regs)
{
        struct bpf_kprobe_multi_run_ctx run_ctx = {
                .link = link,
                .entry_ip = entry_ip,
        };
        struct bpf_run_ctx *old_run_ctx;
        int err;

        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
                err = 0;
                goto out;
        }

        migrate_disable();
        rcu_read_lock();
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        err = bpf_prog_run(link->link.prog, regs);
        bpf_reset_run_ctx(old_run_ctx);
        rcu_read_unlock();
        migrate_enable();

out:
        __this_cpu_dec(bpf_prog_active);
        return err;
}

static void
kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
                          struct pt_regs *regs)
{
        struct bpf_kprobe_multi_link *link;

        link = container_of(fp, struct bpf_kprobe_multi_link, fp);
        kprobe_multi_link_prog_run(link, entry_ip, regs);
}

static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{
        const char **str_a = (const char **) a;
        const char **str_b = (const char **) b;

        return strcmp(*str_a, *str_b);
}

struct multi_symbols_sort {
        const char **funcs;
        u64 *cookies;
};

static void symbols_swap_r(void *a, void *b, int size, const void *priv)
{
        const struct multi_symbols_sort *data = priv;
        const char **name_a = a, **name_b = b;

        swap(*name_a, *name_b);

        /* If defined, swap also related cookies. */
        if (data->cookies) {
                u64 *cookie_a, *cookie_b;

                cookie_a = data->cookies + (name_a - data->funcs);
                cookie_b = data->cookies + (name_b - data->funcs);
                swap(*cookie_a, *cookie_b);
        }
}

int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct bpf_kprobe_multi_link *link = NULL;
        struct bpf_link_primer link_primer;
        void __user *ucookies;
        unsigned long *addrs;
        u32 flags, cnt, size;
        void __user *uaddrs;
        u64 *cookies = NULL;
        void __user *usyms;
        int err;

        /* no support for 32bit archs yet */
        if (sizeof(u64) != sizeof(void *))
                return -EOPNOTSUPP;

        if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
                return -EINVAL;

        flags = attr->link_create.kprobe_multi.flags;
        if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
                return -EINVAL;

        uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
        usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
        if (!!uaddrs == !!usyms)
                return -EINVAL;

        cnt = attr->link_create.kprobe_multi.cnt;
        if (!cnt)
                return -EINVAL;

        size = cnt * sizeof(*addrs);
        addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
        if (!addrs)
                return -ENOMEM;

        ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
        if (ucookies) {
                cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
                if (!cookies) {
                        err = -ENOMEM;
                        goto error;
                }
                if (copy_from_user(cookies, ucookies, size)) {
                        err = -EFAULT;
                        goto error;
                }
        }

        if (uaddrs) {
                if (copy_from_user(addrs, uaddrs, size)) {
                        err = -EFAULT;
                        goto error;
                }
        } else {
                struct multi_symbols_sort data = {
                        .cookies = cookies,
                };
                struct user_syms us;

                err = copy_user_syms(&us, usyms, cnt);
                if (err)
                        goto error;

                if (cookies)
                        data.funcs = us.syms;

                sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
                       symbols_swap_r, &data);

                err = ftrace_lookup_symbols(us.syms, cnt, addrs);
                free_user_syms(&us);
                if (err)
                        goto error;
        }

        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link) {
                err = -ENOMEM;
                goto error;
        }

        bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
                      &bpf_kprobe_multi_link_lops, prog);

        err = bpf_link_prime(&link->link, &link_primer);
        if (err)
                goto error;

        if (flags & BPF_F_KPROBE_MULTI_RETURN)
                link->fp.exit_handler = kprobe_multi_link_handler;
        else
                link->fp.entry_handler = kprobe_multi_link_handler;

        link->addrs = addrs;
        link->cookies = cookies;
        link->cnt = cnt;

        if (cookies) {
                /*
                 * Sorting addresses will trigger sorting cookies as well
                 * (check bpf_kprobe_multi_cookie_swap). This way we can
                 * find cookie based on the address in bpf_get_attach_cookie
                 * helper.
                 */
                sort_r(addrs, cnt, sizeof(*addrs),
                       bpf_kprobe_multi_cookie_cmp,
                       bpf_kprobe_multi_cookie_swap,
                       link);
        }

        err = register_fprobe_ips(&link->fp, addrs, cnt);
        if (err) {
                bpf_link_cleanup(&link_primer);
                return err;
        }

        return bpf_link_settle(&link_primer);

error:
        kfree(link);
        kvfree(addrs);
        kvfree(cookies);
        return err;
}
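
/*
 * For illustration only (hypothetical userspace side, via a libbpf
 * version with kprobe.multi support; not part of this file):
 *
 *      LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
 *              .syms = syms,
 *              .cookies = cookies,
 *              .cnt = cnt,
 *      );
 *      link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
 */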
#else
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        return -EOPNOTSUPP;
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
        return 0;
}
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
        return 0;
}
#endif