0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <stdlib.h>
0025 #include <string.h>
0026 #include <memory.h>
0027 #include <unistd.h>
0028 #include <asm/unistd.h>
0029 #include <errno.h>
0030 #include <linux/bpf.h>
0031 #include <linux/filter.h>
0032 #include <linux/kernel.h>
0033 #include <limits.h>
0034 #include <sys/resource.h>
0035 #include "bpf.h"
0036 #include "libbpf.h"
0037 #include "libbpf_internal.h"
0038
0039
0040
0041
0042
/* Fallback definitions of the bpf() syscall number for architectures whose
 * libc/kernel headers do not provide __NR_bpf.  Numbers are per-arch ABI
 * constants and must match the kernel's syscall tables.
 */
#ifndef __NR_bpf
# if defined(__i386__)
# define __NR_bpf 357
# elif defined(__x86_64__)
# define __NR_bpf 321
# elif defined(__aarch64__)
# define __NR_bpf 280
# elif defined(__sparc__)
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
# elif defined(__arc__)
# define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
# define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
# define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
# define __NR_bpf 5315
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif
0066
/* Convert a host pointer into the __u64 representation that bpf_attr
 * fields use for user-space addresses.
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (__u64)addr;
}
0071
0072 static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
0073 unsigned int size)
0074 {
0075 return syscall(__NR_bpf, cmd, attr, size);
0076 }
0077
/* Like sys_bpf(), but for commands returning an FD; routes the result
 * through ensure_good_fd() to avoid handing out low FD numbers.
 */
static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	return ensure_good_fd(sys_bpf(cmd, attr, size));
}
0086
#define PROG_LOAD_ATTEMPTS 5

/* Issue BPF_PROG_LOAD, retrying up to @attempts times while the kernel
 * reports EAGAIN (transient condition during program verification).
 */
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
	int fd;

	for (;;) {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
		if (fd >= 0 || errno != EAGAIN || --attempts <= 0)
			break;
	}

	return fd;
}
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108 int probe_memcg_account(void)
0109 {
0110 const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
0111 struct bpf_insn insns[] = {
0112 BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
0113 BPF_EXIT_INSN(),
0114 };
0115 size_t insn_cnt = ARRAY_SIZE(insns);
0116 union bpf_attr attr;
0117 int prog_fd;
0118
0119
0120 memset(&attr, 0, prog_load_attr_sz);
0121 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
0122 attr.insns = ptr_to_u64(insns);
0123 attr.insn_cnt = insn_cnt;
0124 attr.license = ptr_to_u64("GPL");
0125
0126 prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
0127 if (prog_fd >= 0) {
0128 close(prog_fd);
0129 return 1;
0130 }
0131 return 0;
0132 }
0133
/* Set once by bump_rlimit_memlock() after the first (attempted) bump;
 * never cleared.
 */
static bool memlock_bumped;
/* RLIMIT_MEMLOCK value to set on first BPF object creation; defaults to
 * "unlimited", can be overridden via libbpf_set_memlock_rlim().
 */
static rlim_t memlock_rlim = RLIM_INFINITY;

/* Override the RLIMIT_MEMLOCK value libbpf will set before the first BPF
 * map/program/BTF load.  0 disables the bump entirely.  Returns 0 on
 * success or -EBUSY if the limit was already applied.
 */
int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
	/* too late: the limit has already been (attempted to be) set */
	if (memlock_bumped)
		return libbpf_err(-EBUSY);

	memlock_rlim = memlock_bytes;
	return 0;
}
0145
/* Raise RLIMIT_MEMLOCK before creating BPF objects, unless the kernel
 * charges BPF memory to memcg (in which case RLIMIT_MEMLOCK is irrelevant).
 * Returns 0 on success or skip, -errno if setrlimit() fails.
 */
int bump_rlimit_memlock(void)
{
	struct rlimit rlim;

	/* nothing to do if kernel already charges BPF memory to memcg */
	if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
		return 0;

	/* mark as attempted *before* setrlimit() so we only ever try once,
	 * even if the call below fails
	 */
	memlock_bumped = true;

	/* zero memlock_rlim means "leave the limit alone" */
	if (memlock_rlim == 0)
		return 0;

	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		return -errno;

	return 0;
}
0166
0167 int bpf_map_create(enum bpf_map_type map_type,
0168 const char *map_name,
0169 __u32 key_size,
0170 __u32 value_size,
0171 __u32 max_entries,
0172 const struct bpf_map_create_opts *opts)
0173 {
0174 const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
0175 union bpf_attr attr;
0176 int fd;
0177
0178 bump_rlimit_memlock();
0179
0180 memset(&attr, 0, attr_sz);
0181
0182 if (!OPTS_VALID(opts, bpf_map_create_opts))
0183 return libbpf_err(-EINVAL);
0184
0185 attr.map_type = map_type;
0186 if (map_name)
0187 libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
0188 attr.key_size = key_size;
0189 attr.value_size = value_size;
0190 attr.max_entries = max_entries;
0191
0192 attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
0193 attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
0194 attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
0195 attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
0196
0197 attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
0198 attr.map_flags = OPTS_GET(opts, map_flags, 0);
0199 attr.map_extra = OPTS_GET(opts, map_extra, 0);
0200 attr.numa_node = OPTS_GET(opts, numa_node, 0);
0201 attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
0202
0203 fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
0204 return libbpf_err_errno(fd);
0205 }
0206
/* Re-pack @cnt records of @expected_rec_size bytes each (spaced
 * @actual_rec_size apart in @orecord) into a freshly malloc'ed buffer of
 * records @actual_rec_size bytes each, zero-filling the trailing
 * (actual - expected) bytes the kernel expects but the caller didn't
 * provide.  Caller owns and must free() the returned buffer.
 * Returns NULL on allocation failure (callers only invoke this with
 * cnt > 0 and actual > expected).
 *
 * Fixes vs. previous version: use char* cursors instead of non-standard
 * void-pointer arithmetic (a GNU extension), and a __u32 loop index to
 * avoid a signed/unsigned comparison against @cnt.
 */
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	const char *src = orecord;
	char *info, *dst;
	__u32 i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* copy the caller-provided prefix of each record, zero the tail */
	dst = info;
	for (i = 0; i < cnt; i++) {
		memcpy(dst, src, expected_rec_size);
		memset(dst + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		src += actual_rec_size;
		dst += actual_rec_size;
	}

	return info;
}
0231
/* Load a BPF program of @prog_type into the kernel; returns a new program
 * FD on success, negative error otherwise.  @insns/@insn_cnt describe the
 * instruction buffer, @license its license string, and @opts optional
 * extras (BTF func/line info, attach target, log buffer, retry count).
 * Loading is retried up to opts->attempts times on EAGAIN.
 */
int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name, const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  const struct bpf_prog_load_opts *opts)
{
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	bump_rlimit_memlock();

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (attempts == 0)
		attempts = PROG_LOAD_ATTEMPTS;

	memset(&attr, 0, sizeof(attr));

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);

	if (prog_name)
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	/* attr.insn_cnt is only 32 bits wide */
	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	/* at most one attach target may be specified */
	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	/* log_buf and log_size must be provided (or omitted) together */
	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);
	/* only the three low log_level bits are defined */
	if (log_level > (4 | 2 | 1))
		return libbpf_err(-EINVAL);
	if (log_level && !log_buf)
		return libbpf_err(-EINVAL);

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = log_level;
	}

	fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
	if (fd >= 0)
		return fd;

	/* On E2BIG the kernel may expect smaller func/line info records than
	 * the caller provided; note sys_bpf_prog_load() shrinks
	 * attr.*_rec_size to the kernel's expected value in that case.
	 * Retry with each record zero-padded out to the caller's record size
	 * at the kernel's smaller stride — at most once per info kind.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			/* try with corrected line info records */
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			/* nothing left to fix up — give up */
			break;
		}

		fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
		if (fd >= 0)
			goto done;
	}

	if (log_level == 0 && log_buf) {
		/* The caller provided a log buffer but didn't force logging;
		 * retry the failed load with log_level 1 so the verifier log
		 * is still captured for diagnostics.
		 */
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
	}
done:
	/* free(NULL) is a no-op, so these are safe unconditionally */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}
0381
/* Insert or update the element for @key in map @fd with @value, subject
 * to @flags (BPF_ANY/BPF_NOEXIST/BPF_EXIST).  Returns 0 or negative error.
 */
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.flags = flags;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	err = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(err);
}
0397
/* Look up the element for @key in map @fd, copying its value into @value.
 * Returns 0 or negative error.  This is bpf_map_lookup_elem_flags() with
 * zero flags; delegate to it to avoid duplicating the attr setup (the
 * previous body was an exact copy minus the flags assignment, and the
 * kernel sees flags == 0 either way since attr is zero-initialized).
 */
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	return bpf_map_lookup_elem_flags(fd, key, value, 0);
}
0411
/* Look up the element for @key in map @fd with lookup @flags (e.g.
 * BPF_F_LOCK), copying its value into @value.  Returns 0 or negative error.
 */
int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.flags = flags;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	err = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(err);
}
0426
/* Atomically look up and delete the element for @key in map @fd, copying
 * the deleted value into @value.  Returns 0 or negative error.  Delegates
 * to the _flags variant with zero flags to avoid duplicating the attr
 * setup (the previous body was an exact copy minus the flags assignment).
 */
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	return bpf_map_lookup_and_delete_elem_flags(fd, key, value, 0);
}
0440
/* Atomically look up and delete the element for @key in map @fd with
 * @flags (e.g. BPF_F_LOCK), copying the deleted value into @value.
 * Returns 0 or negative error.
 */
int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.flags = flags;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	err = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(err);
}
0455
/* Delete the element for @key from map @fd.  Returns 0 or negative error.
 * Delegates to bpf_map_delete_elem_flags() with zero flags to avoid
 * duplicating the attr setup (the previous body was an exact copy minus
 * the flags assignment; flags == 0 is what the zeroed attr carried anyway).
 */
int bpf_map_delete_elem(int fd, const void *key)
{
	return bpf_map_delete_elem_flags(fd, key, 0);
}
0468
/* Delete the element for @key from map @fd, subject to @flags (e.g.
 * BPF_F_LOCK).  Returns 0 or negative error.
 */
int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.flags = flags;
	attr.key = ptr_to_u64(key);

	err = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(err);
}
0482
/* Fetch into @next_key the key following @key in map @fd (pass NULL key to
 * get the first key).  Returns 0 or negative error (-ENOENT at the end).
 */
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.next_key = ptr_to_u64(next_key);
	attr.key = ptr_to_u64(key);

	err = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
	return libbpf_err_errno(err);
}
0496
/* Freeze map @fd, making it read-only from user space.  Returns 0 or
 * negative error.
 */
int bpf_map_freeze(int fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	return libbpf_err_errno(sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr)));
}
0508
/* Shared driver for all BPF_MAP_*_BATCH commands.  @count is in/out: on
 * input the number of elements the caller's buffers can hold, on output
 * the number the kernel actually processed.  Note *count is updated even
 * when the syscall fails, since a batch op can make partial progress.
 */
static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, sizeof(attr));
	/* report how many elements were processed, even on failure */
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}
0535
/* Batch-delete up to *count elements identified by @keys from map @fd;
 * *count is updated to the number actually deleted.
 */
int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, (void *)keys, NULL, count, opts);
}
0542
/* Batch-lookup up to *count key/value pairs from map @fd, resuming from
 * @in_batch (NULL to start) and writing the resume token to @out_batch;
 * *count is updated to the number actually returned.
 */
int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}
0550
/* Like bpf_map_lookup_batch(), but also deletes the returned elements;
 * *count is updated to the number actually processed.
 */
int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}
0559
/* Batch-update up to *count @keys/@values pairs in map @fd; *count is
 * updated to the number actually updated.
 */
int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    (void *)keys, (void *)values, count, opts);
}
0566
/* Pin BPF object @fd (map/program/link) at @pathname in a BPF FS.
 * Returns 0 or negative error.
 */
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.bpf_fd = fd;
	attr.pathname = ptr_to_u64((void *)pathname);

	err = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
	return libbpf_err_errno(err);
}
0579
/* Open the BPF object pinned at @pathname; returns a new FD or negative
 * error.  Equivalent to bpf_obj_get_opts() with default options.
 */
int bpf_obj_get(const char *pathname)
{
	return bpf_obj_get_opts(pathname, NULL);
}
0584
0585 int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
0586 {
0587 union bpf_attr attr;
0588 int fd;
0589
0590 if (!OPTS_VALID(opts, bpf_obj_get_opts))
0591 return libbpf_err(-EINVAL);
0592
0593 memset(&attr, 0, sizeof(attr));
0594 attr.pathname = ptr_to_u64((void *)pathname);
0595 attr.file_flags = OPTS_GET(opts, file_flags, 0);
0596
0597 fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
0598 return libbpf_err_errno(fd);
0599 }
0600
/* Legacy attach API: attach program @prog_fd to @target_fd with attach
 * @type and @flags.  Thin wrapper around bpf_prog_attach_opts().
 */
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}
0610
0611 int bpf_prog_attach_opts(int prog_fd, int target_fd,
0612 enum bpf_attach_type type,
0613 const struct bpf_prog_attach_opts *opts)
0614 {
0615 union bpf_attr attr;
0616 int ret;
0617
0618 if (!OPTS_VALID(opts, bpf_prog_attach_opts))
0619 return libbpf_err(-EINVAL);
0620
0621 memset(&attr, 0, sizeof(attr));
0622 attr.target_fd = target_fd;
0623 attr.attach_bpf_fd = prog_fd;
0624 attr.attach_type = type;
0625 attr.attach_flags = OPTS_GET(opts, flags, 0);
0626 attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
0627
0628 ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
0629 return libbpf_err_errno(ret);
0630 }
0631
/* Backwards-compat symbol: bpf_prog_attach_xattr() is an exact alias of
 * bpf_prog_attach_opts().
 */
__attribute__((alias("bpf_prog_attach_opts")))
int bpf_prog_attach_xattr(int prog_fd, int target_fd,
			  enum bpf_attach_type type,
			  const struct bpf_prog_attach_opts *opts);
0636
/* Detach whatever program of attach @type is attached to @target_fd.
 * Returns 0 or negative error.  Delegates to bpf_prog_detach2() with a
 * zero program FD — the previous body was an exact copy except that
 * attach_bpf_fd was left at its memset() value of 0, which is what
 * passing prog_fd == 0 produces as well.
 */
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	return bpf_prog_detach2(0, target_fd, type);
}
0649
/* Detach program @prog_fd of attach @type from @target_fd.  Returns 0 or
 * negative error.
 */
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.attach_type = type;
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;

	err = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(err);
}
0663
/* Create a BPF link attaching program @prog_fd to @target_fd with
 * @attach_type; @opts carries per-attach-type extras (iter info, perf
 * cookie, kprobe-multi arrays, tracing cookie).  Returns a link FD or
 * negative error.  On kernels without BPF_LINK_CREATE, falls back to the
 * legacy BPF_RAW_TRACEPOINT_OPEN command where that is equivalent.
 */
int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	__u32 target_btf_id, iter_info_len;
	union bpf_attr attr;
	int fd, err;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* iter_info and target_btf_id are mutually exclusive options */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	/* fill in attach-type-specific extras, rejecting any option fields
	 * set beyond what the given attach type consumes
	 */
	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_KPROBE_MULTI:
		attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
		attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
		attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
		attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
		attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
		if (!OPTS_ZEROED(opts, kprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
		if (!OPTS_ZEROED(opts, tracing))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
	if (fd >= 0)
		return fd;

	/* EINVAL may mean the kernel predates BPF_LINK_CREATE; anything else
	 * is a real error
	 */
	err = -errno;
	if (err != -EINVAL)
		return libbpf_err(err);

	/* the raw tracepoint fallback cannot express a target or any extra
	 * options, so only attempt it when none were provided
	 */
	if (attr.link_create.target_fd || attr.link_create.target_btf_id)
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);

	/* older kernels support these attach types through
	 * BPF_RAW_TRACEPOINT_OPEN only
	 */
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return bpf_raw_tracepoint_open(NULL, prog_fd);
	default:
		return libbpf_err(err);
	}
}
0763
/* Force-detach link @link_fd from its target without closing the FD.
 * Returns 0 or negative error.
 */
int bpf_link_detach(int link_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_detach.link_fd = link_fd;

	return libbpf_err_errno(sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr)));
}
0775
0776 int bpf_link_update(int link_fd, int new_prog_fd,
0777 const struct bpf_link_update_opts *opts)
0778 {
0779 union bpf_attr attr;
0780 int ret;
0781
0782 if (!OPTS_VALID(opts, bpf_link_update_opts))
0783 return libbpf_err(-EINVAL);
0784
0785 memset(&attr, 0, sizeof(attr));
0786 attr.link_update.link_fd = link_fd;
0787 attr.link_update.new_prog_fd = new_prog_fd;
0788 attr.link_update.flags = OPTS_GET(opts, flags, 0);
0789 attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
0790
0791 ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
0792 return libbpf_err_errno(ret);
0793 }
0794
/* Instantiate a readable iterator FD from BPF iterator link @link_fd.
 * Returns the new FD or negative error.
 */
int bpf_iter_create(int link_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.iter_create.link_fd = link_fd;

	return libbpf_err_errno(sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr)));
}
0806
/* Query programs attached to @target_fd for attach @type.  On input,
 * opts->prog_cnt is the capacity of opts->prog_ids; on output it is the
 * number of IDs available/returned.  attach_flags/prog_cnt are written
 * back through OPTS_SET even if the syscall fails.
 */
int bpf_prog_query_opts(int target_fd,
			enum bpf_attach_type type,
			struct bpf_prog_query_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_query_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));

	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
	attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
	attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
	attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));

	/* propagate kernel-reported results back to the caller */
	OPTS_SET(opts, attach_flags, attr.query.attach_flags);
	OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);

	return libbpf_err_errno(ret);
}
0833
/* Legacy query API; wraps bpf_prog_query_opts().  *prog_cnt is in/out
 * (capacity in, result count out); @attach_flags may be NULL.
 */
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts);
	int ret;

	opts.query_flags = query_flags;
	opts.prog_ids = prog_ids;
	opts.prog_cnt = *prog_cnt;

	ret = bpf_prog_query_opts(target_fd, type, &opts);

	/* report results back even on failure */
	if (attach_flags)
		*attach_flags = opts.attach_flags;
	*prog_cnt = opts.prog_cnt;

	/* ret is already -errno with errno set, so re-applying
	 * libbpf_err_errno() here is a harmless no-op
	 */
	return libbpf_err_errno(ret);
}
0852
/* Run program @prog_fd with the test inputs described in @opts
 * (ctx/data buffers, repeat count, CPU, flags).  Output sizes, duration
 * and the program's return value are written back through OPTS_SET.
 * Returns 0 or negative error.
 */
int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	/* copy kernel-produced outputs back to the caller */
	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}
0886
/* Shared driver for the *_GET_NEXT_ID commands: write into *next_id the
 * ID following @start_id for the object class selected by @cmd.
 * Returns 0 or negative error (-ENOENT past the last ID).
 */
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, sizeof(attr));
	if (err)
		return libbpf_err_errno(err);

	*next_id = attr.next_id;
	return 0;
}
0901
/* Write into *next_id the ID of the BPF program following @start_id. */
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}
0906
/* Write into *next_id the ID of the BPF map following @start_id. */
int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}
0911
/* Write into *next_id the ID of the BTF object following @start_id. */
int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}
0916
/* Write into *next_id the ID of the BPF link following @start_id. */
int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}
0921
/* Open an FD for the BPF program with ID @id (requires CAP_SYS_ADMIN on
 * most kernels).  Returns the new FD or negative error.
 */
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return libbpf_err_errno(sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr)));
}
0933
/* Open an FD for the BPF map with ID @id.  Returns the new FD or
 * negative error.
 */
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return libbpf_err_errno(sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr)));
}
0945
/* Open an FD for the BTF object with ID @id.  Returns the new FD or
 * negative error.
 */
int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	return libbpf_err_errno(sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr)));
}
0957
/* Open an FD for the BPF link with ID @id.  Returns the new FD or
 * negative error.
 */
int bpf_link_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_id = id;

	return libbpf_err_errno(sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr)));
}
0969
/* Fetch kernel info for BPF object @bpf_fd into @info.  *info_len is
 * in/out: buffer capacity on input, the length the kernel filled in on
 * output (updated only on success).  Returns 0 or negative error.
 */
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = bpf_fd;
	attr.info.info = ptr_to_u64(info);
	attr.info.info_len = *info_len;

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (err)
		return libbpf_err_errno(err);

	*info_len = attr.info.info_len;
	return 0;
}
0987
/* Attach program @prog_fd to the raw tracepoint named @name (NULL for
 * fentry/fexit/etc. programs whose target is baked into the program).
 * Returns an attachment FD or negative error.
 */
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.prog_fd = prog_fd;
	attr.raw_tracepoint.name = ptr_to_u64(name);

	return libbpf_err_errno(sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr)));
}
1000
/* Load raw BTF data into the kernel; returns a BTF FD or negative error.
 * If the caller supplied a log buffer with log_level == 0, a failed load
 * is retried once with log_level == 1 so the validation log is captured.
 */
int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
	union bpf_attr attr;
	char *log_buf;
	size_t log_size;
	__u32 log_level;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_btf_load_opts))
		return libbpf_err(-EINVAL);

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	/* attr.btf_log_size is only 32 bits wide */
	if (log_size > UINT_MAX)
		return libbpf_err(-EINVAL);
	/* a log size without a buffer makes no sense */
	if (log_size && !log_buf)
		return libbpf_err(-EINVAL);

	attr.btf = ptr_to_u64(btf_data);
	attr.btf_size = btf_size;

	/* only enable the log up front if the caller explicitly requested a
	 * log level; otherwise the retry below turns it on after a failure
	 */
	if (log_level) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = log_level;
	}

	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	/* retry with logging enabled so the caller gets the failure reason */
	if (fd < 0 && log_buf && log_level == 0) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = 1;
		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	}
	return libbpf_err_errno(fd);
}
1048
/* Query which BPF program/probe is behind file descriptor @fd of task
 * @pid (perf event or kprobe/uprobe FD).  On return, the probe name is
 * written to @buf (*buf_len in/out), and program ID, FD type, probe
 * offset and address to the remaining out-parameters — these are written
 * back even when the syscall fails.  Returns 0 or negative error.
 *
 * Fix: use memset() instead of the `= {}` empty-initializer (a GNU
 * extension before C23), matching every other function in this file and
 * guaranteeing all trailing bytes of the attr union are zeroed, which
 * the kernel requires.
 */
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));

	/* propagate results unconditionally, as before */
	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}
1072
/* Enable kernel-wide BPF run-time statistics collection of @type; stats
 * stay enabled while the returned FD is held open.  Returns the FD or a
 * negative error.
 */
int bpf_enable_stats(enum bpf_stats_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = type;

	return libbpf_err_errno(sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr)));
}
1084
1085 int bpf_prog_bind_map(int prog_fd, int map_fd,
1086 const struct bpf_prog_bind_opts *opts)
1087 {
1088 union bpf_attr attr;
1089 int ret;
1090
1091 if (!OPTS_VALID(opts, bpf_prog_bind_opts))
1092 return libbpf_err(-EINVAL);
1093
1094 memset(&attr, 0, sizeof(attr));
1095 attr.prog_bind_map.prog_fd = prog_fd;
1096 attr.prog_bind_map.map_fd = map_fd;
1097 attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
1098
1099 ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
1100 return libbpf_err_errno(ret);
1101 }