0001
0002
0003
0004 #include <errno.h>
0005 #include <fcntl.h>
0006 #include <string.h>
0007 #include <stdlib.h>
0008 #include <unistd.h>
0009 #include <net/if.h>
0010 #include <sys/utsname.h>
0011
0012 #include <linux/btf.h>
0013 #include <linux/filter.h>
0014 #include <linux/kernel.h>
0015
0016 #include "bpf.h"
0017 #include "libbpf.h"
0018 #include "libbpf_internal.h"
0019
0020 static int probe_prog_load(enum bpf_prog_type prog_type,
0021 const struct bpf_insn *insns, size_t insns_cnt,
0022 char *log_buf, size_t log_buf_sz)
0023 {
0024 LIBBPF_OPTS(bpf_prog_load_opts, opts,
0025 .log_buf = log_buf,
0026 .log_size = log_buf_sz,
0027 .log_level = log_buf ? 1 : 0,
0028 );
0029 int fd, err, exp_err = 0;
0030 const char *exp_msg = NULL;
0031 char buf[4096];
0032
0033 switch (prog_type) {
0034 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
0035 opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
0036 break;
0037 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
0038 opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
0039 break;
0040 case BPF_PROG_TYPE_SK_LOOKUP:
0041 opts.expected_attach_type = BPF_SK_LOOKUP;
0042 break;
0043 case BPF_PROG_TYPE_KPROBE:
0044 opts.kern_version = get_kernel_version();
0045 break;
0046 case BPF_PROG_TYPE_LIRC_MODE2:
0047 opts.expected_attach_type = BPF_LIRC_MODE2;
0048 break;
0049 case BPF_PROG_TYPE_TRACING:
0050 case BPF_PROG_TYPE_LSM:
0051 opts.log_buf = buf;
0052 opts.log_size = sizeof(buf);
0053 opts.log_level = 1;
0054 if (prog_type == BPF_PROG_TYPE_TRACING)
0055 opts.expected_attach_type = BPF_TRACE_FENTRY;
0056 else
0057 opts.expected_attach_type = BPF_MODIFY_RETURN;
0058 opts.attach_btf_id = 1;
0059
0060 exp_err = -EINVAL;
0061 exp_msg = "attach_btf_id 1 is not a function";
0062 break;
0063 case BPF_PROG_TYPE_EXT:
0064 opts.log_buf = buf;
0065 opts.log_size = sizeof(buf);
0066 opts.log_level = 1;
0067 opts.attach_btf_id = 1;
0068
0069 exp_err = -EINVAL;
0070 exp_msg = "Cannot replace kernel functions";
0071 break;
0072 case BPF_PROG_TYPE_SYSCALL:
0073 opts.prog_flags = BPF_F_SLEEPABLE;
0074 break;
0075 case BPF_PROG_TYPE_STRUCT_OPS:
0076 exp_err = -524;
0077 break;
0078 case BPF_PROG_TYPE_UNSPEC:
0079 case BPF_PROG_TYPE_SOCKET_FILTER:
0080 case BPF_PROG_TYPE_SCHED_CLS:
0081 case BPF_PROG_TYPE_SCHED_ACT:
0082 case BPF_PROG_TYPE_TRACEPOINT:
0083 case BPF_PROG_TYPE_XDP:
0084 case BPF_PROG_TYPE_PERF_EVENT:
0085 case BPF_PROG_TYPE_CGROUP_SKB:
0086 case BPF_PROG_TYPE_CGROUP_SOCK:
0087 case BPF_PROG_TYPE_LWT_IN:
0088 case BPF_PROG_TYPE_LWT_OUT:
0089 case BPF_PROG_TYPE_LWT_XMIT:
0090 case BPF_PROG_TYPE_SOCK_OPS:
0091 case BPF_PROG_TYPE_SK_SKB:
0092 case BPF_PROG_TYPE_CGROUP_DEVICE:
0093 case BPF_PROG_TYPE_SK_MSG:
0094 case BPF_PROG_TYPE_RAW_TRACEPOINT:
0095 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
0096 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
0097 case BPF_PROG_TYPE_SK_REUSEPORT:
0098 case BPF_PROG_TYPE_FLOW_DISSECTOR:
0099 case BPF_PROG_TYPE_CGROUP_SYSCTL:
0100 break;
0101 default:
0102 return -EOPNOTSUPP;
0103 }
0104
0105 fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
0106 err = -errno;
0107 if (fd >= 0)
0108 close(fd);
0109 if (exp_err) {
0110 if (fd >= 0 || err != exp_err)
0111 return 0;
0112 if (exp_msg && !strstr(buf, exp_msg))
0113 return 0;
0114 return 1;
0115 }
0116 return fd >= 0 ? 1 : 0;
0117 }
0118
0119 int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
0120 {
0121 struct bpf_insn insns[] = {
0122 BPF_MOV64_IMM(BPF_REG_0, 0),
0123 BPF_EXIT_INSN()
0124 };
0125 const size_t insn_cnt = ARRAY_SIZE(insns);
0126 int ret;
0127
0128 if (opts)
0129 return libbpf_err(-EINVAL);
0130
0131 ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0);
0132 return libbpf_err(ret);
0133 }
0134
0135 int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
0136 const char *str_sec, size_t str_len)
0137 {
0138 struct btf_header hdr = {
0139 .magic = BTF_MAGIC,
0140 .version = BTF_VERSION,
0141 .hdr_len = sizeof(struct btf_header),
0142 .type_len = types_len,
0143 .str_off = types_len,
0144 .str_len = str_len,
0145 };
0146 int btf_fd, btf_len;
0147 __u8 *raw_btf;
0148
0149 btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
0150 raw_btf = malloc(btf_len);
0151 if (!raw_btf)
0152 return -ENOMEM;
0153
0154 memcpy(raw_btf, &hdr, sizeof(hdr));
0155 memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
0156 memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
0157
0158 btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);
0159
0160 free(raw_btf);
0161 return btf_fd;
0162 }
0163
/* Build and load the minimal BTF that local-storage maps (sk/inode/task
 * storage) require for their value type, equivalent to:
 *
 *   struct bpf_spin_lock {
 *       int val;
 *   };
 *   struct val {
 *       int cnt;
 *       struct bpf_spin_lock l;
 *   };
 *
 * Returns a BTF FD on success, negative error otherwise.
 */
static int load_local_storage_btf(void)
{
	/* string section; offsets: 1="bpf_spin_lock", 15="val", 19="cnt", 23="l" */
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	__u32 types[] = {
		/* int */                                       /* [1] */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
		/* struct bpf_spin_lock, 1 member, 4 bytes */   /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0),  /* int val; at bit 0 */
		/* struct val, 2 members, 8 bytes */            /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0),  /* int cnt; at bit 0 */
		BTF_MEMBER_ENC(23, 2, 32), /* struct bpf_spin_lock l; at bit 32 */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs));
}
0190
0191 static int probe_map_create(enum bpf_map_type map_type)
0192 {
0193 LIBBPF_OPTS(bpf_map_create_opts, opts);
0194 int key_size, value_size, max_entries;
0195 __u32 btf_key_type_id = 0, btf_value_type_id = 0;
0196 int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err;
0197
0198 key_size = sizeof(__u32);
0199 value_size = sizeof(__u32);
0200 max_entries = 1;
0201
0202 switch (map_type) {
0203 case BPF_MAP_TYPE_STACK_TRACE:
0204 value_size = sizeof(__u64);
0205 break;
0206 case BPF_MAP_TYPE_LPM_TRIE:
0207 key_size = sizeof(__u64);
0208 value_size = sizeof(__u64);
0209 opts.map_flags = BPF_F_NO_PREALLOC;
0210 break;
0211 case BPF_MAP_TYPE_CGROUP_STORAGE:
0212 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
0213 key_size = sizeof(struct bpf_cgroup_storage_key);
0214 value_size = sizeof(__u64);
0215 max_entries = 0;
0216 break;
0217 case BPF_MAP_TYPE_QUEUE:
0218 case BPF_MAP_TYPE_STACK:
0219 key_size = 0;
0220 break;
0221 case BPF_MAP_TYPE_SK_STORAGE:
0222 case BPF_MAP_TYPE_INODE_STORAGE:
0223 case BPF_MAP_TYPE_TASK_STORAGE:
0224 btf_key_type_id = 1;
0225 btf_value_type_id = 3;
0226 value_size = 8;
0227 max_entries = 0;
0228 opts.map_flags = BPF_F_NO_PREALLOC;
0229 btf_fd = load_local_storage_btf();
0230 if (btf_fd < 0)
0231 return btf_fd;
0232 break;
0233 case BPF_MAP_TYPE_RINGBUF:
0234 key_size = 0;
0235 value_size = 0;
0236 max_entries = 4096;
0237 break;
0238 case BPF_MAP_TYPE_STRUCT_OPS:
0239
0240 opts.btf_vmlinux_value_type_id = 1;
0241 exp_err = -524;
0242 break;
0243 case BPF_MAP_TYPE_BLOOM_FILTER:
0244 key_size = 0;
0245 max_entries = 1;
0246 break;
0247 case BPF_MAP_TYPE_HASH:
0248 case BPF_MAP_TYPE_ARRAY:
0249 case BPF_MAP_TYPE_PROG_ARRAY:
0250 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
0251 case BPF_MAP_TYPE_PERCPU_HASH:
0252 case BPF_MAP_TYPE_PERCPU_ARRAY:
0253 case BPF_MAP_TYPE_CGROUP_ARRAY:
0254 case BPF_MAP_TYPE_LRU_HASH:
0255 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
0256 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
0257 case BPF_MAP_TYPE_HASH_OF_MAPS:
0258 case BPF_MAP_TYPE_DEVMAP:
0259 case BPF_MAP_TYPE_DEVMAP_HASH:
0260 case BPF_MAP_TYPE_SOCKMAP:
0261 case BPF_MAP_TYPE_CPUMAP:
0262 case BPF_MAP_TYPE_XSKMAP:
0263 case BPF_MAP_TYPE_SOCKHASH:
0264 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
0265 break;
0266 case BPF_MAP_TYPE_UNSPEC:
0267 default:
0268 return -EOPNOTSUPP;
0269 }
0270
0271 if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
0272 map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
0273 fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
0274 sizeof(__u32), sizeof(__u32), 1, NULL);
0275 if (fd_inner < 0)
0276 goto cleanup;
0277
0278 opts.inner_map_fd = fd_inner;
0279 }
0280
0281 if (btf_fd >= 0) {
0282 opts.btf_fd = btf_fd;
0283 opts.btf_key_type_id = btf_key_type_id;
0284 opts.btf_value_type_id = btf_value_type_id;
0285 }
0286
0287 fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
0288 err = -errno;
0289
0290 cleanup:
0291 if (fd >= 0)
0292 close(fd);
0293 if (fd_inner >= 0)
0294 close(fd_inner);
0295 if (btf_fd >= 0)
0296 close(btf_fd);
0297
0298 if (exp_err)
0299 return fd < 0 && err == exp_err ? 1 : 0;
0300 else
0301 return fd >= 0 ? 1 : 0;
0302 }
0303
0304 int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
0305 {
0306 int ret;
0307
0308 if (opts)
0309 return libbpf_err(-EINVAL);
0310
0311 ret = probe_map_create(map_type);
0312 return libbpf_err(ret);
0313 }
0314
0315 int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
0316 const void *opts)
0317 {
0318 struct bpf_insn insns[] = {
0319 BPF_EMIT_CALL((__u32)helper_id),
0320 BPF_EXIT_INSN(),
0321 };
0322 const size_t insn_cnt = ARRAY_SIZE(insns);
0323 char buf[4096];
0324 int ret;
0325
0326 if (opts)
0327 return libbpf_err(-EINVAL);
0328
0329
0330
0331
0332 switch (prog_type) {
0333 case BPF_PROG_TYPE_TRACING:
0334 case BPF_PROG_TYPE_EXT:
0335 case BPF_PROG_TYPE_LSM:
0336 case BPF_PROG_TYPE_STRUCT_OPS:
0337 return -EOPNOTSUPP;
0338 default:
0339 break;
0340 }
0341
0342 buf[0] = '\0';
0343 ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf));
0344 if (ret < 0)
0345 return libbpf_err(ret);
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359 if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ")))
0360 return 0;
0361 return 1;
0362 }