#ifndef __SKEL_INTERNAL_H
#define __SKEL_INTERNAL_H

#ifdef __KERNEL__
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#else
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <errno.h>	/* errno and EINVAL are used below */
#include "bpf.h"
#endif

#ifndef __NR_bpf
# if defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# endif
#endif

/* This file is the base header for auto-generated *.lskel.h "light
 * skeleton" files. It is intentionally self-contained: it must build
 * both in user space without libbpf and inside the kernel (used by
 * preloaded skeletons), which is why most helpers below come in a
 * __KERNEL__ and a non-__KERNEL__ flavor.
 */
struct bpf_map_desc {
	/* output of the loader prog */
	int map_fd;
	/* input for the loader prog */
	__u32 max_entries;
	__aligned_u64 initial_value;
};
struct bpf_prog_desc {
	int prog_fd;
};

enum {
	/* set in bpf_loader_ctx->flags by skel_alloc() when running in-kernel */
	BPF_SKEL_KERNEL = (1ULL << 0),
};

struct bpf_loader_ctx {
	__u32 sz;	/* size of the context passed to the loader prog */
	__u32 flags;
	__u32 log_level;
	__u32 log_size;
	__u64 log_buf;	/* user pointer, stored as u64 for 32/64-bit compat */
};

struct bpf_load_and_run_opts {
	struct bpf_loader_ctx *ctx;
	const void *data;	/* contents of the loader map */
	const void *insns;	/* instructions of the loader prog */
	__u32 data_sz;
	__u32 insns_sz;
	const char *errstr;	/* set on failure; points at a string literal */
};
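
/* In a generated skeleton, bpf_loader_ctx is laid out as the first member
 * of the skeleton struct, followed by the bpf_map_desc/bpf_prog_desc
 * entries; the loader prog writes the resulting FDs back through it.
 * (Layout description only; the exact struct is emitted by bpftool.)
 */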

long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);

/* Single entry point for bpf() commands: a direct call when built into the
 * kernel, the raw syscall otherwise.
 */
static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			       unsigned int size)
{
#ifdef __KERNEL__
	return kern_sys_bpf(cmd, attr, size);
#else
	return syscall(__NR_bpf, cmd, attr, size);
#endif
}

#ifdef __KERNEL__
static inline int close(int fd)
{
	return close_fd(fd);
}

static inline void *skel_alloc(size_t size)
{
	struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL);

	if (!ctx)
		return NULL;
	/* mark the context as kernel-resident */
	ctx->flags |= BPF_SKEL_KERNEL;
	return ctx;
}

static inline void skel_free(const void *p)
{
	kfree(p);
}

/* The bss/rodata maps of a light skeleton are populated as follows:
 *
 * User space:
 * skel_prep_map_data() mmaps anonymous memory and copies the initial
 *   value into it; the loader prog then moves that data into the map.
 * skel_finalize_map_data() re-mmaps the created map FD over the same
 *   address (MAP_FIXED), so the skeleton keeps a direct view of the
 *   map contents.
 * skel_free_map_data() munmaps that region on destroy.
 *
 * Kernel:
 * skel_prep_map_data() kvmallocs a copy of the initial value.
 * skel_finalize_map_data() frees that copy and instead returns a
 *   pointer directly into the array map's value area, which stays
 *   valid as long as the map FD is held.
 * skel_free_map_data() only frees the kvmalloc'ed copy; the direct
 *   pointer (signalled by addr == ~0ULL) belongs to the map itself.
 */
static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	if (addr != ~0ULL)
		kvfree(p);
	/* When addr == ~0ULL, 'p' points into the map's own value area
	 * (see skel_finalize_map_data()) and must not be freed here.
	 */
}

static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
	void *addr;

	/* mmap_sz is only meaningful for the user space variant below */
	addr = kvmalloc(val_sz, GFP_KERNEL);
	if (!addr)
		return NULL;
	memcpy(addr, val, val_sz);
	return addr;
}

static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	struct bpf_map *map;
	void *addr = NULL;

	kvfree((void *) (long) *init_val);
	*init_val = ~0ULL;

	/* At this point bpf_load_and_run() finished without error and
	 * 'fd' is a valid bpf array map FD, so the sanity checks below
	 * should succeed.
	 */
	map = bpf_map_get(fd);
	if (IS_ERR(map))
		return NULL;
	if (map->map_type != BPF_MAP_TYPE_ARRAY)
		goto out;
	addr = ((struct bpf_array *)map)->value;
	/* addr stays valid, since the map FD is not closed */
out:
	bpf_map_put(map);
	return addr;
}

#else

static inline void *skel_alloc(size_t size)
{
	return calloc(1, size);
}

static inline void skel_free(void *p)
{
	free(p);
}

static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	munmap(p, sz);
}

static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
	void *addr;

	addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (addr == (void *) -1)
		return NULL;
	memcpy(addr, val, val_sz);
	return addr;
}

static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	void *addr;

	/* Replace the anonymous mapping with a mapping of the map FD itself,
	 * at the same address, so user space keeps seeing the map contents.
	 */
	addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
	if (addr == (void *) -1)
		return NULL;
	return addr;
}
#endif
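
/* A sketch of the map-data lifecycle as a generated skeleton would drive
 * it (identifiers such as 'skel->rodata' are illustrative, not defined
 * by this header):
 *
 *	// before load: stage the initial contents
 *	skel->rodata = skel_prep_map_data(init_blob, mmap_sz, sizeof(init_blob));
 *
 *	// after bpf_load_and_run() succeeded: switch to the real map
 *	skel->rodata = skel_finalize_map_data(&skel->maps.rodata.initial_value,
 *					      mmap_sz, PROT_READ, map_fd);
 *
 *	// on destroy
 *	skel_free_map_data(skel->rodata, skel->maps.rodata.initial_value, mmap_sz);
 */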

static inline int skel_closenz(int fd)
{
	/* zero-initialized descriptors use fd <= 0 to mean "not present" */
	if (fd > 0)
		return close(fd);
	return -EINVAL;
}

#ifndef offsetofend
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
#endif
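
/* offsetofend() yields the offset of the first byte past MEMBER, which is
 * exactly the attr size the kernel expects for a command that only uses
 * fields up to MEMBER. A worked example (hypothetical struct, typical
 * 64-bit alignment):
 *
 *	struct s { __u32 a; __u64 b; };
 *	offsetofend(struct s, a) == 4
 *	offsetofend(struct s, b) == 16	// 'b' is padded to offset 8
 */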

static inline int skel_map_create(enum bpf_map_type map_type,
				  const char *map_name,
				  __u32 key_size,
				  __u32 value_size,
				  __u32 max_entries)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);

	attr.map_type = map_type;
	strncpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz);
}
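
/* For example, bpf_load_and_run() below creates its loader map like this
 * (4-byte keys, one element holding the whole data blob):
 *
 *	int map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map",
 *				     4, opts->data_sz, 1);
 */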

static inline int skel_map_update_elem(int fd, const void *key,
				       const void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = (long) key;
	attr.value = (long) value;
	attr.flags = flags;

	return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
}
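
/* Example: write 'value' into slot 0 of an array map (a sketch; 'value'
 * must point at value_size bytes):
 *
 *	int key = 0;
 *	int err = skel_map_update_elem(map_fd, &key, value, 0);  // 0 == BPF_ANY
 */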

static inline int skel_raw_tracepoint_open(const char *name, int prog_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.name = (long) name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
}
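
/* Example: attach an already-loaded BPF_PROG_TYPE_RAW_TRACEPOINT prog to
 * the sys_enter tracepoint (a sketch; prog_fd comes from the skeleton):
 *
 *	int link_fd = skel_raw_tracepoint_open("sys_enter", prog_fd);
 *	if (link_fd < 0)
 *		// handle error; errno is set in the user space build
 */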

static inline int skel_link_create(int prog_fd, int target_fd,
				   enum bpf_attach_type attach_type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;

	return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
}
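
/* Example: attach a tracing prog via a BPF link (a sketch; the attach type
 * must match the one the prog was loaded with, and fentry/fexit progs use
 * target_fd == 0 since their target is fixed at load time):
 *
 *	int link_fd = skel_link_create(prog_fd, 0, BPF_TRACE_FENTRY);
 */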

/* In-kernel, kern_sys_bpf() returns an -errno style error directly; in
 * user space the syscall returns -1 and sets errno, so convert it here.
 */
#ifdef __KERNEL__
#define set_err
#else
#define set_err err = -errno
#endif

static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{
	int map_fd = -1, prog_fd = -1, key = 0, err;
	union bpf_attr attr;

	/* Stage the skeleton's data blob in a single-element array map. */
	err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
	if (map_fd < 0) {
		opts->errstr = "failed to create loader map";
		set_err;
		goto out;
	}

	err = skel_map_update_elem(map_fd, &key, opts->data, 0);
	if (err < 0) {
		opts->errstr = "failed to update loader map";
		set_err;
		goto out;
	}

	/* Load the loader prog itself; it reaches the map via fd_array. */
	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SYSCALL;
	attr.insns = (long) opts->insns;
	attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
	attr.license = (long) "Dual BSD/GPL";
	memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
	attr.fd_array = (long) &map_fd;
	attr.log_level = opts->ctx->log_level;
	attr.log_size = opts->ctx->log_size;
	attr.log_buf = opts->ctx->log_buf;
	attr.prog_flags = BPF_F_SLEEPABLE;
	err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (prog_fd < 0) {
		opts->errstr = "failed to load loader prog";
		set_err;
		goto out;
	}

	/* Run the loader prog once; it creates the skeleton's maps and progs
	 * and reports their FDs back through the context.
	 */
	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.ctx_in = (long) opts->ctx;
	attr.test.ctx_size_in = opts->ctx->sz;
	err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr));
	if (err < 0 || (int)attr.test.retval < 0) {
		opts->errstr = "failed to execute loader prog";
		if (err < 0) {
			set_err;
		} else {
			err = (int)attr.test.retval;
#ifndef __KERNEL__
			errno = -err;
#endif
		}
		goto out;
	}
	err = 0;
out:
	/* the loader map and prog are only needed during loading */
	if (map_fd >= 0)
		close(map_fd);
	if (prog_fd >= 0)
		close(prog_fd);
	return err;
}
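
/* Putting it together: a sketch of what a generated *.lskel.h load
 * function does with the helpers above ('skel', 'opts_data' and
 * 'opts_insn' are illustrative names emitted by bpftool for the
 * specific object, not part of this header):
 *
 *	struct bpf_load_and_run_opts opts = {};
 *	int err;
 *
 *	opts.ctx = (struct bpf_loader_ctx *)skel;
 *	opts.data = opts_data;		// loader map contents
 *	opts.data_sz = ...;
 *	opts.insns = opts_insn;		// loader prog instructions
 *	opts.insns_sz = ...;
 *	err = bpf_load_and_run(&opts);
 *	if (err < 0)
 *		return err;
 *	// on success the loader prog has filled in the map_fd/prog_fd
 *	// fields of the descs embedded in 'skel'
 */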

#endif