// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>

#define MAX_USED_MAPS	64
#define MAX_USED_PROGS	32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

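/* Layout of the fd_array blob: the first MAX_USED_MAPS slots hold map fds,
 * the following MAX_KFUNC_DESCS slots hold module BTF fds used by kfunc
 * relocations (see add_map_fd() and add_kfunc_btf_fd() below).
 */
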
/* The layout of the stack used by the generated loader program.
 * In addition, R6 holds the pointer to the loader context and R7 holds
 * the result of the most recent sys_bpf command (see emit_sys_bpf()).
 *
 * Naming convention used by the helpers below:
 * ctx   - the bpf_loader_ctx passed to the loader program
 * stack - the loader program's stack (struct loader_stack)
 * blob  - the data map holding bpf_attr-s, strings, insns, map values;
 *         all the bytes the loader program reads or writes at runtime
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 inner_map_fd;
	__u32 prog_fd[MAX_USED_PROGS];
};

#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

static int blob_fd_array_off(struct bpf_gen *gen, int index)
{
	return gen->fd_array + index * sizeof(int);
}

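/* Grow the instruction buffer to make room for "size" more bytes.
 * On allocation failure the buffer is released and the error is latched
 * in gen->error, so subsequent emit() calls become no-ops.
 */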
static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}

static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);

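/* Emit the loader program prologue: save the ctx pointer in R6, zero the
 * loader stack, and lay down the shared cleanup/exit path that every
 * emit_check_err() branch jumps back to on failure.
 */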
void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
	int i;

	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* amount of stack actually used, only used to calculate iterations, not stack offset */
	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below (including map fd cleanup) */
			      (nr_progs_sz / 4) * 3 + 2 +
			      /* 6 insns for emit_sys_close_blob,
			       * 6 insns for debug_regs in emit_sys_close_blob
			       */
			      nr_maps * (6 + (gen->log_level ? 6 : 0))));

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < nr_progs_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	for (i = 0; i < nr_maps; i++)
		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
	/* R7 holds the error code of the failed command; return it to the caller */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
}

/* Copy "size" bytes of data into the blob, zero-padding up to an 8-byte
 * boundary, and return the blob offset of the copy. When data is NULL,
 * reserve zeroed space instead.
 */
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	if (data) {
		memcpy(gen->data_cur, data, size);
		memcpy(gen->data_cur + size, &zero, size8 - size);
	} else {
		memset(gen->data_cur, 0, size8);
	}
	gen->data_cur += size8;
	return prev - gen->data_start;
}

/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
 * to the start of fd_array. The caller decides whether it is usable.
 */
static int add_map_fd(struct bpf_gen *gen)
{
	if (gen->nr_maps == MAX_USED_MAPS) {
		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
		gen->error = -E2BIG;
		return 0;
	}
	return gen->nr_maps++;
}

static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
	int cur;

	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
		/* all reserved kfunc slots are taken, spill into the blob */
		cur = add_data(gen, NULL, sizeof(int));
		return (cur - gen->fd_array) / sizeof(int);
	}
	return MAX_USED_MAPS + gen->nr_fd_array++;
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

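/* The move_* helpers below generate code that copies "size" bytes between
 * the ctx, the loader stack, and the blob. Blob offsets are materialized
 * with BPF_PSEUDO_MAP_IDX_VALUE ldimm64 insns that the kernel relocates
 * to real map value addresses at load time.
 */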
static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
			  bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If value in ctx is zero don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}

static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains the result of the last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}

/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* The special case to accommodate common debug_ret():
		 * to avoid specifying BPF_REG_7 and adding " r=%%d" to
		 * prints explicitly.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

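/* Close the fd in R1 if it is positive; with logging enabled, also print
 * "close(fd) = ret" via debug_regs().
 */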
static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns
			       * 6 is additional insns in debug_regs
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}

static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
	int i;

	if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
		pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
			nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
		gen->error = -EFAULT;
		return gen->error;
	}
	emit_sys_close_stack(gen, stack_off(btf_fd));
	/* copy the resulting prog and map fds into the ctx descriptors */
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_blob2ctx(gen,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * i +
			      offsetof(struct bpf_map_desc, map_fd), 4,
			      blob_fd_array_off(gen, i));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	pr_debug("gen: finish %d\n", gen->error);
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;
	}
	return gen->error;
}

void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

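/* Stage the raw BTF in the blob and emit a BPF_BTF_LOAD command; the
 * resulting btf_fd is saved on the loader stack for subsequent map and
 * prog creation.
 */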
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: load_btf: size %d\n", btf_raw_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = btf_raw_size;
	btf_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

void bpf_gen__map_create(struct bpf_gen *gen,
			 enum bpf_map_type map_type,
			 const char *map_name,
			 __u32 key_size, __u32 value_size, __u32 max_entries,
			 struct bpf_map_create_opts *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_extra);
	bool close_inner_map_fd = false;
	int map_create_attr, idx;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.map_flags = map_attr->map_flags;
	attr.map_extra = map_attr->map_extra;
	if (map_name)
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.numa_node = map_attr->numa_node;
	attr.map_ifindex = map_attr->map_ifindex;
	attr.max_entries = max_entries;
	attr.btf_key_type_id = map_attr->btf_key_type_id;
	attr.btf_value_type_id = map_attr->btf_value_type_id;

	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
		 attr.map_name, map_idx, map_type, attr.btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	if (attr.btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (attr.map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  attr.map_name, map_idx, map_type, value_size,
		  attr.btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This bpf_gen__map_create() function is called with map_idx >= 0
		 * for all maps that libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		/* add_map_fd does gen->nr_maps++ */
		idx = add_map_fd(gen);
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}

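/* Remember the kernel-prefixed attach target name ("<prefix><name>") and
 * its BTF kind; emit_find_attach_target() resolves them at load time.
 */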
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}

static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in lower 32-bit of R7 and
	 * btf_obj_fd is in upper 32-bit
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
			    bool is_typeless, int kind, int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->is_weak = is_weak;
	relo->is_typeless = is_typeless;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	struct ksym_desc *kdesc;
	int i;

	for (i = 0; i < gen->nr_ksyms; i++) {
		if (!strcmp(gen->ksyms[i].name, relo->name)) {
			gen->ksyms[i].ref++;
			return &gen->ksyms[i];
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	return kdesc;
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * The BTF fd is reused for the same symbol, otherwise each relocation would
 * take a new fd_array slot, while the kernel limits the total number of
 * kfunc BTF fds to MAX_KFUNC_DESCS. The fd_array index is written into
 * insn->off and the BTF type ID into insn->imm; for vmlinux kfuncs both the
 * fd slot and insn->off stay 0.
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set value for imm, off as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* obtain fd in BPF_REG_9 */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	/* jump to fd_array store if fd denotes module BTF */
	emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
	/* set the default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip BTF fd store for vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* store BTF fd in slot */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

static __u32 src_reg_mask(void)
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
	return 0x0f; /* src_reg,dst_reg,... */
#elif defined(__BIG_ENDIAN_BITFIELD)
	return 0xf0; /* dst_reg,src_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		/* jump over src_reg adjustment if imm (btf_id) is not zero */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	/* skip src_reg adjustment */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask();
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

void bpf_gen__record_relo_core(struct bpf_gen *gen,
			       const struct bpf_core_relo *core_relo)
{
	struct bpf_core_relo *relos;

	relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
	if (!relos) {
		gen->error = -ENOMEM;
		return;
	}
	gen->core_relos = relos;
	relos += gen->core_relo_cnt;
	memcpy(relos, core_relo, sizeof(*relos));
	gen->core_relo_cnt++;
}

/* Load the blob address of the instruction to patch into R8 and dispatch
 * on the relocation kind.
 */
static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	switch (relo->kind) {
	case BTF_KIND_VAR:
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
		break;
	case BTF_KIND_FUNC:
		emit_relo_kfunc_btf(gen, relo, insn);
		break;
	default:
		pr_warn("Unknown relocation kind '%d'\n", relo->kind);
		gen->error = -EDOM;
		return;
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_core_relo(struct bpf_gen *gen)
{
	if (!gen->core_relo_cnt)
		return;
	free(gen->core_relos);
	gen->core_relo_cnt = 0;
	gen->core_relos = NULL;
}

static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	int i, insn;

	for (i = 0; i < gen->nr_ksyms; i++) {
		/* only close fds for typed ksyms and kfuncs */
		if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = gen->ksyms[i].insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
			if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
	cleanup_core_relo(gen);
}

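/* Generate the code that loads one BPF program: stage the license, insns,
 * func/line info, and CO-RE relos in the blob, patch ksym/kfunc relocations,
 * emit BPF_PROG_LOAD, and save the resulting prog fd on the loader stack.
 */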
void bpf_gen__prog_load(struct bpf_gen *gen,
			enum bpf_prog_type prog_type, const char *prog_name,
			const char *license, struct bpf_insn *insns, size_t insn_cnt,
			struct bpf_prog_load_opts *load_attr, int prog_idx)
{
	int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
	int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: prog_load: type %d insn_cnt %zd prog_idx %d\n",
		 prog_type, insn_cnt, prog_idx);
	/* add license string to blob of bytes */
	license_off = add_data(gen, license, strlen(license) + 1);
	/* add insns to blob of bytes */
	insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));

	attr.prog_type = prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	attr.attach_btf_id = load_attr->attach_btf_id;
	attr.prog_ifindex = load_attr->prog_ifindex;
	attr.kern_version = 0;
	attr.insn_cnt = (__u32)insn_cnt;
	attr.prog_flags = load_attr->prog_flags;

	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	func_info = add_data(gen, load_attr->func_info,
			     attr.func_info_cnt * attr.func_info_rec_size);

	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	line_info = add_data(gen, load_attr->line_info,
			     attr.line_info_cnt * attr.line_info_rec_size);

	attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
	attr.core_relo_cnt = gen->core_relo_cnt;
	core_relos = add_data(gen, gen->core_relos,
			      attr.core_relo_cnt * attr.core_relo_rec_size);

	libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	prog_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr with a pointer to core_relos */
	emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);

	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns_off);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close module BTF fds used by extern ksyms and the attach target */
	cleanup_relos(gen, insns_off);
	if (gen->attach_kind) {
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
		gen->attach_kind = 0;
	}
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}

void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_update_elem: idx %d\n", map_idx);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (ctx->maps[map_idx].initial_value) {
	 *    if (ctx->flags & BPF_SKEL_KERNEL)
	 *        bpf_probe_read_kernel(value, value_size, initial_value);
	 *    else
	 *        bpf_copy_from_user(value, value_size, initial_value);
	 * }
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
			      offsetof(struct bpf_loader_ctx, flags)));
	emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

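/* Populate one slot of an outer map-in-map with the fd of an inner map
 * taken from the fd_array blob.
 */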
void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
				 int inner_map_idx)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, key;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
		 outer_map_idx, slot, inner_map_idx);

	key = add_data(gen, &slot, sizeof(slot));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, outer_map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value),
		       blob_fd_array_off(gen, inner_map_idx));

	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
		  outer_map_idx, slot, inner_map_idx);
	emit_check_err(gen);
}

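/* Emit a BPF_MAP_FREEZE command for the map at map_idx, e.g. to make
 * .rodata maps read-only once their initial values are written.
 */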
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_freeze: idx %d\n", map_idx);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}