Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
0002 /* Copyright (c) 2021 Facebook */
0003 #include <stdio.h>
0004 #include <stdlib.h>
0005 #include <string.h>
0006 #include <errno.h>
0007 #include <linux/filter.h>
0008 #include <sys/param.h>
0009 #include "btf.h"
0010 #include "bpf.h"
0011 #include "libbpf.h"
0012 #include "libbpf_internal.h"
0013 #include "hashmap.h"
0014 #include "bpf_gen_internal.h"
0015 #include "skel_internal.h"
0016 #include <asm/byteorder.h>
0017 
/* Limits on what one loader program may reference. MAX_FD_ARRAY_SZ is the
 * size (in slots) of the fd_array region reserved at the start of the data
 * blob: the first MAX_USED_MAPS slots hold map FDs, the remaining
 * MAX_KFUNC_DESCS slots hold module BTF FDs for kfunc relocations.
 */
#define MAX_USED_MAPS   64
#define MAX_USED_PROGS  32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

/* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
 * R7 contains the result of the last sys_bpf command (typically error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that loader prog will use for read/write.
 */
struct loader_stack {
    __u32 btf_fd;                  /* FD of the BTF loaded by the loader prog */
    __u32 inner_map_fd;            /* temp FD used while creating maps-in-maps */
    __u32 prog_fd[MAX_USED_PROGS]; /* FDs of successfully loaded programs */
};

/* negative offset from R10 (frame pointer) of @field in struct loader_stack */
#define stack_off(field) \
    (__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

/* blob offset of @field inside a union bpf_attr stored at blob offset @attr */
#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
0044 
0045 static int blob_fd_array_off(struct bpf_gen *gen, int index)
0046 {
0047     return gen->fd_array + index * sizeof(int);
0048 }
0049 
0050 static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
0051 {
0052     size_t off = gen->insn_cur - gen->insn_start;
0053     void *insn_start;
0054 
0055     if (gen->error)
0056         return gen->error;
0057     if (size > INT32_MAX || off + size > INT32_MAX) {
0058         gen->error = -ERANGE;
0059         return -ERANGE;
0060     }
0061     insn_start = realloc(gen->insn_start, off + size);
0062     if (!insn_start) {
0063         gen->error = -ENOMEM;
0064         free(gen->insn_start);
0065         gen->insn_start = NULL;
0066         return -ENOMEM;
0067     }
0068     gen->insn_start = insn_start;
0069     gen->insn_cur = insn_start + off;
0070     return 0;
0071 }
0072 
0073 static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
0074 {
0075     size_t off = gen->data_cur - gen->data_start;
0076     void *data_start;
0077 
0078     if (gen->error)
0079         return gen->error;
0080     if (size > INT32_MAX || off + size > INT32_MAX) {
0081         gen->error = -ERANGE;
0082         return -ERANGE;
0083     }
0084     data_start = realloc(gen->data_start, off + size);
0085     if (!data_start) {
0086         gen->error = -ENOMEM;
0087         free(gen->data_start);
0088         gen->data_start = NULL;
0089         return -ENOMEM;
0090     }
0091     gen->data_start = data_start;
0092     gen->data_cur = data_start + off;
0093     return 0;
0094 }
0095 
/* Append one instruction to the loader program. Silently drops the insn if
 * the generator is already in an error state (realloc_insn_buf is sticky).
 */
static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
    if (realloc_insn_buf(gen, sizeof(insn)))
        return;
    memcpy(gen->insn_cur, &insn, sizeof(insn));
    gen->insn_cur += sizeof(insn);
}
0103 
/* Append an instruction pair (e.g. the two halves of a ldimm64). */
static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
    emit(gen, insn1);
    emit(gen, insn2);
}
0109 
0110 static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
0111 static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
0112 
/* Emit the loader program prologue: stash the ctx pointer in R6, zero the
 * loader stack, and lay down the error-cleanup epilogue that every
 * emit_check_err() branch jumps back to (gen->cleanup_label).
 */
void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
    size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
    int i;

    /* reserve the fd_array region at the very start of the data blob */
    gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
    gen->log_level = log_level;
    /* save ctx pointer into R6 */
    emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

    /* bzero stack */
    emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
    emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
    emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
    emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
    /* reading from NULL faults and probe_read_kernel zero-fills the
     * destination, which is used here as a bzero of the stack area
     */
    emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

    /* amount of stack actually used, only used to calculate iterations, not stack offset */
    nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
    /* jump over cleanup code */
    emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
                  /* size of cleanup code below (including map fd cleanup) */
                  (nr_progs_sz / 4) * 3 + 2 +
                  /* 6 insns for emit_sys_close_blob,
                   * 6 insns for debug_regs in emit_sys_close_blob
                   */
                  nr_maps * (6 + (gen->log_level ? 6 : 0))));

    /* remember the label where all error branches will jump to */
    gen->cleanup_label = gen->insn_cur - gen->insn_start;
    /* emit cleanup code: close all temp FDs */
    /* one 4-byte FD slot per iteration; 3 insns per slot must match the
     * (nr_progs_sz / 4) * 3 term in the jump above
     */
    for (i = 0; i < nr_progs_sz; i += 4) {
        emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
        emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
        emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
    }
    for (i = 0; i < nr_maps; i++)
        emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
    /* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
    emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
    emit(gen, BPF_EXIT_INSN());
}
0155 
0156 static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
0157 {
0158     __u32 size8 = roundup(size, 8);
0159     __u64 zero = 0;
0160     void *prev;
0161 
0162     if (realloc_data_buf(gen, size8))
0163         return 0;
0164     prev = gen->data_cur;
0165     if (data) {
0166         memcpy(gen->data_cur, data, size);
0167         memcpy(gen->data_cur + size, &zero, size8 - size);
0168     } else {
0169         memset(gen->data_cur, 0, size8);
0170     }
0171     gen->data_cur += size8;
0172     return prev - gen->data_start;
0173 }
0174 
0175 /* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
0176  * to start of fd_array. Caller can decide if it is usable or not.
0177  */
0178 static int add_map_fd(struct bpf_gen *gen)
0179 {
0180     if (gen->nr_maps == MAX_USED_MAPS) {
0181         pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
0182         gen->error = -E2BIG;
0183         return 0;
0184     }
0185     return gen->nr_maps++;
0186 }
0187 
0188 static int add_kfunc_btf_fd(struct bpf_gen *gen)
0189 {
0190     int cur;
0191 
0192     if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
0193         cur = add_data(gen, NULL, sizeof(int));
0194         return (cur - gen->fd_array) / sizeof(int);
0195     }
0196     return MAX_USED_MAPS + gen->nr_fd_array++;
0197 }
0198 
/* Map an access width in bytes to the BPF_{B,H,W,DW} size encoding,
 * or -1 for an unsupported width.
 */
static int insn_bytes_to_bpf_size(__u32 sz)
{
    if (sz == 1)
        return BPF_B;
    if (sz == 2)
        return BPF_H;
    if (sz == 4)
        return BPF_W;
    if (sz == 8)
        return BPF_DW;
    return -1;
}
0209 
0210 /* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
0211 static void emit_rel_store(struct bpf_gen *gen, int off, int data)
0212 {
0213     emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
0214                      0, 0, 0, data));
0215     emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0216                      0, 0, 0, off));
0217     emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
0218 }
0219 
/* Copy @size bytes from blob offset @blob_off to blob offset @off.
 * Clobbers R0/R1/R2; the copied value is left in R0 (callers may rely on it).
 */
static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, blob_off));
    emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, off));
    emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}
0229 
/* Copy @size bytes from blob offset @blob_off into the context at @ctx_off
 * (ctx pointer is kept in R6). Clobbers R0/R1.
 */
static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
{
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, blob_off));
    emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
    emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}
0237 
/* Copy @size bytes from the context at @ctx_off (ctx in R6) to blob offset
 * @off. Clobbers R0/R1.
 */
static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
               bool check_non_zero)
{
    emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
    if (check_non_zero)
        /* If value in ctx is zero don't update the blob.
         * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
         * Jump offset 3 skips the ldimm64 pair plus the store below.
         */
        emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, off));
    emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}
0251 
/* Copy @size bytes from the loader stack at @stack_off (relative to R10) to
 * blob offset @off. Clobbers R0/R1.
 */
static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
    emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, off));
    emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}
0259 
/* Copy @size bytes from the loader stack at @stack_off (relative to R10)
 * into the context at @ctx_off (ctx in R6). Clobbers R0.
 */
static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
    emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
    emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}
0265 
/* Emit bpf_sys_bpf(cmd, blob + attr, attr_size); the attr pointer is
 * relocated via BPF_PSEUDO_MAP_IDX_VALUE. Result lands in R7 per the
 * loader-program register convention.
 */
static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
    emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, attr));
    emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
    emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
    /* remember the result in R7 */
    emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}
0276 
/* true iff @value fits a signed 16-bit field (BPF jump offset) */
static bool is_simm16(__s64 value)
{
    return value >= INT16_MIN && value <= INT16_MAX;
}
0281 
/* Emit "if (R7 < 0) goto cleanup". The backwards offset is measured in
 * 8-byte insn units from the insn *after* this jump (hence the extra -1)
 * to gen->cleanup_label recorded in bpf_gen__init().
 */
static void emit_check_err(struct bpf_gen *gen)
{
    __s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

    /* R7 contains result of last sys_bpf command.
     * if (R7 < 0) goto cleanup;
     */
    if (is_simm16(off)) {
        emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
    } else {
        /* offset doesn't fit the 16-bit jump field; record the error and
         * emit a self-jump placeholder so insn accounting stays intact
         */
        gen->error = -ERANGE;
        emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
    }
}
0296 
0297 /* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
0298 static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
0299                const char *fmt, va_list args)
0300 {
0301     char buf[1024];
0302     int addr, len, ret;
0303 
0304     if (!gen->log_level)
0305         return;
0306     ret = vsnprintf(buf, sizeof(buf), fmt, args);
0307     if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
0308         /* The special case to accommodate common debug_ret():
0309          * to avoid specifying BPF_REG_7 and adding " r=%%d" to
0310          * prints explicitly.
0311          */
0312         strcat(buf, " r=%d");
0313     len = strlen(buf) + 1;
0314     addr = add_data(gen, buf, len);
0315 
0316     emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0317                      0, 0, 0, addr));
0318     emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
0319     if (reg1 >= 0)
0320         emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
0321     if (reg2 >= 0)
0322         emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
0323     emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
0324 }
0325 
/* Variadic front-end to emit_debug(): print @fmt with the values of
 * @reg1/@reg2 (negative means unused).
 */
static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    emit_debug(gen, reg1, reg2, fmt, args);
    va_end(args);
}
0334 
/* Convenience wrapper: print @fmt followed by the last sys_bpf result
 * (R7; emit_debug() appends " r=%d" for this reg1>=0/reg2<0 combination).
 */
static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    emit_debug(gen, BPF_REG_7, -1, fmt, args);
    va_end(args);
}
0343 
/* Emit "if (R1 > 0) { R9 = R1; sys_close(R1); }". Expects the FD in R1;
 * non-positive FDs are skipped by jumping over the close sequence.
 */
static void __emit_sys_close(struct bpf_gen *gen)
{
    emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
                  /* 2 is the number of the following insns
                   * * 6 is additional insns in debug_regs
                   */
                  2 + (gen->log_level ? 6 : 0)));
    emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
    emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
    debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}
0355 
/* Close the FD stored on the loader stack at @stack_off (relative to R10). */
static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
    emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
    __emit_sys_close(gen);
}
0361 
/* Close the FD stored in the data blob at @blob_off (emits 6 insns plus
 * debug_regs — bpf_gen__init's jump accounting depends on these counts).
 */
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, blob_off));
    emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
    __emit_sys_close(gen);
}
0369 
/* Emit the success epilogue (copy loaded prog/map FDs into the caller's
 * bpf_loader_ctx descriptors, close the temporary BTF FD, return 0) and,
 * if no error was recorded, hand the finished insns/data buffers to
 * gen->opts. Returns gen->error.
 */
int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
    int i;

    /* sanity-check that what libbpf asked us to record matches what the
     * caller expects the skeleton to contain
     */
    if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
        pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
            nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
        gen->error = -EFAULT;
        return gen->error;
    }
    emit_sys_close_stack(gen, stack_off(btf_fd));
    /* ctx layout: bpf_loader_ctx, then nr_maps map descs, then prog descs */
    for (i = 0; i < gen->nr_progs; i++)
        move_stack2ctx(gen,
                   sizeof(struct bpf_loader_ctx) +
                   sizeof(struct bpf_map_desc) * gen->nr_maps +
                   sizeof(struct bpf_prog_desc) * i +
                   offsetof(struct bpf_prog_desc, prog_fd), 4,
                   stack_off(prog_fd[i]));
    for (i = 0; i < gen->nr_maps; i++)
        move_blob2ctx(gen,
                  sizeof(struct bpf_loader_ctx) +
                  sizeof(struct bpf_map_desc) * i +
                  offsetof(struct bpf_map_desc, map_fd), 4,
                  blob_fd_array_off(gen, i));
    emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
    emit(gen, BPF_EXIT_INSN());
    pr_debug("gen: finish %d\n", gen->error);
    if (!gen->error) {
        struct gen_loader_opts *opts = gen->opts;

        /* ownership of the buffers stays with gen; bpf_gen__free() frees them */
        opts->insns = gen->insn_start;
        opts->insns_sz = gen->insn_cur - gen->insn_start;
        opts->data = gen->data_start;
        opts->data_sz = gen->data_cur - gen->data_start;
    }
    return gen->error;
}
0407 
0408 void bpf_gen__free(struct bpf_gen *gen)
0409 {
0410     if (!gen)
0411         return;
0412     free(gen->data_start);
0413     free(gen->insn_start);
0414     free(gen);
0415 }
0416 
/* Embed raw BTF data in the blob and emit a BPF_BTF_LOAD command for it.
 * The resulting btf_fd is saved on the loader stack for later map/prog
 * creation; on failure the emitted code jumps to the cleanup epilogue.
 */
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
               __u32 btf_raw_size)
{
    int attr_size = offsetofend(union bpf_attr, btf_log_level);
    int btf_data, btf_load_attr;
    union bpf_attr attr;

    /* only the fields up to btf_log_level are used for this command */
    memset(&attr, 0, attr_size);
    pr_debug("gen: load_btf: size %d\n", btf_raw_size);
    btf_data = add_data(gen, btf_raw_data, btf_raw_size);

    attr.btf_size = btf_raw_size;
    btf_load_attr = add_data(gen, &attr, attr_size);

    /* populate union bpf_attr with user provided log details */
    move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
              offsetof(struct bpf_loader_ctx, log_level), false);
    move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
              offsetof(struct bpf_loader_ctx, log_size), false);
    move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
              offsetof(struct bpf_loader_ctx, log_buf), false);
    /* populate union bpf_attr with a pointer to the BTF data */
    emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
    /* emit BTF_LOAD command */
    emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
    debug_ret(gen, "btf_load size %d", btf_raw_size);
    emit_check_err(gen);
    /* remember btf_fd in the stack, if successful */
    emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}
0447 
/* Emit a BPF_MAP_CREATE command for one map. @map_idx >= 0 selects the slot
 * in the ctx map descriptors and in fd_array; @map_idx == -1 creates an
 * inner map whose FD is parked on the loader stack until the outer
 * map-in-map is created.
 */
void bpf_gen__map_create(struct bpf_gen *gen,
             enum bpf_map_type map_type,
             const char *map_name,
             __u32 key_size, __u32 value_size, __u32 max_entries,
             struct bpf_map_create_opts *map_attr, int map_idx)
{
    int attr_size = offsetofend(union bpf_attr, map_extra);
    bool close_inner_map_fd = false;
    int map_create_attr, idx;
    union bpf_attr attr;

    memset(&attr, 0, attr_size);
    attr.map_type = map_type;
    attr.key_size = key_size;
    attr.value_size = value_size;
    attr.map_flags = map_attr->map_flags;
    attr.map_extra = map_attr->map_extra;
    if (map_name)
        libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
    attr.numa_node = map_attr->numa_node;
    attr.map_ifindex = map_attr->map_ifindex;
    attr.max_entries = max_entries;
    attr.btf_key_type_id = map_attr->btf_key_type_id;
    attr.btf_value_type_id = map_attr->btf_value_type_id;

    pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
         attr.map_name, map_idx, map_type, attr.btf_value_type_id);

    map_create_attr = add_data(gen, &attr, attr_size);
    if (attr.btf_value_type_id)
        /* populate union bpf_attr with btf_fd saved in the stack earlier */
        move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
                stack_off(btf_fd));
    switch (attr.map_type) {
    case BPF_MAP_TYPE_ARRAY_OF_MAPS:
    case BPF_MAP_TYPE_HASH_OF_MAPS:
        /* outer map: take the inner map FD parked on the stack by the
         * earlier map_idx == -1 call, and close it once we're done
         */
        move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
                stack_off(inner_map_fd));
        close_inner_map_fd = true;
        break;
    default:
        break;
    }
    /* conditionally update max_entries */
    if (map_idx >= 0)
        move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
                  sizeof(struct bpf_loader_ctx) +
                  sizeof(struct bpf_map_desc) * map_idx +
                  offsetof(struct bpf_map_desc, max_entries),
                  true /* check that max_entries != 0 */);
    /* emit MAP_CREATE command */
    emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
    debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
          attr.map_name, map_idx, map_type, value_size,
          attr.btf_value_type_id);
    emit_check_err(gen);
    /* remember map_fd in the stack, if successful */
    if (map_idx < 0) {
        /* This bpf_gen__map_create() function is called with map_idx >= 0
         * for all maps that libbpf loading logic tracks.
         * It's called with -1 to create an inner map.
         */
        emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
                      stack_off(inner_map_fd)));
    } else if (map_idx != gen->nr_maps) {
        gen->error = -EDOM; /* internal bug */
        return;
    } else {
        /* add_map_fd does gen->nr_maps++ */
        idx = add_map_fd(gen);
        emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
                         0, 0, 0, blob_fd_array_off(gen, idx)));
        emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
    }
    if (close_inner_map_fd)
        emit_sys_close_stack(gen, stack_off(inner_map_fd));
}
0525 
0526 void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
0527                    enum bpf_attach_type type)
0528 {
0529     const char *prefix;
0530     int kind, ret;
0531 
0532     btf_get_kernel_prefix_kind(type, &prefix, &kind);
0533     gen->attach_kind = kind;
0534     ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
0535                prefix, attach_name);
0536     if (ret >= sizeof(gen->attach_target))
0537         gen->error = -ENOSPC;
0538 }
0539 
/* Emit bpf_btf_find_by_name_kind() for the target recorded by
 * bpf_gen__record_attach_target(); result is checked and kept in R7.
 */
static void emit_find_attach_target(struct bpf_gen *gen)
{
    int name, len = strlen(gen->attach_target) + 1;

    pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
    name = add_data(gen, gen->attach_target, len);

    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, name));
    emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
    emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
    emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
    emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
    emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
    debug_ret(gen, "find_by_name_kind(%s,%d)",
          gen->attach_target, gen->attach_kind);
    emit_check_err(gen);
    /* if successful, btf_id is in lower 32-bit of R7 and
     * btf_obj_fd is in upper 32-bit
     */
}
0561 
0562 void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
0563                 bool is_typeless, int kind, int insn_idx)
0564 {
0565     struct ksym_relo_desc *relo;
0566 
0567     relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
0568     if (!relo) {
0569         gen->error = -ENOMEM;
0570         return;
0571     }
0572     gen->relos = relo;
0573     relo += gen->relo_cnt;
0574     relo->name = name;
0575     relo->is_weak = is_weak;
0576     relo->is_typeless = is_typeless;
0577     relo->kind = kind;
0578     relo->insn_idx = insn_idx;
0579     gen->relo_cnt++;
0580 }
0581 
0582 /* returns existing ksym_desc with ref incremented, or inserts a new one */
0583 static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
0584 {
0585     struct ksym_desc *kdesc;
0586     int i;
0587 
0588     for (i = 0; i < gen->nr_ksyms; i++) {
0589         if (!strcmp(gen->ksyms[i].name, relo->name)) {
0590             gen->ksyms[i].ref++;
0591             return &gen->ksyms[i];
0592         }
0593     }
0594     kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
0595     if (!kdesc) {
0596         gen->error = -ENOMEM;
0597         return NULL;
0598     }
0599     gen->ksyms = kdesc;
0600     kdesc = &gen->ksyms[gen->nr_ksyms++];
0601     kdesc->name = relo->name;
0602     kdesc->kind = relo->kind;
0603     kdesc->ref = 1;
0604     kdesc->off = 0;
0605     kdesc->insn = 0;
0606     return kdesc;
0607 }
0608 
0609 /* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
0610  * Returns result in BPF_REG_7
0611  */
/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Emits bpf_btf_find_by_name_kind(name, len, kind, 0) for @relo's symbol;
 * does NOT emit an error check — callers decide (weak syms tolerate failure).
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
    int name_off, len = strlen(relo->name) + 1;

    name_off = add_data(gen, relo->name, len);
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, name_off));
    emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
    emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
    emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
    emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
    emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
    debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}
0626 
0627 /* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
0628  * Returns result in BPF_REG_7
0629  * Returns u64 symbol addr in BPF_REG_9
0630  */
/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 * The address is written by the helper into an 8-byte blob scratch slot
 * (res_off) and loaded back into R9.
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
    int name_off, len = strlen(relo->name) + 1, res_off;

    name_off = add_data(gen, relo->name, len);
    res_off = add_data(gen, NULL, 8); /* res is u64 */
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, name_off));
    emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
    emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, res_off));
    /* keep the result pointer in R7 across the call (R4 is clobbered) */
    emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
    emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
    emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
    emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
    debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}
0649 
/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse BTF fd for same symbol otherwise each relocation takes a new
 * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols,
 * this would mean a new BTF fd index for each entry. By pairing symbol name
 * with index, we get the insn->imm, insn->off pairing that kernel uses for
 * kfunc_tab, which becomes the effective limit even though all of them may
 * share same index in fd_array (such that kfunc_btf_tab has 1 element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
    struct ksym_desc *kdesc;
    int btf_fd_idx;

    kdesc = get_ksym_desc(gen, relo);
    if (!kdesc)
        return;
    /* try to copy from existing bpf_insn */
    if (kdesc->ref > 1) {
        move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
                   kdesc->insn + offsetof(struct bpf_insn, imm));
        move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
                   kdesc->insn + offsetof(struct bpf_insn, off));
        goto log;
    }
    /* remember insn offset, so we can copy BTF ID and FD later */
    kdesc->insn = insn;
    emit_bpf_find_by_name_kind(gen, relo);
    if (!relo->is_weak)
        emit_check_err(gen);
    /* get index in fd_array to store BTF FD at */
    btf_fd_idx = add_kfunc_btf_fd(gen);
    /* the index is stored in the 16-bit insn->off field below */
    if (btf_fd_idx > INT16_MAX) {
        pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
            btf_fd_idx, relo->name);
        gen->error = -E2BIG;
        return;
    }
    kdesc->off = btf_fd_idx;
    /* jump to success case (skips the 2 zero-stores and the JA below) */
    emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
    /* weak-symbol failure path: set value for imm, off as 0 */
    emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
    emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
    /* skip success case for ret < 0 (10 = remaining insns below, counting
     * each ldimm64 as 2)
     */
    emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
    /* store btf_id into insn[insn_idx].imm */
    emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
    /* obtain fd in BPF_REG_9 (btf_obj_fd lives in the upper 32 bits of R7) */
    emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
    emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
    /* jump to fd_array store if fd denotes module BTF */
    emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
    /* set the default value for off */
    emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
    /* skip BTF fd store for vmlinux BTF */
    emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
    /* load fd_array slot pointer */
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
    /* store BTF fd in slot */
    emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
    /* store index into insn[insn_idx].off */
    emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
    if (!gen->log_level)
        return;
    /* print the final imm/off of the relocated insn and the stored BTF fd */
    emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
                  offsetof(struct bpf_insn, imm)));
    emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
                  offsetof(struct bpf_insn, off)));
    debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
           relo->name, kdesc->ref);
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
    emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
    debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
           relo->name, kdesc->ref);
}
0730 
/* Debug-print the two imm halves and the reg byte of the ldimm64 insn that
 * BPF_REG_8 points at, after a ksym relocation. No-op unless log_level set.
 */
static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
                   int ref)
{
    if (!gen->log_level)
        return;
    emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
                  offsetof(struct bpf_insn, imm)));
    /* imm of the second half of the ldimm64 pair */
    emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
                  offsetof(struct bpf_insn, imm)));
    debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
           relo->is_typeless, relo->is_weak, relo->name, ref);
    /* the byte right after the opcode holds dst_reg/src_reg */
    emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
    debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
           relo->is_typeless, relo->is_weak, relo->name, ref);
}
0746 
/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * Relocate a typeless ksym: resolve its address with kallsyms_lookup_name
 * and patch the 64-bit address into the ldimm64 pair's two imm fields.
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
                    struct ksym_relo_desc *relo, int insn)
{
    struct ksym_desc *kdesc;

    kdesc = get_ksym_desc(gen, relo);
    if (!kdesc)
        return;
    /* try to copy from existing ldimm64 insn */
    if (kdesc->ref > 1) {
        move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
                   kdesc->insn + offsetof(struct bpf_insn, imm));
        move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
                   kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
        goto log;
    }
    /* remember insn offset, so we can copy ksym addr later */
    kdesc->insn = insn;
    /* skip typeless ksym_desc in fd closing loop in cleanup_relos */
    kdesc->typeless = true;
    emit_bpf_kallsyms_lookup_name(gen, relo);
    /* -ENOENT (symbol missing) is tolerated: skip the error check and leave
     * the address as 0
     */
    emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
    emit_check_err(gen);
    /* store lower half of addr into insn[insn_idx].imm */
    emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
    /* store upper half of addr into insn[insn_idx + 1].imm */
    emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
    emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
              sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
    emit_ksym_relo_log(gen, relo, kdesc->ref);
}
0782 
/* Mask for the register byte of a struct bpf_insn (the byte right after the
 * opcode) that keeps dst_reg and clears src_reg. dst_reg and src_reg are
 * 4-bit bitfields, so which nibble holds which register depends on the
 * host's bitfield layout.
 */
static __u32 src_reg_mask(void)
{
#if defined(__BIG_ENDIAN_BITFIELD)
    return 0xf0; /* dst_reg,src_reg,... */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
    return 0x0f; /* src_reg,dst_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}
0793 
/* Emit loader code that resolves a BTF-typed extern ksym into the ldimm64
 * instruction at blob offset @insn: btf_id goes into insn[idx].imm and
 * btf_obj_fd into insn[idx + 1].imm. For weak externs that fail to resolve,
 * both imm fields are zeroed and src_reg is cleared so the verifier treats
 * the load as a plain constant.
 *
 * Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
    struct ksym_desc *kdesc;
    __u32 reg_mask;

    kdesc = get_ksym_desc(gen, relo);
    if (!kdesc)
        return;
    /* try to copy from existing ldimm64 insn */
    if (kdesc->ref > 1) {
        /* symbol already resolved earlier: copy both imm halves from the
         * first ldimm64 that referenced it
         */
        move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
                   kdesc->insn + offsetof(struct bpf_insn, imm));
        move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
                   kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
        /* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */
        emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
        goto clear_src_reg;
    }
    /* remember insn offset, so we can copy BTF ID and FD later */
    kdesc->insn = insn;
    emit_bpf_find_by_name_kind(gen, relo);
    /* weak externs may legitimately fail to resolve; only hard-fail the
     * load for non-weak ones
     */
    if (!relo->is_weak)
        emit_check_err(gen);
    /* jump to success case (skips the two zero-stores and the JA below) */
    emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
    /* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
    emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
    emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
    /* skip success case for ret < 0 */
    emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
    /* store btf_id into insn[insn_idx].imm */
    emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
    /* store btf_obj_fd into insn[insn_idx + 1].imm */
    emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
    emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
                  sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
    /* skip src_reg adjustment */
    emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
clear_src_reg:
    /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
    reg_mask = src_reg_mask();
    emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
    emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
    emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

    emit_ksym_relo_log(gen, relo, kdesc->ref);
}
0844 
0845 void bpf_gen__record_relo_core(struct bpf_gen *gen,
0846                    const struct bpf_core_relo *core_relo)
0847 {
0848     struct bpf_core_relo *relos;
0849 
0850     relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
0851     if (!relos) {
0852         gen->error = -ENOMEM;
0853         return;
0854     }
0855     gen->core_relos = relos;
0856     relos += gen->core_relo_cnt;
0857     memcpy(relos, core_relo, sizeof(*relos));
0858     gen->core_relo_cnt++;
0859 }
0860 
0861 static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
0862 {
0863     int insn;
0864 
0865     pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
0866     insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
0867     emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
0868     switch (relo->kind) {
0869     case BTF_KIND_VAR:
0870         if (relo->is_typeless)
0871             emit_relo_ksym_typeless(gen, relo, insn);
0872         else
0873             emit_relo_ksym_btf(gen, relo, insn);
0874         break;
0875     case BTF_KIND_FUNC:
0876         emit_relo_kfunc_btf(gen, relo, insn);
0877         break;
0878     default:
0879         pr_warn("Unknown relocation kind '%d'\n", relo->kind);
0880         gen->error = -EDOM;
0881         return;
0882     }
0883 }
0884 
0885 static void emit_relos(struct bpf_gen *gen, int insns)
0886 {
0887     int i;
0888 
0889     for (i = 0; i < gen->relo_cnt; i++)
0890         emit_relo(gen, gen->relos + i, insns);
0891 }
0892 
0893 static void cleanup_core_relo(struct bpf_gen *gen)
0894 {
0895     if (!gen->core_relo_cnt)
0896         return;
0897     free(gen->core_relos);
0898     gen->core_relo_cnt = 0;
0899     gen->core_relos = NULL;
0900 }
0901 
/* Emit code that closes the FDs acquired while resolving extern ksyms and
 * kfuncs, then free the host-side relocation bookkeeping. @insns is the blob
 * offset of the program's instructions.
 */
static void cleanup_relos(struct bpf_gen *gen, int insns)
{
    int i, insn;

    for (i = 0; i < gen->nr_ksyms; i++) {
        /* only close fds for typed ksyms and kfuncs */
        if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
            /* close fd recorded in insn[insn_idx + 1].imm */
            insn = gen->ksyms[i].insn;
            insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
            emit_sys_close_blob(gen, insn);
        } else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
            /* kfunc module BTF FD lives in the fd_array blob slot */
            emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
            /* release the fd_array slot accounting for in-range entries */
            if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
                gen->nr_fd_array--;
        }
    }
    /* free host-side ksym descriptors */
    if (gen->nr_ksyms) {
        free(gen->ksyms);
        gen->nr_ksyms = 0;
        gen->ksyms = NULL;
    }
    /* free host-side ksym relocation records */
    if (gen->relo_cnt) {
        free(gen->relos);
        gen->relo_cnt = 0;
        gen->relos = NULL;
    }
    cleanup_core_relo(gen);
}
0931 
/* Emit loader code that issues a BPF_PROG_LOAD command for one program.
 *
 * All variable-length inputs (license string, instructions, func/line info,
 * CO-RE relos, and the bpf_attr itself) are copied into the loader blob at
 * generation time. The emitted code then patches the blob-resident attr
 * with blob-relative pointers, user log settings and the BTF fd, performs
 * extern relocations, runs the syscall, and on success stores the program
 * FD on the loader stack at prog_fd[prog index].
 *
 * @prog_idx is only used for the debug message here.
 */
void bpf_gen__prog_load(struct bpf_gen *gen,
            enum bpf_prog_type prog_type, const char *prog_name,
            const char *license, struct bpf_insn *insns, size_t insn_cnt,
            struct bpf_prog_load_opts *load_attr, int prog_idx)
{
    int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
    int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
    union bpf_attr attr;

    memset(&attr, 0, attr_size);
    pr_debug("gen: prog_load: type %d insns_cnt %zd progi_idx %d\n",
         prog_type, insn_cnt, prog_idx);
    /* add license string to blob of bytes */
    license_off = add_data(gen, license, strlen(license) + 1);
    /* add insns to blob of bytes */
    insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));

    attr.prog_type = prog_type;
    attr.expected_attach_type = load_attr->expected_attach_type;
    attr.attach_btf_id = load_attr->attach_btf_id;
    attr.prog_ifindex = load_attr->prog_ifindex;
    attr.kern_version = 0;
    attr.insn_cnt = (__u32)insn_cnt;
    attr.prog_flags = load_attr->prog_flags;

    attr.func_info_rec_size = load_attr->func_info_rec_size;
    attr.func_info_cnt = load_attr->func_info_cnt;
    func_info = add_data(gen, load_attr->func_info,
                 attr.func_info_cnt * attr.func_info_rec_size);

    attr.line_info_rec_size = load_attr->line_info_rec_size;
    attr.line_info_cnt = load_attr->line_info_cnt;
    line_info = add_data(gen, load_attr->line_info,
                 attr.line_info_cnt * attr.line_info_rec_size);

    attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
    attr.core_relo_cnt = gen->core_relo_cnt;
    core_relos = add_data(gen, gen->core_relos,
                 attr.core_relo_cnt * attr.core_relo_rec_size);

    libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
    /* the (mostly filled-in) attr itself also lives in the blob */
    prog_load_attr = add_data(gen, &attr, attr_size);

    /* populate union bpf_attr with a pointer to license */
    emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);

    /* populate union bpf_attr with a pointer to instructions */
    emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);

    /* populate union bpf_attr with a pointer to func_info */
    emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

    /* populate union bpf_attr with a pointer to line_info */
    emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

    /* populate union bpf_attr with a pointer to core_relos */
    emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);

    /* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
    emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

    /* populate union bpf_attr with user provided log details */
    move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
              offsetof(struct bpf_loader_ctx, log_level), false);
    move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
              offsetof(struct bpf_loader_ctx, log_size), false);
    move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
              offsetof(struct bpf_loader_ctx, log_buf), false);
    /* populate union bpf_attr with btf_fd saved in the stack earlier */
    move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
            stack_off(btf_fd));
    if (gen->attach_kind) {
        emit_find_attach_target(gen);
        /* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
        emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
                         0, 0, 0, prog_load_attr));
        emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
                      offsetof(union bpf_attr, attach_btf_id)));
        emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
        emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
                      offsetof(union bpf_attr, attach_btf_obj_fd)));
    }
    emit_relos(gen, insns_off);
    /* emit PROG_LOAD command */
    emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
    debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
    /* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
    cleanup_relos(gen, insns_off);
    if (gen->attach_kind) {
        emit_sys_close_blob(gen,
                    attr_field(prog_load_attr, attach_btf_obj_fd));
        gen->attach_kind = 0;
    }
    emit_check_err(gen);
    /* remember prog_fd in the stack, if successful */
    /* NOTE(review): no bounds check against MAX_USED_PROGS here; presumably
     * the caller guarantees nr_progs stays in range -- confirm upstream
     */
    emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
                  stack_off(prog_fd[gen->nr_progs])));
    gen->nr_progs++;
}
1031 
/* Emit loader code that issues BPF_MAP_UPDATE_ELEM writing @pvalue (of
 * @value_size bytes) into key 0 of the map at @map_idx. If the map's
 * descriptor in the loader context carries a non-NULL initial_value
 * pointer, the blob copy of the value is first refreshed from that
 * pointer at load time.
 */
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
                  __u32 value_size)
{
    int attr_size = offsetofend(union bpf_attr, flags);
    int map_update_attr, value, key;
    union bpf_attr attr;
    int zero = 0;

    memset(&attr, 0, attr_size);
    pr_debug("gen: map_update_elem: idx %d\n", map_idx);

    /* blob copies of the value and the constant key 0 */
    value = add_data(gen, pvalue, value_size);
    key = add_data(gen, &zero, sizeof(zero));

    /* if (map_desc[map_idx].initial_value) {
     *    if (ctx->flags & BPF_SKEL_KERNEL)
     *        bpf_probe_read_kernel(value, value_size, initial_value);
     *    else
     *        bpf_copy_from_user(value, value_size, initial_value);
     * }
     */
    /* R3 = map_desc[map_idx].initial_value (source pointer) */
    emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
                  sizeof(struct bpf_loader_ctx) +
                  sizeof(struct bpf_map_desc) * map_idx +
                  offsetof(struct bpf_map_desc, initial_value)));
    /* NULL initial_value: skip the 8-insn copy sequence below */
    emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
    /* R1 = blob address of the value, R2 = its size */
    emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
                     0, 0, 0, value));
    emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
    /* pick the copy helper based on ctx->flags & BPF_SKEL_KERNEL */
    emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
                  offsetof(struct bpf_loader_ctx, flags)));
    emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
    emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
    emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
    emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

    map_update_attr = add_data(gen, &attr, attr_size);
    /* map FD is read from the fd_array blob slot at load time */
    move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
               blob_fd_array_off(gen, map_idx));
    emit_rel_store(gen, attr_field(map_update_attr, key), key);
    emit_rel_store(gen, attr_field(map_update_attr, value), value);
    /* emit MAP_UPDATE_ELEM command */
    emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
    debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
    emit_check_err(gen);
}
1078 
1079 void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
1080                  int inner_map_idx)
1081 {
1082     int attr_size = offsetofend(union bpf_attr, flags);
1083     int map_update_attr, key;
1084     union bpf_attr attr;
1085 
1086     memset(&attr, 0, attr_size);
1087     pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
1088          outer_map_idx, slot, inner_map_idx);
1089 
1090     key = add_data(gen, &slot, sizeof(slot));
1091 
1092     map_update_attr = add_data(gen, &attr, attr_size);
1093     move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
1094                blob_fd_array_off(gen, outer_map_idx));
1095     emit_rel_store(gen, attr_field(map_update_attr, key), key);
1096     emit_rel_store(gen, attr_field(map_update_attr, value),
1097                blob_fd_array_off(gen, inner_map_idx));
1098 
1099     /* emit MAP_UPDATE_ELEM command */
1100     emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
1101     debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
1102           outer_map_idx, slot, inner_map_idx);
1103     emit_check_err(gen);
1104 }
1105 
1106 void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
1107 {
1108     int attr_size = offsetofend(union bpf_attr, map_fd);
1109     int map_freeze_attr;
1110     union bpf_attr attr;
1111 
1112     memset(&attr, 0, attr_size);
1113     pr_debug("gen: map_freeze: idx %d\n", map_idx);
1114     map_freeze_attr = add_data(gen, &attr, attr_size);
1115     move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
1116                blob_fd_array_off(gen, map_idx));
1117     /* emit MAP_FREEZE command */
1118     emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
1119     debug_ret(gen, "map_freeze");
1120     emit_check_err(gen);
1121 }