/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF (1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ (1 << 29)
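
/* Illustrative sketch (not kernel code) of why the cap matters: once a
 * register's umax_value is known to be at most BPF_MAX_VAR_OFF (2^29),
 * adding two 32-bit quantities to it cannot wrap a u64, so a bounds
 * check like the one below cannot be defeated by overflow.  The helper
 * name and signature are hypothetical.
 */
static inline bool sketch_access_in_bounds(u64 umax_value, u32 off, u32 size,
                       u64 mem_size)
{
    if (umax_value > BPF_MAX_VAR_OFF)
        return false;   /* variable part too large to reason about */
    /* no overflow: 2^29 + 2^32 + 2^32 is far below 2^64 */
    return umax_value + off + size <= mem_size;
}
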
/* size of type_str_buf in bpf_verifier. */
#define TYPE_STR_BUF_LEN 64

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
    REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
    REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
    REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
    REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
    REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
    REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
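
/* Illustrative helpers, not part of the kernel API: 'live' is a bitmask,
 * so the 32- and 64-bit read marks can be tested independently while
 * REG_LIVE_READ matches either.  The names below are hypothetical.
 */
static inline bool sketch_reg_was_read(enum bpf_reg_liveness live)
{
    return live & REG_LIVE_READ;        /* READ32 or READ64 */
}

static inline bool sketch_reg_needs_full64(enum bpf_reg_liveness live)
{
    return live & REG_LIVE_READ64;      /* upper 32 bits matter too */
}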

struct bpf_reg_state {
    /* Ordering of fields matters.  See states_equal() */
    enum bpf_reg_type type;
    /* Fixed part of pointer offset, pointer types only */
    s32 off;
    union {
        /* valid when type == PTR_TO_PACKET */
        int range;

        /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
         *   PTR_TO_MAP_VALUE_OR_NULL
         */
        struct {
            struct bpf_map *map_ptr;
            /* To distinguish lookups into an inner map from lookups
             * into the outer map itself, map_uid is non-zero for
             * registers pointing to inner maps.
             */
            u32 map_uid;
        };

        /* for PTR_TO_BTF_ID */
        struct {
            struct btf *btf;
            u32 btf_id;
        };

        u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

        /* For dynptr stack slots */
        struct {
            enum bpf_dynptr_type type;
            /* A dynptr is 16 bytes so it takes up 2 stack slots.
             * We need to track which slot is the first slot
             * to protect against cases where the user may try to
             * pass in an address starting at the second slot of the
             * dynptr.
             */
            bool first_slot;
        } dynptr;

        /* Max size from any of the above. */
        struct {
            unsigned long raw1;
            unsigned long raw2;
        } raw;

        u32 subprogno; /* for PTR_TO_FUNC */
    };
    /* For PTR_TO_PACKET, used to find other pointers with the same variable
     * offset, so they can share range knowledge.
     * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
     * came from, when one is tested for != NULL.
     * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
     * for the purpose of tracking that it's freed.
     * For PTR_TO_SOCKET this is used to share which pointers retain the
     * same reference to the socket, to determine proper reference freeing.
     * For stack slots that are dynptrs, this is used to track references to
     * the dynptr to determine proper reference freeing.
     */
    u32 id;
    /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
     * from a pointer-cast helper, bpf_sk_fullsock() and
     * bpf_tcp_sock().
     *
     * Consider the following where "sk" is a reference counted
     * pointer returned from "sk = bpf_sk_lookup_tcp();":
     *
     * 1: sk = bpf_sk_lookup_tcp();
     * 2: if (!sk) { return 0; }
     * 3: fullsock = bpf_sk_fullsock(sk);
     * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
     * 5: tp = bpf_tcp_sock(fullsock);
     * 6: if (!tp) { bpf_sk_release(sk); return 0; }
     * 7: bpf_sk_release(sk);
     * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
     *
     * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
     * the "tp" ptr should also be invalidated.  In order to do that,
     * the regs holding "fullsock" and "sk" need to remember
     * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
     * such that the verifier can reset all regs which have
     * ref_obj_id matching the sk_reg->id.
     *
     * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
     * sk_reg->id is then kept for NULL-marking purposes only.
     * After NULL-marking is done, sk_reg->id can be reset to 0.
     *
     * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
     * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
     *
     * After "tp = bpf_tcp_sock(fullsock);" at line 5,
     * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
     * which is the same as sk_reg->ref_obj_id.
     *
     * From the verifier's perspective, if sk, fullsock and tp
     * are not NULL, they are the same ptr with different
     * reg->type.  In particular, bpf_sk_release(tp) is also
     * allowed and has the same effect as bpf_sk_release(sk).
     */
    u32 ref_obj_id;
    /* For scalar types (SCALAR_VALUE), this represents our knowledge of
     * the actual value.
     * For pointer types, this represents the variable part of the offset
     * from the pointed-to object, and is shared with all bpf_reg_states
     * with the same id as us.
     */
    struct tnum var_off;
    /* Used to determine if any memory access using this register will
     * result in a bad access.
     * These refer to the same value as var_off, not necessarily the actual
     * contents of the register.
     */
    s64 smin_value; /* minimum possible (s64)value */
    s64 smax_value; /* maximum possible (s64)value */
    u64 umin_value; /* minimum possible (u64)value */
    u64 umax_value; /* maximum possible (u64)value */
    s32 s32_min_value; /* minimum possible (s32)value */
    s32 s32_max_value; /* maximum possible (s32)value */
    u32 u32_min_value; /* minimum possible (u32)value */
    u32 u32_max_value; /* maximum possible (u32)value */
    /* parentage chain for liveness checking */
    struct bpf_reg_state *parent;
    /* Inside the callee two registers can both be PTR_TO_STACK, e.g.
     * R1=fp-8 and R2=fp-8, but one of them points to this function's
     * stack while the other points to the caller's stack. To tell them
     * apart, 'frameno' is used: an index into the
     * bpf_verifier_state->frame[] array pointing to a bpf_func_state.
     */
    u32 frameno;
    /* Tracks subreg definition. The stored value is the insn_idx of the
     * writing insn. This is safe because subreg_def is used before any insn
     * patching, which only happens after main verification has finished.
     */
    s32 subreg_def;
    enum bpf_reg_liveness live;
    /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
    bool precise;
};
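
/* A minimal sketch under the SCALAR_VALUE semantics described above: a
 * scalar whose tnum has no unknown bits is a known constant, and its
 * unsigned bounds collapse to that single value.  tnum_is_const() comes
 * from linux/tnum.h; the helper name is hypothetical.
 */
static inline bool sketch_reg_is_const(const struct bpf_reg_state *reg)
{
    return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off) &&
           reg->umin_value == reg->umax_value;
}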

enum bpf_stack_slot_type {
    STACK_INVALID,    /* nothing was stored in this stack slot */
    STACK_SPILL,      /* register spilled into stack */
    STACK_MISC,       /* BPF program wrote some data into this slot */
    STACK_ZERO,       /* BPF program wrote constant zero */
    /* A dynptr is stored in this stack slot. The type of dynptr
     * is stored in bpf_stack_state->spilled_ptr.dynptr.type
     */
    STACK_DYNPTR,
};

#define BPF_REG_SIZE 8  /* size of eBPF register in bytes */
#define BPF_DYNPTR_SIZE     sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS     (BPF_DYNPTR_SIZE / BPF_REG_SIZE)

struct bpf_stack_state {
    struct bpf_reg_state spilled_ptr;
    u8 slot_type[BPF_REG_SIZE];
};
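
/* A hedged sketch of the dynptr slot layout: BPF_DYNPTR_SIZE is 16
 * bytes, so a dynptr spans BPF_DYNPTR_NR_SLOTS (two) consecutive stack
 * slots, every byte of which should carry STACK_DYNPTR.  Hypothetical
 * helper, not the verifier's own check.
 */
static inline bool sketch_slots_are_dynptr(const struct bpf_stack_state *slots)
{
    int i, j;

    for (i = 0; i < BPF_DYNPTR_NR_SLOTS; i++)
        for (j = 0; j < BPF_REG_SIZE; j++)
            if (slots[i].slot_type[j] != STACK_DYNPTR)
                return false;
    return true;
}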

struct bpf_reference_state {
    /* Track each reference created with a unique id, even if the same
     * instruction creates the reference multiple times (eg, via CALL).
     */
    int id;
    /* Instruction where the allocation of this reference occurred. This
     * is used purely to inform the user of a reference leak.
     */
    int insn_idx;
};
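
/* A minimal sketch (not the kernel's acquire_reference_state()) of how
 * a new reference might be recorded: each acquisition gets a fresh id
 * and remembers the acquiring insn for leak reports.  Assumes the
 * caller has already grown the refs array by one element.
 */
static inline int sketch_record_reference(struct bpf_reference_state *refs,
                      int *acquired_refs, u32 *id_gen,
                      int insn_idx)
{
    struct bpf_reference_state *ref = &refs[(*acquired_refs)++];

    ref->id = ++(*id_gen);
    ref->insn_idx = insn_idx;
    return ref->id;
}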

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
    struct bpf_reg_state regs[MAX_BPF_REG];
    /* index of call instruction that called into this func */
    int callsite;
    /* stack frame number of this function state from pov of
     * enclosing bpf_verifier_state.
     * 0 = main function, 1 = first callee.
     */
    u32 frameno;
    /* subprog number == index within subprog_info
     * zero == main subprog
     */
    u32 subprogno;
    /* Every bpf_timer_start will increment async_entry_cnt.
     * It's used to distinguish:
     * void foo(void) { for(;;); }
     * void foo(void) { bpf_timer_set_callback(,foo); }
     */
    u32 async_entry_cnt;
    bool in_callback_fn;
    bool in_async_callback_fn;

    /* The following fields should be last. See copy_func_state() */
    int acquired_refs;
    struct bpf_reference_state *refs;
    int allocated_stack;
    struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
    u32 prev_idx;
    u32 idx;
};

struct bpf_id_pair {
    u32 old;
    u32 cur;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
    /* call stack tracking */
    struct bpf_func_state *frame[MAX_CALL_FRAMES];
    struct bpf_verifier_state *parent;
    /*
     * 'branches' field is the number of branches left to explore:
     * 0 - all possible paths from this state reached bpf_exit or
     * were safely pruned
     * 1 - at least one path is being explored.
     * This state hasn't reached bpf_exit
     * 2 - at least two paths are being explored.
     * This state is an immediate parent of two children.
     * One is a fallthrough branch with branches==1 and the other
     * state is pushed into the stack (to be explored later) also with
     * branches==1. The parent of this state has branches==1.
     * The verifier state tree connected via 'parent' pointer looks like:
     * 1
     * 1
     * 2 -> 1 (first 'if' pushed into stack)
     * 1
     * 2 -> 1 (second 'if' pushed into stack)
     * 1
     * 1
     * 1 bpf_exit.
     *
     * Once do_check() reaches bpf_exit, it calls update_branch_counts()
     * and the verifier state tree will look like:
     * 1
     * 1
     * 2 -> 1 (first 'if' pushed into stack)
     * 1
     * 1 -> 1 (second 'if' pushed into stack)
     * 0
     * 0
     * 0 bpf_exit.
     * After pop_stack(), do_check() will resume at the second 'if'.
     *
     * If is_state_visited() sees a state with branches > 0 it means
     * there is a loop. If such a state is exactly equal to the current
     * state it's an infinite loop. Note states_equal() checks for state
     * equivalency, so two states being 'states_equal' does not mean
     * infinite loop. The exact comparison is provided by the
     * states_maybe_looping() function. It's a stronger pre-check and
     * much faster than states_equal().
     *
     * This algorithm may not find all possible infinite loops, or the
     * loop iteration count may be too high.
     * In such cases the BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
     */
    u32 branches;
    u32 insn_idx;
    u32 curframe;
    u32 active_spin_lock;
    bool speculative;

    /* first and last insn idx of this verifier state */
    u32 first_insn_idx;
    u32 last_insn_idx;
    /* jmp history recorded from first to last.
     * backtracking uses it to go from last to first.
     * For most states jmp_history_cnt is [0-3].
     * For loops it can go up to ~40.
     */
    struct bpf_idx_pair *jmp_history;
    u32 jmp_history_cnt;
};
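
/* A simplified sketch of the update_branch_counts() walk described
 * above: when a path reaches bpf_exit, counts are decremented up the
 * parent chain, and the walk stops at the first state that still has
 * unexplored siblings.  The real function lives in kernel/bpf/verifier.c.
 */
static inline void sketch_update_branch_counts(struct bpf_verifier_state *st)
{
    while (st) {
        if (--st->branches)
            break;      /* another child of 'st' is still in flight */
        st = st->parent;
    }
}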

#define bpf_get_spilled_reg(slot, frame)                \
    (((slot < frame->allocated_stack / BPF_REG_SIZE) &&     \
      (frame->stack[slot].slot_type[0] == STACK_SPILL))     \
     ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)          \
    for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);      \
         iter < frame->allocated_stack / BPF_REG_SIZE;      \
         iter++, reg = bpf_get_spilled_reg(iter, frame))
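
/* Usage sketch for the iterator above (hypothetical helper): count the
 * stack slots of a frame that currently hold spilled registers.  'reg'
 * is NULL for slots whose first byte is not STACK_SPILL.
 */
static inline int sketch_count_spilled_regs(struct bpf_func_state *frame)
{
    struct bpf_reg_state *reg;
    int i, n = 0;

    bpf_for_each_spilled_reg(i, frame, reg) {
        if (reg)
            n++;
    }
    return n;
}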

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
    struct bpf_verifier_state state;
    struct bpf_verifier_state_list *next;
    int miss_cnt, hit_cnt;
};

struct bpf_loop_inline_state {
    unsigned int initialized:1; /* set to true upon first entry */
    unsigned int fit_for_inline:1; /* true if callback function is the same
                    * at each call and flags are always zero
                    */
    u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC        (1U << 0)
#define BPF_ALU_SANITIZE_DST        (1U << 1)
#define BPF_ALU_NEG_VALUE       (1U << 2)
#define BPF_ALU_NON_POINTER     (1U << 3)
#define BPF_ALU_IMMEDIATE       (1U << 4)
#define BPF_ALU_SANITIZE        (BPF_ALU_SANITIZE_SRC | \
                     BPF_ALU_SANITIZE_DST)
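
/* A hedged sketch of how these bits might combine: sanitization is
 * relevant when either operand needed masking, while BPF_ALU_NON_POINTER
 * marks ops proven to involve no pointer and so screens it off.
 * Hypothetical predicate; the real logic is in sanitize_ptr_alu().
 */
static inline bool sketch_alu_needs_sanitize(u8 alu_state)
{
    return (alu_state & BPF_ALU_SANITIZE) &&
           !(alu_state & BPF_ALU_NON_POINTER);
}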

struct bpf_insn_aux_data {
    union {
        enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
        unsigned long map_ptr_state;    /* pointer/poison value for maps */
        s32 call_imm;           /* saved imm field of call insn */
        u32 alu_limit;          /* limit for add/sub register with pointer */
        struct {
            u32 map_index;      /* index into used_maps[] */
            u32 map_off;        /* offset from value base address */
        };
        struct {
            enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
            union {
                struct {
                    struct btf *btf;
                    u32 btf_id; /* btf_id for struct typed var */
                };
                u32 mem_size;   /* mem_size for non-struct typed var */
            };
        } btf_var;
        /* if the instruction is a call to bpf_loop, this field tracks
         * the state of the relevant registers to decide about inlining
         */
        struct bpf_loop_inline_state loop_inline_state;
    };
    u64 map_key_state; /* constant (32 bit) key tracking for maps */
    int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
    u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
    bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
    bool zext_dst; /* this insn zero extends dst reg */
    u8 alu_state; /* used in combination with alu_limit */

    /* below fields are initialized once */
    unsigned int orig_idx; /* original instruction index */
    bool prune_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE   1024

struct bpf_verifier_log {
    u32 level;
    char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
    char __user *ubuf;
    u32 len_used;
    u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
    return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1  1
#define BPF_LOG_LEVEL2  2
#define BPF_LOG_STATS   4
#define BPF_LOG_LEVEL   (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK    (BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL  (BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
    return log &&
        ((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
         log->level == BPF_LOG_KERNEL);
}

static inline bool
bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
    return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
           log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
}
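
/* Usage sketch, assuming 'log' was populated from a user's bpf_attr:
 * kernel-internal logging (BPF_LOG_KERNEL) has no user buffer to
 * validate, a zero level means no logging was requested, and anything
 * else must pass the attribute check above.  Hypothetical helper.
 */
static inline bool sketch_log_setup_ok(const struct bpf_verifier_log *log)
{
    if (log->level == BPF_LOG_KERNEL)
        return true;
    return !log->level || bpf_verifier_log_attr_valid(log);
}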

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
    /* 'start' has to be the first field otherwise find_subprog() won't work */
    u32 start; /* insn idx of function entry point */
    u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
    u16 stack_depth; /* max. stack depth used by this function */
    bool has_tail_call;
    bool tail_call_reachable;
    bool has_ld_abs;
    bool is_async_cb;
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
    u32 insn_idx;
    u32 prev_insn_idx;
    struct bpf_prog *prog;      /* eBPF program being verified */
    const struct bpf_verifier_ops *ops;
    struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
    int stack_size;         /* number of states to be processed */
    bool strict_alignment;      /* perform strict pointer alignment checks */
    bool test_state_freq;       /* test verifier with different pruning frequency */
    struct bpf_verifier_state *cur_state; /* current verifier state */
    struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
    struct bpf_verifier_state_list *free_list;
    struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
    struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
    u32 used_map_cnt;       /* number of used maps */
    u32 used_btf_cnt;       /* number of used BTF objects */
    u32 id_gen;         /* used to generate unique reg IDs */
    bool explore_alu_limits;
    bool allow_ptr_leaks;
    bool allow_uninit_stack;
    bool allow_ptr_to_map_access;
    bool bpf_capable;
    bool bypass_spec_v1;
    bool bypass_spec_v4;
    bool seen_direct_write;
    struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
    const struct bpf_line_info *prev_linfo;
    struct bpf_verifier_log log;
    struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
    struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
    struct {
        int *insn_state;
        int *insn_stack;
        int cur_stack;
    } cfg;
    u32 pass_cnt; /* number of times do_check() was called */
    u32 subprog_cnt;
    /* number of instructions analyzed by the verifier */
    u32 prev_insn_processed, insn_processed;
    /* number of jmps, calls, exits analyzed so far */
    u32 prev_jmps_processed, jmps_processed;
    /* total verification time */
    u64 verification_time;
    /* maximum number of verifier states kept in 'branching' instructions */
    u32 max_states_per_insn;
    /* total number of allocated verifier states */
    u32 total_states;
    /* some states are freed during program analysis.
     * this is the peak number of states. this number dominates kernel
     * memory consumption during verification
     */
    u32 peak_states;
    /* longest register parentage chain walked for liveness marking */
    u32 longest_mark_read_walk;
    bpfptr_t fd_array;

    /* bit mask to keep track of whether a register has been accessed
     * since the last time the function state was printed
     */
    u32 scratched_regs;
    /* Same as scratched_regs but for stack slots */
    u64 scratched_stack_slots;
    u32 prev_log_len, prev_insn_print_len;
    /* buffer used in reg_type_str() to generate reg_type string */
    char type_str_buf[TYPE_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
                      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
                       const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
                const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
    struct bpf_verifier_state *cur = env->cur_state;

    return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
    return cur_func(env)->regs;
}
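
/* Usage sketch (hypothetical helper): fetch the current frame's R1 via
 * the accessors above.  BPF_REG_1 comes from the UAPI headers pulled in
 * through linux/bpf.h.
 */
static inline struct bpf_reg_state *sketch_cur_r1(struct bpf_verifier_env *env)
{
    return &cur_regs(env)[BPF_REG_1];
}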

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
                 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
                  struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ptr_off_reg(struct bpf_verifier_env *env,
              const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
               const struct bpf_reg_state *reg, int regno,
               enum bpf_arg_type arg_type);
int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
                 u32 regno);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
           u32 regno, u32 mem_size);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
                         struct btf *btf, u32 btf_id)
{
    if (tgt_prog)
        return ((u64)tgt_prog->aux->id << 32) | btf_id;
    else
        return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
    if (obj_id)
        *obj_id = key >> 32;
    if (btf_id)
        *btf_id = key & 0x7FFFFFFF;
}
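
/* Round-trip sketch for the pair above (hypothetical helper): the
 * 0x80000000 marker set for keys built from a raw btf object is masked
 * off again by bpf_trampoline_unpack_key(), so a btf_id below 2^31
 * survives the round trip either way.
 */
static inline bool sketch_key_roundtrip(const struct bpf_prog *tgt_prog,
                    struct btf *btf, u32 btf_id)
{
    u64 key = bpf_trampoline_compute_key(tgt_prog, btf, btf_id);
    u32 obj_id, unpacked;

    bpf_trampoline_unpack_key(key, &obj_id, &unpacked);
    return unpacked == btf_id;
}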

int bpf_check_attach_target(struct bpf_verifier_log *log,
                const struct bpf_prog *prog,
                const struct bpf_prog *tgt_prog,
                u32 btf_id,
                struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

#define BPF_BASE_TYPE_MASK  GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
    return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
    return type & ~BPF_BASE_TYPE_MASK;
}
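
/* Usage sketch: a register typed PTR_TO_MAP_VALUE | PTR_MAYBE_NULL has
 * base_type() PTR_TO_MAP_VALUE and PTR_MAYBE_NULL among its flags.
 * PTR_MAYBE_NULL is a bpf_type_flag from bpf.h; the helper name is
 * hypothetical.
 */
static inline bool sketch_is_map_value_or_null(u32 reg_type)
{
    return base_type(reg_type) == PTR_TO_MAP_VALUE &&
           (type_flag(reg_type) & PTR_MAYBE_NULL);
}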

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
{
    return prog->type == BPF_PROG_TYPE_EXT ?
        prog->aux->dst_prog->type : prog->type;
}

#endif /* _LINUX_BPF_VERIFIER_H */