Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
0002 /* Copyright (C) 2016-2018 Netronome Systems, Inc. */
0003 
0004 #ifndef __NFP_BPF_H__
0005 #define __NFP_BPF_H__ 1
0006 
0007 #include <linux/bitfield.h>
0008 #include <linux/bpf.h>
0009 #include <linux/bpf_verifier.h>
0010 #include <linux/kernel.h>
0011 #include <linux/list.h>
0012 #include <linux/rhashtable.h>
0013 #include <linux/skbuff.h>
0014 #include <linux/types.h>
0015 #include <linux/wait.h>
0016 
0017 #include "../ccm.h"
0018 #include "../nfp_asm.h"
0019 #include "fw.h"
0020 
/* Warn via the BPF control vNIC's datapath (bpf->app->ctrl->dp) */
#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* For relocation logic use up-most byte of branch instruction as scratch
 * area.  Remember to clear this before sending instructions to HW!
 */
#define OP_RELO_TYPE	0xff00000000000000ULL
0027 
/* Relocation types recorded in the instruction's scratch byte
 * (see OP_RELO_TYPE above).
 */
enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	RELO_BR_GO_CALL_PUSH_REGS,
	RELO_BR_GO_CALL_POP_REGS,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};
0043 
/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add this large
 * offset to their target.
 */
#define BR_OFF_RELO		15000
0049 
/* General purpose registers with statically assigned roles, accessed
 * through the shorthand macros below (stack_reg(), imm_a(), ...).
 */
enum static_regs {
	STATIC_REG_IMMA		= 20, /* Bank AB */
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};
0056 
/* Offsets into the packet vector, which lives in local memory index 1
 * (see the pv_*() accessor macros below).
 */
enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
	PKT_VEC_QSEL_SET	= 4,
	PKT_VEC_QSEL_VAL	= 6,
};
0063 
/* Bit within the PKT_VEC_QSEL_SET word of the packet vector.
 * The historical, misspelled "PKT_VEL_" name is kept for existing users;
 * new code should use the correctly spelled PKT_VEC_ alias below.
 */
#define PKT_VEL_QSEL_SET_BIT	4
#define PKT_VEC_QSEL_SET_BIT	PKT_VEL_QSEL_SET_BIT
0065 
/* Accessors for fields of the packet vector (local memory index 1) */
#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np)	reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np)	reg_lm(1, PKT_VEC_QSEL_VAL)

/* Shorthands for the statically assigned registers (enum static_regs) */
#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)
#define ret_reg(np)	imm_a(np)

/* ABI flags register and its flag bits */
#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define   NFP_BPF_ABI_FLAG_MARK	1
0084 
/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:		backpointer to the app
 * @ccm:		common control message handler data
 *
 * @bpf_dev:		BPF offload device handle
 *
 * @cmsg_key_sz:	size of key in cmsg element array
 * @cmsg_val_sz:	size of value in cmsg element array
 *
 * @map_list:		list of offloaded maps
 * @maps_in_use:	number of currently offloaded maps
 * @map_elems_in_use:	number of elements allocated to offloaded maps
 *
 * @maps_neutral:	hash table of offload-neutral maps (on pointer)
 *
 * @abi_version:	global BPF ABI version
 * @cmsg_cache_cnt:	number of entries to read for caching
 *
 * @adjust_head:	adjust head capability
 * @adjust_head.flags:		extra flags for adjust head
 * @adjust_head.off_min:	minimal packet offset within buffer required
 * @adjust_head.off_max:	maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub:	negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add:	positive adjustment guaranteed possible
 *
 * @maps:		map capability
 * @maps.types:			supported map types
 * @maps.max_maps:		max number of maps supported
 * @maps.max_elems:		max number of entries in each map
 * @maps.max_key_sz:		max size of map key
 * @maps.max_val_sz:		max size of map value
 * @maps.max_elem_sz:		max size of map entry (key + value)
 *
 * @helpers:		helper addresses for various calls
 * @helpers.map_lookup:		map lookup helper address
 * @helpers.map_update:		map update helper address
 * @helpers.map_delete:		map delete helper address
 * @helpers.perf_event_output:	output perf event to a ring buffer
 *
 * @pseudo_random:	FW initialized the pseudo-random machinery (CSRs)
 * @queue_select:	BPF can set the RX queue ID in packet vector
 * @adjust_tail:	BPF can simply truncate packet size for adjust tail
 * @cmsg_multi_ent:	FW can pack multiple map entries in a single cmsg
 */
struct nfp_app_bpf {
	struct nfp_app *app;
	struct nfp_ccm ccm;

	struct bpf_offload_dev *bpf_dev;

	unsigned int cmsg_key_sz;
	unsigned int cmsg_val_sz;

	unsigned int cmsg_cache_cnt;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	u32 abi_version;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
	bool adjust_tail;
	bool cmsg_multi_ent;
};
0178 
/* Ways a word of an offloaded map's value may be used by programs;
 * NFP_MAP_UNUSED means the word is never accessed.
 * NOTE(review): values appear to be recorded in nfp_bpf_map_word::type
 * (4-bit field) — confirm against the verifier code.
 */
enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};
0185 
/* Per-4B-word usage record for an offloaded map's value
 * (see nfp_bpf_map::use_map).
 */
struct nfp_bpf_map_word {
	unsigned char type		:4; /* presumably enum nfp_bpf_map_use — TODO confirm */
	unsigned char non_zero_update	:1; /* NOTE(review): inferred from name: word updated with non-zero value */
};
0190 
/* Map response cache parameters: number of entries cached per read and
 * how long (ns) a cached response stays valid (see nfp_bpf_map::cache_to).
 */
#define NFP_BPF_MAP_CACHE_CNT		4U
#define NFP_BPF_MAP_CACHE_TIME_NS	(250 * 1000)
0193 
/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:	pointer to the offloaded BPF map
 * @bpf:	back pointer to bpf app private structure
 * @tid:	table id identifying map on datapath
 *
 * @cache_lock:	protects @cache_blockers, @cache_to, @cache
 * @cache_blockers:	number of ops in flight which block caching
 * @cache_gen:	counter incremented by every blocker on exit
 * @cache_to:	time when cache will no longer be valid (ns)
 * @cache:	skb with cached response
 *
 * @l:		link on the nfp_app_bpf->map_list list
 * @use_map:	map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;

	spinlock_t cache_lock;
	u32 cache_blockers;
	u32 cache_gen;
	u64 cache_to;
	struct sk_buff *cache;

	struct list_head l;
	struct nfp_bpf_map_word use_map[];	/* one entry per 4B of value */
};
0223 
/**
 * struct nfp_bpf_neutral_map - record of an offload-neutral map
 * @l:		link on the nfp_app_bpf->maps_neutral hash table
 * @ptr:	pointer to the core BPF map (table is hashed on pointer)
 * @map_id:	id of the map
 * @count:	reference count — NOTE(review): presumably number of
 *		programs using the map; confirm against table management code
 */
struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};
0230 
extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;

/* Callback operating on one instruction wrapper of a program */
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

/* Helpers for walking the nfp_prog->insns list of instruction wrappers */
#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)
0243 
/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg:	BPF register state from latest path
 * @var_off:	for stack arg - stack offset changes on different paths,
 *		i.e. it is not a single compile-time constant
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};
0253 
/* Flags for the nfp_insn_meta::flags field */
#define FLAG_INSN_IS_JUMP_DST			BIT(0)
#define FLAG_INSN_IS_SUBPROG_START		BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME	BIT(2)
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP			BIT(3)
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT		BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT		BIT(5)
/* Instruction needs to zero extend to high 32-bit */
#define FLAG_INSN_DO_ZEXT			BIT(6)

/* All the reasons an instruction may be skipped (not translated) */
#define FLAG_INSN_SKIP_MASK		(FLAG_INSN_SKIP_NOOP | \
					 FLAG_INSN_SKIP_PREC_DEPENDENT | \
					 FLAG_INSN_SKIP_VERIFIER_OPT)
0269 
/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @num_insns_after_br: number of insns following a branch jump, used for fixup
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand.
 * @umax_src: copy of core verifier umax_value for src operand.
 * @umin_dst: copy of core verifier umin_value for dst operand.
 * @umax_dst: copy of core verifier umax_value for dst operand.
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @subprog_idx: index of subprogram to which the instruction belongs
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
			u32 num_insns_after_br; /* only for BPF-to-BPF calls */
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for operands of ALU
		 * operations. For example, shift amount, multiplicand and
		 * multiplier etc.
		 */
		struct {
			u64 umin_src;
			u64 umax_src;
			u64 umin_dst;
			u64 umax_dst;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	unsigned short subprog_idx;
	instr_cb_t double_cb;

	struct list_head l;
};
0348 
0349 #define BPF_SIZE_MASK   0x18
0350 
0351 static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
0352 {
0353     return BPF_CLASS(meta->insn.code);
0354 }
0355 
0356 static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
0357 {
0358     return BPF_SRC(meta->insn.code);
0359 }
0360 
0361 static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
0362 {
0363     return BPF_OP(meta->insn.code);
0364 }
0365 
0366 static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
0367 {
0368     return BPF_MODE(meta->insn.code);
0369 }
0370 
0371 static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
0372 {
0373     return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
0374 }
0375 
0376 static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
0377 {
0378     return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
0379 }
0380 
0381 static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
0382 {
0383     return mbpf_class(meta) == BPF_JMP32;
0384 }
0385 
0386 static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
0387 {
0388     return mbpf_class(meta) == BPF_JMP;
0389 }
0390 
0391 static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
0392 {
0393     return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
0394 }
0395 
0396 static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
0397 {
0398     return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
0399 }
0400 
0401 static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
0402 {
0403     return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
0404 }
0405 
0406 static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
0407 {
0408     return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
0409 }
0410 
0411 static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
0412 {
0413     u8 code = meta->insn.code;
0414 
0415     return BPF_CLASS(code) == BPF_LD &&
0416            (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
0417 }
0418 
0419 static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
0420 {
0421     u8 code = meta->insn.code;
0422 
0423     return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
0424 }
0425 
0426 static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
0427 {
0428     return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
0429 }
0430 
0431 static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
0432 {
0433     return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
0434 }
0435 
0436 static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
0437 {
0438     return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
0439 }
0440 
0441 static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
0442 {
0443     return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
0444 }
0445 
0446 static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
0447 {
0448     u8 op;
0449 
0450     if (is_mbpf_jmp32(meta))
0451         return true;
0452 
0453     if (!is_mbpf_jmp64(meta))
0454         return false;
0455 
0456     op = mbpf_op(meta);
0457     return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
0458 }
0459 
0460 static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
0461 {
0462     struct bpf_insn insn = meta->insn;
0463 
0464     return insn.code == (BPF_JMP | BPF_CALL) &&
0465         insn.src_reg != BPF_PSEUDO_CALL;
0466 }
0467 
0468 static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
0469 {
0470     struct bpf_insn insn = meta->insn;
0471 
0472     return insn.code == (BPF_JMP | BPF_CALL) &&
0473         insn.src_reg == BPF_PSEUDO_CALL;
0474 }
0475 
/* Stack frame alignment — NOTE(review): presumably bytes; confirm against
 * the stack layout code.
 */
#define STACK_FRAME_ALIGN 64

/**
 * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
 * @stack_depth:	maximum stack depth used by this sub-program
 * @needs_reg_push:	whether sub-program uses callee-saved registers
 */
struct nfp_bpf_subprog_info {
	u16 stack_depth;
	u8 needs_reg_push : 1;
};
0487 
/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @stack_size: total amount of stack used
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack
 * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_frame_depth: max stack depth for current frame
 * @adjust_head_location: if program has single adjust head call - the
 *			  instruction number of that call
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @subprog_cnt: number of sub-programs, including main function
 * @map_records: the map record pointers from bpf->maps_neutral
 * @subprog: pointer to an array of objects holding info about sub-programs
 * @n_insns: number of instructions on @insns list
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	unsigned int stack_size;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;
	unsigned int tgt_call_push_regs;
	unsigned int tgt_call_pop_regs;

	unsigned int n_translated;
	int error;

	unsigned int stack_frame_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	unsigned int subprog_cnt;
	struct nfp_bpf_neutral_map **map_records;
	struct nfp_bpf_subprog_info *subprog;

	unsigned int n_insns;
	struct list_head insns;
};
0546 
/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:	currently loaded cls_bpf program
 * @start_off:	address of the first instruction in the memory
 *		(NOTE(review): presumably the device instruction store)
 * @tgt_done:	jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};
0558 
/* Translation (JIT) entry points */
bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
			       unsigned int mtu);

/* Verifier callbacks */
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

/* Offload entry points */
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

/* Control message (cmsg) interface, used i.a. for offloaded maps */
unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			unsigned int len);
0615 #endif