#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../ccm.h"
#include "../nfp_asm.h"
#include "fw.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* Relocation type is encoded in the top byte of the raw instruction word
 * until the program is relocated for a particular vNIC.
 */
#define OP_RELO_TYPE	0xff00000000000000ULL

enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	RELO_BR_GO_CALL_PUSH_REGS,
	RELO_BR_GO_CALL_POP_REGS,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO		15000

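/* Illustrative sketch only (local names are hypothetical): the relocation
 * type is stashed in the top byte of the 64-bit instruction with the
 * standard bitfield helpers and read back the same way, e.g.:
 *
 *	u64 insn = raw_insn | FIELD_PREP(OP_RELO_TYPE, RELO_BR_HELPER);
 *	enum nfp_relo_type type = FIELD_GET(OP_RELO_TYPE, insn);
 *
 * nfp_bpf_relo_for_vnic() later resolves such instructions into final
 * addresses before the program is loaded onto a vNIC.
 */
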
enum static_regs {
	STATIC_REG_IMMA		= 20, /* Bank AB */
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};

/* Fields of the per-packet state ("packet vector") kept in local memory
 * index 1, accessed through the pv_*() helpers below.
 */
enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
	PKT_VEC_QSEL_SET	= 4,
	PKT_VEC_QSEL_VAL	= 6,
};

#define PKT_VEL_QSEL_SET_BIT	4

#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np)	reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np)	reg_lm(1, PKT_VEC_QSEL_VAL)

#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)
#define ret_reg(np)	imm_a(np)

#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK	1

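/* Illustrative sketch only: JIT code refers to the fixed registers through
 * the wrappers above rather than raw register numbers.  Copying the packet
 * length into a destination GPR conceptually looks like the line below;
 * wrp_mov() is a jit.c internal and dst_gpr a hypothetical local, both
 * assumed here for illustration:
 *
 *	wrp_mov(nfp_prog, reg_both(dst_gpr), plen_reg(nfp_prog));
 */
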
/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:		backpointer to the app
 * @ccm:		common control message handler data
 *
 * @bpf_dev:		BPF offload device handle
 *
 * @cmsg_key_sz:	size of key in cmsg element array
 * @cmsg_val_sz:	size of value in cmsg element array
 *
 * @cmsg_cache_cnt:	number of entries to read for caching
 *
 * @map_list:		list of offloaded maps
 * @maps_in_use:	number of currently offloaded maps
 * @map_elems_in_use:	number of elements allocated to offloaded maps
 *
 * @maps_neutral:	hash table of offload-neutral maps (on pointer)
 *
 * @abi_version:	global BPF ABI version
 *
 * @adjust_head:	adjust head capability
 * @adjust_head.flags:		extra flags for adjust head
 * @adjust_head.off_min:	minimal packet offset within buffer required
 * @adjust_head.off_max:	maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub:	negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add:	positive adjustment guaranteed possible
 *
 * @maps:		map capability
 * @maps.types:			supported map types
 * @maps.max_maps:		max number of maps supported
 * @maps.max_elems:		max number of entries in each map
 * @maps.max_key_sz:		max size of map key
 * @maps.max_val_sz:		max size of map value
 * @maps.max_elem_sz:		max size of map entry (key + value)
 *
 * @helpers:		helper addresses for various calls
 * @helpers.map_lookup:		map lookup helper address
 * @helpers.map_update:		map update helper address
 * @helpers.map_delete:		map delete helper address
 * @helpers.perf_event_output:	output perf event to a ring buffer
 *
 * @pseudo_random:	FW initialized the pseudo-random machinery (CSRs)
 * @queue_select:	BPF can set the RX queue ID in the packet vector
 * @adjust_tail:	BPF can simply truncate the packet for adjust tail
 * @cmsg_multi_ent:	FW can pack multiple map entries in a single cmsg
 */
struct nfp_app_bpf {
	struct nfp_app *app;
	struct nfp_ccm ccm;

	struct bpf_offload_dev *bpf_dev;

	unsigned int cmsg_key_sz;
	unsigned int cmsg_val_sz;

	unsigned int cmsg_cache_cnt;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	u32 abi_version;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
	bool adjust_tail;
	bool cmsg_multi_ent;
};

enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};

struct nfp_bpf_map_word {
	unsigned char type		:4;
	unsigned char non_zero_update	:1;
};

#define NFP_BPF_MAP_CACHE_CNT		4U
#define NFP_BPF_MAP_CACHE_TIME_NS	(250 * 1000)

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:	pointer to the offloaded BPF map
 * @bpf:	back pointer to bpf app private structure
 * @tid:	table id identifying map on datapath
 *
 * @cache_lock:	protects @cache_blockers, @cache_to, @cache, @cache_gen
 * @cache_blockers:	number of ops in flight which block caching
 * @cache_gen:	generation counter for @cache
 * @cache_to:	time when cache will no longer be valid (ns)
 * @cache:	skb with cached response
 *
 * @l:		link on the nfp_app_bpf->map_list list
 * @use_map:	map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;

	spinlock_t cache_lock;
	u32 cache_blockers;
	u32 cache_gen;
	u64 cache_to;
	struct sk_buff *cache;

	struct list_head l;
	struct nfp_bpf_map_word use_map[];
};
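
/* Illustrative sketches only (nfp_map and off are hypothetical locals):
 *
 * use_map[] holds one nfp_bpf_map_word per 4 bytes of map value, so
 * recording that a program reads value offset off is roughly:
 *
 *	nfp_map->use_map[off / 4].type = NFP_MAP_USE_READ;
 *
 * The lookup cache is only consulted while its timestamp is fresh and no
 * cache-blocking operation is in flight, roughly:
 *
 *	if (nfp_map->cache && !nfp_map->cache_blockers &&
 *	    ktime_get_ns() < nfp_map->cache_to)
 *		... serve the lookup from nfp_map->cache ...
 */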

struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST			BIT(0)
#define FLAG_INSN_IS_SUBPROG_START		BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME	BIT(2)
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP			BIT(3)
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT		BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT		BIT(5)
/* Instruction needs to zero extend to high 32-bit */
#define FLAG_INSN_DO_ZEXT			BIT(6)

#define FLAG_INSN_SKIP_MASK		(FLAG_INSN_SKIP_NOOP | \
					 FLAG_INSN_SKIP_PREC_DEPENDENT | \
					 FLAG_INSN_SKIP_VERIFIER_OPT)
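
/* Illustrative sketch only: optimization and code generation passes walk the
 * per-program list of instruction wrappers and honour the skip flags, e.g.:
 *
 *	struct nfp_insn_meta *meta;
 *
 *	list_for_each_entry(meta, &nfp_prog->insns, l) {
 *		if (meta->flags & FLAG_INSN_SKIP_MASK)
 *			continue;
 *		... translate or analyse meta->insn ...
 *	}
 */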

/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @paired_st: the paired store insn at the head of the sequence
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @num_insns_after_br: number of insns following a branch jump, used for fixup
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand
 * @umax_src: copy of core verifier umax_value for src operand
 * @umin_dst: copy of core verifier umin_value for dst operand
 * @umax_dst: copy of core verifier umax_value for dst operand
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @subprog_idx: index of subprogram to which the instruction belongs
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
			u32 num_insns_after_br;
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for operands of ALU
		 * operations, for example shift amount, multiplicand and
		 * multiplier etc.
		 */
		struct {
			u64 umin_src;
			u64 umax_src;
			u64 umin_dst;
			u64 umax_dst;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	unsigned short subprog_idx;
	instr_cb_t double_cb;

	struct list_head l;
};

/* BPF_SIZE() bits of the opcode; masked off to compare ld/st opcodes
 * regardless of access size.
 */
#define BPF_SIZE_MASK	0x18

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
	return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
	return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
	return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
	return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP32;
}

static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP;
}

static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
{
	return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_LD &&
	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
}

static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
}

static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}

static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
{
	u8 op;

	if (is_mbpf_jmp32(meta))
		return true;

	if (!is_mbpf_jmp64(meta))
		return false;

	op = mbpf_op(meta);
	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}

static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
	       insn.src_reg != BPF_PSEUDO_CALL;
}

static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
	       insn.src_reg == BPF_PSEUDO_CALL;
}
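
/* Illustrative sketch only (mark_pkt_access() is a hypothetical helper):
 * the predicates above are typically combined when scanning a program,
 * e.g. to find all direct packet accesses:
 *
 *	list_for_each_entry(meta, &nfp_prog->insns, l)
 *		if (is_mbpf_load_pkt(meta) || is_mbpf_store_pkt(meta))
 *			mark_pkt_access(nfp_prog, meta);
 */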

#define STACK_FRAME_ALIGN 64

/**
 * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
 * @stack_depth:	maximum stack depth used by this sub-program
 * @needs_reg_push:	whether sub-program uses callee-saved registers
 */
struct nfp_bpf_subprog_info {
	u16 stack_depth;
	u8 needs_reg_push : 1;
};

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @stack_size: total amount of stack used
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @tgt_call_push_regs: jump target for subroutine call push of registers
 * @tgt_call_pop_regs: jump target for subroutine call register restore
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_frame_depth: max stack depth for current frame
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @subprog_cnt: number of sub-programs, including the main function
 * @map_records: the map record pointers from bpf->maps_neutral
 * @subprog: pointer to an array of objects holding info about sub-programs
 * @n_insns: number of instructions on @insns list
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	unsigned int stack_size;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;
	unsigned int tgt_call_push_regs;
	unsigned int tgt_call_pop_regs;

	unsigned int n_translated;
	int error;

	unsigned int stack_frame_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	unsigned int subprog_cnt;
	struct nfp_bpf_neutral_map **map_records;
	struct nfp_bpf_subprog_info *subprog;

	unsigned int n_insns;
	struct list_head insns;
};

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:	currently loaded cls_bpf program
 * @start_off:	address of the first instruction in the memory
 * @tgt_done:	jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};

bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
			       unsigned int mtu);

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);
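
/* Illustrative sketch only: these are the control-message backends behind
 * the offloaded map operations.  A lookup roughly amounts to:
 *
 *	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
 *
 * with the key and value buffers sized according to offmap->map.key_size
 * and offmap->map.value_size respectively.
 */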

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			unsigned int len);
#endif