#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pkt_cls.h>

#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define pr_vlog(env, fmt, ...) \
        bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

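/* Find the insn meta for @insn_idx, walking forwards or backwards from the
 * cached @meta, or from the head/tail of the instruction list when that is
 * closer.
 */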
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  unsigned int insn_idx)
{
        unsigned int forward, backward, i;

        backward = meta->n - insn_idx;
        forward = insn_idx - meta->n;

        if (min(forward, backward) > nfp_prog->n_insns - insn_idx - 1) {
                backward = nfp_prog->n_insns - insn_idx - 1;
                meta = nfp_prog_last_meta(nfp_prog);
        }
        if (min(forward, backward) > insn_idx && backward > insn_idx) {
                forward = insn_idx;
                meta = nfp_prog_first_meta(nfp_prog);
        }

        if (forward < backward)
                for (i = 0; i < forward; i++)
                        meta = nfp_meta_next(meta);
        else
                for (i = 0; i < backward; i++)
                        meta = nfp_meta_prev(meta);

        return meta;
}

static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
                       struct nfp_insn_meta *meta,
                       const struct bpf_reg_state *reg2)
{
        unsigned int location = UINT_MAX;
        int imm;

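        /* Record the call location only when the adjustment amount is a
         * known constant no larger than ETH_ZLEN - ETH_HLEN (so a minimal
         * frame cannot be shrunk below an Ethernet header) and within the
         * headroom the FW guarantees it can always add or remove.
         */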
        if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
                goto exit_set_location;
        imm = reg2->var_off.value;

        if (imm > ETH_ZLEN - ETH_HLEN)
                goto exit_set_location;
        if (imm > (int)bpf->adjust_head.guaranteed_add ||
            imm < -bpf->adjust_head.guaranteed_sub)
                goto exit_set_location;

        if (nfp_prog->adjust_head_location) {
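                /* Only a single call site, and only with the same constant
                 * argument, can be recorded per program.
                 */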
                if (nfp_prog->adjust_head_location != meta->n)
                        goto exit_set_location;

                if (meta->arg2.reg.var_off.value != imm)
                        goto exit_set_location;
        }

        location = meta->n;
exit_set_location:
        nfp_prog->adjust_head_location = location;
}

static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
        const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
        struct bpf_offloaded_map *offmap;
        struct bpf_func_state *state;
        struct nfp_bpf_map *nfp_map;
        int off, i;

        state = env->cur_state->frame[reg3->frameno];

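        /* The new value is passed on the stack (the caller has already
         * checked that reg3 is a constant stack pointer).  Walk it byte by
         * byte: any word not known to be zero is recorded as a non-zero
         * update, and updating a word already used as an atomic counter
         * with a possibly non-zero value is rejected to avoid the offload
         * endianness problem reported below.
         */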
        offmap = map_to_offmap(reg1->map_ptr);
        nfp_map = offmap->dev_priv;
        off = reg3->off + reg3->var_off.value;

        for (i = 0; i < offmap->map.value_size; i++) {
                struct bpf_stack_state *stack_entry;
                unsigned int soff;

                soff = -(off + i) - 1;
                stack_entry = &state->stack[soff / BPF_REG_SIZE];
                if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
                        continue;

                if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
                        pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
                                i, soff);
                        return false;
                }
                nfp_map->use_map[i / 4].non_zero_update = 1;
        }

        return true;
}

static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
                     const struct bpf_reg_state *reg,
                     struct nfp_bpf_reg_state *old_arg)
{
        s64 off, old_off;

        if (reg->type != PTR_TO_STACK) {
                pr_vlog(env, "%s: unsupported ptr type %d\n",
                        fname, reg->type);
                return false;
        }
        if (!tnum_is_const(reg->var_off)) {
                pr_vlog(env, "%s: variable pointer\n", fname);
                return false;
        }

        off = reg->var_off.value + reg->off;
        if (-off % 4) {
                pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
                return false;
        }

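        /* The remaining checks only apply when the same instruction is being
         * re-verified and a previously recorded argument state is available
         * for comparison.
         */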
        if (!old_arg)
                return true;

        old_off = old_arg->reg.var_off.value + old_arg->reg.off;
        old_arg->var_off |= off != old_off;

        return true;
}

static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
                    struct nfp_insn_meta *meta,
                    u32 helper_tgt, const struct bpf_reg_state *reg1)
{
        if (!helper_tgt) {
                pr_vlog(env, "%s: not supported by FW\n", fname);
                return false;
        }

        return true;
}

static int
nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog,
                          struct bpf_verifier_env *env,
                          struct nfp_insn_meta *meta)
{
        const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
        const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
        const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
        struct nfp_app_bpf *bpf = nfp_prog->bpf;
        u32 func_id = meta->insn.imm;

        switch (func_id) {
        case BPF_FUNC_xdp_adjust_head:
                if (!bpf->adjust_head.off_max) {
                        pr_vlog(env, "adjust_head not supported by FW\n");
                        return -EOPNOTSUPP;
                }
                if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
                        pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
                        return -EOPNOTSUPP;
                }

                nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
                break;

        case BPF_FUNC_xdp_adjust_tail:
                if (!bpf->adjust_tail) {
                        pr_vlog(env, "adjust_tail not supported by FW\n");
                        return -EOPNOTSUPP;
                }
                break;

        case BPF_FUNC_map_lookup_elem:
                if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
                                         bpf->helpers.map_lookup, reg1) ||
                    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
                                          meta->func_id ? &meta->arg2 : NULL))
                        return -EOPNOTSUPP;
                break;

        case BPF_FUNC_map_update_elem:
                if (!nfp_bpf_map_call_ok("map_update", env, meta,
                                         bpf->helpers.map_update, reg1) ||
                    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
                                          meta->func_id ? &meta->arg2 : NULL) ||
                    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
                    !nfp_bpf_map_update_value_ok(env))
                        return -EOPNOTSUPP;
                break;

        case BPF_FUNC_map_delete_elem:
                if (!nfp_bpf_map_call_ok("map_delete", env, meta,
                                         bpf->helpers.map_delete, reg1) ||
                    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
                                          meta->func_id ? &meta->arg2 : NULL))
                        return -EOPNOTSUPP;
                break;

        case BPF_FUNC_get_prandom_u32:
                if (bpf->pseudo_random)
                        break;
                pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
                return -EOPNOTSUPP;

        case BPF_FUNC_perf_event_output:
                BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
                             NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
                             NFP_BPF_STACK != PTR_TO_STACK ||
                             NFP_BPF_PACKET_DATA != PTR_TO_PACKET);

                if (!bpf->helpers.perf_event_output) {
                        pr_vlog(env, "event_output: not supported by FW\n");
                        return -EOPNOTSUPP;
                }

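                /* The flags argument (reg3) must be known to be exactly
                 * BPF_F_CURRENT_CPU in its index bits; any other or unknown
                 * index is rejected.
                 */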
                if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
                    (reg3->var_off.value & BPF_F_INDEX_MASK) !=
                    BPF_F_CURRENT_CPU) {
                        char tn_buf[48];

                        tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
                        pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
                                tn_buf);
                        return -EOPNOTSUPP;
                }

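                /* Save space in meta: the only argument tracked for this
                 * helper from here on is the 4th one (the data pointer), so
                 * shove it into arg1.
                 */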
                reg1 = cur_regs(env) + BPF_REG_4;

                if (reg1->type != SCALAR_VALUE &&
                    reg1->type != PTR_TO_STACK &&
                    reg1->type != PTR_TO_MAP_VALUE &&
                    reg1->type != PTR_TO_PACKET) {
                        pr_vlog(env, "event_output: unsupported ptr type: %d\n",
                                reg1->type);
                        return -EOPNOTSUPP;
                }

                if (reg1->type == PTR_TO_STACK &&
                    !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
                        return -EOPNOTSUPP;

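                /* On the NFP the return codes and exact behaviour of this
                 * helper differ from the host implementation; warn once per
                 * device and note it in the verifier log.
                 */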
                dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
                              "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
                pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");

                if (!meta->func_id)
                        break;

                if (reg1->type != meta->arg1.type) {
                        pr_vlog(env, "event_output: ptr type changed: %d %d\n",
                                meta->arg1.type, reg1->type);
                        return -EINVAL;
                }
                break;

        default:
                pr_vlog(env, "unsupported function id: %d\n", func_id);
                return -EOPNOTSUPP;
        }

        meta->func_id = func_id;
        meta->arg1 = *reg1;
        meta->arg2.reg = *reg2;

        return 0;
}

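/* XDP programs may return anything; for other program types the exit value
 * must be a known constant, and for TC the low TC_ACT_* codes other than
 * TC_ACT_SHOT, TC_ACT_STOLEN and TC_ACT_QUEUED are rejected.
 */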
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
                   struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
        u64 imm;

        if (nfp_prog->type == BPF_PROG_TYPE_XDP)
                return 0;

        if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
                char tn_buf[48];

                tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
                pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
                        reg0->type, tn_buf);
                return -EINVAL;
        }

        imm = reg0->var_off.value;
        if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
            imm <= TC_ACT_REDIRECT &&
            imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
            imm != TC_ACT_QUEUED) {
                pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
                        reg0->type, imm);
                return -EINVAL;
        }

        return 0;
}

static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
                           struct nfp_insn_meta *meta,
                           const struct bpf_reg_state *reg,
                           struct bpf_verifier_env *env)
{
        s32 old_off, new_off;

        if (reg->frameno != env->cur_state->curframe)
                meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME;

        if (!tnum_is_const(reg->var_off)) {
                pr_vlog(env, "variable ptr stack access\n");
                return -EINVAL;
        }

        if (meta->ptr.type == NOT_INIT)
                return 0;

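        /* The pointer may legitimately point at different stack offsets on
         * different paths, but only if every offset seen has the same
         * alignment within a 4-byte word.
         */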
        old_off = meta->ptr.off + meta->ptr.var_off.value;
        new_off = reg->off + reg->var_off.value;

        meta->ptr_not_const |= old_off != new_off;

        if (!meta->ptr_not_const)
                return 0;

        if (old_off % 4 == new_off % 4)
                return 0;

        pr_vlog(env, "stack access changed location was:%d is:%d\n",
                old_off, new_off);
        return -EINVAL;
}

static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
        static const char * const names[] = {
                [NFP_MAP_UNUSED] = "unused",
                [NFP_MAP_USE_READ] = "read",
                [NFP_MAP_USE_WRITE] = "write",
                [NFP_MAP_USE_ATOMIC_CNT] = "atomic",
        };

        if (use >= ARRAY_SIZE(names) || !names[use])
                return "unknown";
        return names[use];
}

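/* Each 4-byte word of the map value has a recorded use (read, write or
 * atomic counter); mixing different uses of the same word is not supported.
 */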
static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
                          struct nfp_bpf_map *nfp_map,
                          unsigned int off, enum nfp_bpf_map_use use)
{
        if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
            nfp_map->use_map[off / 4].type != use) {
                pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
                        nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
                        nfp_bpf_map_use_name(use), off);
                return -EOPNOTSUPP;
        }

        if (nfp_map->use_map[off / 4].non_zero_update &&
            use == NFP_MAP_USE_ATOMIC_CNT) {
                pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
                        off);
                return -EOPNOTSUPP;
        }

        nfp_map->use_map[off / 4].type = use;

        return 0;
}

static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
                      const struct bpf_reg_state *reg,
                      enum nfp_bpf_map_use use)
{
        struct bpf_offloaded_map *offmap;
        struct nfp_bpf_map *nfp_map;
        unsigned int size, off;
        int i, err;

        if (!tnum_is_const(reg->var_off)) {
                pr_vlog(env, "map value offset is variable\n");
                return -EOPNOTSUPP;
        }

        off = reg->var_off.value + meta->insn.off + reg->off;
        size = BPF_LDST_BYTES(&meta->insn);
        offmap = map_to_offmap(reg->map_ptr);
        nfp_map = offmap->dev_priv;

        if (off + size > offmap->map.value_size) {
                pr_vlog(env, "map value access out-of-bounds\n");
                return -EINVAL;
        }

        for (i = 0; i < size; i += 4 - (off + i) % 4) {
                err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
                if (err)
                        return err;
        }

        return 0;
}

static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  struct bpf_verifier_env *env, u8 reg_no)
{
        const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
        int err;

        if (reg->type != PTR_TO_CTX &&
            reg->type != PTR_TO_STACK &&
            reg->type != PTR_TO_MAP_VALUE &&
            reg->type != PTR_TO_PACKET) {
                pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
                return -EINVAL;
        }

        if (reg->type == PTR_TO_STACK) {
                err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
                if (err)
                        return err;
        }

        if (reg->type == PTR_TO_MAP_VALUE) {
                if (is_mbpf_load(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_READ);
                        if (err)
                                return err;
                }
                if (is_mbpf_store(meta)) {
                        pr_vlog(env, "map writes not supported\n");
                        return -EOPNOTSUPP;
                }
                if (is_mbpf_atomic(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_ATOMIC_CNT);
                        if (err)
                                return err;
                }
        }

        if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
                pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
                        meta->ptr.type, reg->type);
                return -EINVAL;
        }

        meta->ptr = *reg;

        return 0;
}

static int
nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                    struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;

        if (reg->type == PTR_TO_CTX) {
                if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
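                        /* Writes to the XDP context are only allowed to
                         * rx_queue_index, and only when the FW implements
                         * queue selection.
                         */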
                        switch (meta->insn.off) {
                        case offsetof(struct xdp_md, rx_queue_index):
                                if (nfp_prog->bpf->queue_select)
                                        goto exit_check_ptr;
                                pr_vlog(env, "queue selection not supported by FW\n");
                                return -EOPNOTSUPP;
                        }
                }
                pr_vlog(env, "unsupported store to context field\n");
                return -EOPNOTSUPP;
        }
exit_check_ptr:
        return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

static int
nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                     struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
        const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

        if (meta->insn.imm != BPF_ADD) {
                pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
                return -EOPNOTSUPP;
        }

        if (dreg->type != PTR_TO_MAP_VALUE) {
                pr_vlog(env, "atomic add not to a map value pointer: %d\n",
                        dreg->type);
                return -EOPNOTSUPP;
        }
        if (sreg->type != SCALAR_VALUE) {
                pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
                return -EOPNOTSUPP;
        }

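        /* Track whether the added value can exceed 16 bits and whether it
         * may fit within 16 bits; the translation of the add depends on
         * both.
         */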
        meta->xadd_over_16bit |=
                sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
        meta->xadd_maybe_16bit |=
                (sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

        return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

static int
nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *sreg =
                cur_regs(env) + meta->insn.src_reg;
        const struct bpf_reg_state *dreg =
                cur_regs(env) + meta->insn.dst_reg;

        meta->umin_src = min(meta->umin_src, sreg->umin_value);
        meta->umax_src = max(meta->umax_src, sreg->umax_value);
        meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
        meta->umax_dst = max(meta->umax_dst, dreg->umax_value);

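        /* NFP supports u16 and u32 multiplications.
         *
         * For ALU64, if either operand can exceed the u32 value range the
         * instruction is rejected.  A negative BPF_K immediate is rejected
         * too, because for ALU64 the 32-bit immediate is sign extended to
         * 64 bits and the operand would then fall outside the u32 range.
         *
         * For ALU32 a negative immediate is fine: the result is truncated
         * to 32 bits and the low half is the same for signed and unsigned
         * multiplication.
         */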
        if (is_mbpf_mul(meta)) {
                if (meta->umax_dst > U32_MAX) {
                        pr_vlog(env, "multiplier is not within u32 value range\n");
                        return -EINVAL;
                }
                if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
                        pr_vlog(env, "multiplicand is not within u32 value range\n");
                        return -EINVAL;
                }
                if (mbpf_class(meta) == BPF_ALU64 &&
                    mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
                        pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
                        return -EINVAL;
                }
        }

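        /* NFP has no divide instruction; division is handled via reciprocal
         * multiplication, so both dividend and divisor must fit in u32 and
         * the divisor must be a known constant.  eBPF itself has no signed
         * divide, but a negative immediate can still reach us from
         * hand-written assembly, so that case is rejected as well.
         */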
        if (is_mbpf_div(meta)) {
                if (meta->umax_dst > U32_MAX) {
                        pr_vlog(env, "dividend is not within u32 value range\n");
                        return -EINVAL;
                }
                if (mbpf_src(meta) == BPF_X) {
                        if (meta->umin_src != meta->umax_src) {
                                pr_vlog(env, "divisor is not constant\n");
                                return -EINVAL;
                        }
                        if (meta->umax_src > U32_MAX) {
                                pr_vlog(env, "divisor is not within u32 value range\n");
                                return -EINVAL;
                        }
                }
                if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
                        pr_vlog(env, "divide by negative constant is not supported\n");
                        return -EINVAL;
                }
        }

        return 0;
}

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
                    int prev_insn_idx)
{
        struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
        struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

        meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx);
        nfp_prog->verifier_meta = meta;

        if (!nfp_bpf_supported_opcode(meta->insn.code)) {
                pr_vlog(env, "instruction %#02x not supported\n",
                        meta->insn.code);
                return -EINVAL;
        }

        if (meta->insn.src_reg >= MAX_BPF_REG ||
            meta->insn.dst_reg >= MAX_BPF_REG) {
                pr_vlog(env, "program uses extended registers - jit hardening?\n");
                return -EINVAL;
        }

        if (is_mbpf_helper_call(meta))
                return nfp_bpf_check_helper_call(nfp_prog, env, meta);
        if (meta->insn.code == (BPF_JMP | BPF_EXIT))
                return nfp_bpf_check_exit(nfp_prog, env);

        if (is_mbpf_load(meta))
                return nfp_bpf_check_ptr(nfp_prog, meta, env,
                                         meta->insn.src_reg);
        if (is_mbpf_store(meta))
                return nfp_bpf_check_store(nfp_prog, meta, env);

        if (is_mbpf_atomic(meta))
                return nfp_bpf_check_atomic(nfp_prog, meta, env);

        if (is_mbpf_alu(meta))
                return nfp_bpf_check_alu(nfp_prog, meta, env);

        return 0;
}

static int
nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env,
                                struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta;
        int index = 0;

        list_for_each_entry(meta, &nfp_prog->insns, l) {
                if (nfp_is_subprog_start(meta))
                        index++;
                meta->subprog_idx = index;

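                /* A write to any of the callee-saved registers r6-r9 means
                 * this subprogram will need to save and restore them.
                 */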
                if (meta->insn.dst_reg >= BPF_REG_6 &&
                    meta->insn.dst_reg <= BPF_REG_9)
                        nfp_prog->subprog[index].needs_reg_push = 1;
        }

        if (index + 1 != nfp_prog->subprog_cnt) {
                pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n",
                        index + 1, nfp_prog->subprog_cnt);
                return -EFAULT;
        }

        return 0;
}

static unsigned int nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
        unsigned int max_depth = 0, depth = 0, frame = 0;
        struct nfp_insn_meta *ret_insn[MAX_CALL_FRAMES];
        unsigned short frame_depths[MAX_CALL_FRAMES];
        unsigned short ret_prog[MAX_CALL_FRAMES];
        unsigned short idx = meta->subprog_idx;

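        /* Modelled on the kernel verifier's check_max_stack_depth(): walk
         * the instructions starting from the main program and, on every
         * pseudo call, push a frame and follow the callee, accumulating the
         * rounded-up stack depth of each frame.  The maximum accumulated
         * depth over all call chains is the stack the FW must provide.
         */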
process_subprog:
        frame_depths[frame] = nfp_prog->subprog[idx].stack_depth;
        frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN);
        depth += frame_depths[frame];
        max_depth = max(max_depth, depth);

continue_subprog:
        for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx;
             meta = nfp_meta_next(meta)) {
                if (!is_mbpf_pseudo_call(meta))
                        continue;

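                /* We found a pseudo call: remember where to resume in the
                 * caller and which subprogram we were in.
                 */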
                ret_insn[frame] = nfp_meta_next(meta);
                ret_prog[frame] = idx;

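                /* Jump to the first instruction of the callee. */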
                meta = nfp_bpf_goto_meta(nfp_prog, meta,
                                         meta->n + 1 + meta->insn.imm);
                idx = meta->subprog_idx;
                frame++;
                goto process_subprog;
        }

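        /* Reaching the end of the for() loop means the last instruction of
         * the subprogram was processed.  If all frames have been popped we
         * are done; otherwise pop a frame and resume in the caller.
         */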
        if (frame == 0)
                return max_depth;

        depth -= frame_depths[frame];
        frame--;
        meta = ret_insn[frame];
        idx = ret_prog[frame];
        goto continue_subprog;
}

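/* Propagate the verifier's zero-extension analysis into the instruction
 * metadata, so the JIT knows where the high 32 bits of a destination need
 * to be explicitly cleared.
 */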
static void nfp_bpf_insn_flag_zext(struct nfp_prog *nfp_prog,
                                   struct bpf_insn_aux_data *aux)
{
        struct nfp_insn_meta *meta;

        list_for_each_entry(meta, &nfp_prog->insns, l) {
                if (aux[meta->n].zext_dst)
                        meta->flags |= FLAG_INSN_DO_ZEXT;
        }
}

int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
        struct bpf_subprog_info *info;
        struct nfp_prog *nfp_prog;
        unsigned int max_stack;
        struct nfp_net *nn;
        int i;

        nfp_prog = env->prog->aux->offload->dev_priv;
        nfp_prog->subprog_cnt = env->subprog_cnt;
        nfp_prog->subprog = kcalloc(nfp_prog->subprog_cnt,
                                    sizeof(nfp_prog->subprog[0]), GFP_KERNEL);
        if (!nfp_prog->subprog)
                return -ENOMEM;

        nfp_assign_subprog_idx_and_regs(env, nfp_prog);

        info = env->subprog_info;
        for (i = 0; i < nfp_prog->subprog_cnt; i++) {
                nfp_prog->subprog[i].stack_depth = info[i].stack_depth;

                if (i == 0)
                        continue;

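                /* Subprograms other than main() also need room for the
                 * return address and, when they clobber r6-r9, for the
                 * saved registers.
                 */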
                nfp_prog->subprog[i].stack_depth += REG_WIDTH;

                if (nfp_prog->subprog[i].needs_reg_push)
                        nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4;
        }

        nn = netdev_priv(env->prog->aux->offload->netdev);
        max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
        nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog);
        if (nfp_prog->stack_size > max_stack) {
                pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
                        nfp_prog->stack_size, max_stack);
                return -EOPNOTSUPP;
        }

        nfp_bpf_insn_flag_zext(nfp_prog, env->insn_aux_data);
        return 0;
}

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
                             struct bpf_insn *insn)
{
        struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
        struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
        struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

        meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
        nfp_prog->verifier_meta = meta;

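        /* Only the conversion of a conditional jump into an unconditional
         * one is expected (and supported) here.
         */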
        if (is_mbpf_cond_jump(meta) &&
            insn->code == (BPF_JMP | BPF_JA | BPF_K)) {
                unsigned int tgt_off;

                tgt_off = off + insn->off + 1;

                if (!insn->off) {
                        meta->jmp_dst = list_next_entry(meta, l);
                        meta->jump_neg_op = false;
                } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) {
                        pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n",
                                off, meta->jmp_dst->n,
                                aux_data[tgt_off].orig_idx);
                        return -EINVAL;
                }
                return 0;
        }

        pr_vlog(env, "unsupported instruction replacement %hhx -> %hhx\n",
                meta->insn.code, insn->code);
        return -EINVAL;
}

int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
        struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
        struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
        struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
        unsigned int i;

        meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);

        for (i = 0; i < cnt; i++) {
                if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns))
                        return -EINVAL;

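                /* Instructions the driver has already decided to skip do not
                 * count towards the number the verifier asked us to remove.
                 */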
                if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT)
                        i--;

                meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT;
                meta = list_next_entry(meta, l);
        }

        return 0;
}