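/*
 * BPF core: bpf_prog allocation and patching, JIT support infrastructure
 * (kallsyms, program pack allocator, constant blinding) and the eBPF
 * interpreter.
 */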
0020 #include <uapi/linux/btf.h>
0021 #include <linux/filter.h>
0022 #include <linux/skbuff.h>
0023 #include <linux/vmalloc.h>
0024 #include <linux/random.h>
0025 #include <linux/moduleloader.h>
0026 #include <linux/bpf.h>
0027 #include <linux/btf.h>
0028 #include <linux/objtool.h>
0029 #include <linux/rbtree_latch.h>
0030 #include <linux/kallsyms.h>
0031 #include <linux/rcupdate.h>
0032 #include <linux/perf_event.h>
0033 #include <linux/extable.h>
0034 #include <linux/log2.h>
0035 #include <linux/bpf_verifier.h>
0036 #include <linux/nodemask.h>
0037
0038 #include <asm/barrier.h>
0039 #include <asm/unaligned.h>
0040
0041
0042 #define BPF_R0 regs[BPF_REG_0]
0043 #define BPF_R1 regs[BPF_REG_1]
0044 #define BPF_R2 regs[BPF_REG_2]
0045 #define BPF_R3 regs[BPF_REG_3]
0046 #define BPF_R4 regs[BPF_REG_4]
0047 #define BPF_R5 regs[BPF_REG_5]
0048 #define BPF_R6 regs[BPF_REG_6]
0049 #define BPF_R7 regs[BPF_REG_7]
0050 #define BPF_R8 regs[BPF_REG_8]
0051 #define BPF_R9 regs[BPF_REG_9]
0052 #define BPF_R10 regs[BPF_REG_10]
0053
0054
0055 #define DST regs[insn->dst_reg]
0056 #define SRC regs[insn->src_reg]
0057 #define FP regs[BPF_REG_FP]
0058 #define AX regs[BPF_REG_AX]
0059 #define ARG1 regs[BPF_REG_ARG1]
0060 #define CTX regs[BPF_REG_CTX]
0061 #define IMM insn->imm
0062
0063
0064
0065
0066
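/* Exported for the bpf jit load helper: translate a negative classic-BPF
 * offset (SKF_NET_OFF / SKF_LL_OFF based) into a pointer within the linear
 * skb data, or return NULL if the access is out of bounds.
 */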
0067 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
0068 {
0069 u8 *ptr = NULL;
0070
0071 if (k >= SKF_NET_OFF) {
0072 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
0073 } else if (k >= SKF_LL_OFF) {
0074 if (unlikely(!skb_mac_header_was_set(skb)))
0075 return NULL;
0076 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
0077 }
0078 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
0079 return ptr;
0080
0081 return NULL;
0082 }
0083
0084 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
0085 {
0086 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
0087 struct bpf_prog_aux *aux;
0088 struct bpf_prog *fp;
0089
0090 size = round_up(size, PAGE_SIZE);
0091 fp = __vmalloc(size, gfp_flags);
0092 if (fp == NULL)
0093 return NULL;
0094
0095 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
0096 if (aux == NULL) {
0097 vfree(fp);
0098 return NULL;
0099 }
0100 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
0101 if (!fp->active) {
0102 vfree(fp);
0103 kfree(aux);
0104 return NULL;
0105 }
0106
0107 fp->pages = size / PAGE_SIZE;
0108 fp->aux = aux;
0109 fp->aux->prog = fp;
0110 fp->jit_requested = ebpf_jit_enabled();
0111 fp->blinding_requested = bpf_jit_blinding_enabled(fp);
0112 #ifdef CONFIG_CGROUP_BPF
0113 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
0114 #endif
0115
0116 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
0117 mutex_init(&fp->aux->used_maps_mutex);
0118 mutex_init(&fp->aux->dst_mutex);
0119
0120 return fp;
0121 }
0122
0123 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
0124 {
0125 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
0126 struct bpf_prog *prog;
0127 int cpu;
0128
0129 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
0130 if (!prog)
0131 return NULL;
0132
0133 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
0134 if (!prog->stats) {
0135 free_percpu(prog->active);
0136 kfree(prog->aux);
0137 vfree(prog);
0138 return NULL;
0139 }
0140
0141 for_each_possible_cpu(cpu) {
0142 struct bpf_prog_stats *pstats;
0143
0144 pstats = per_cpu_ptr(prog->stats, cpu);
0145 u64_stats_init(&pstats->syncp);
0146 }
0147 return prog;
0148 }
0149 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
0150
0151 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
0152 {
0153 if (!prog->aux->nr_linfo || !prog->jit_requested)
0154 return 0;
0155
0156 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
0157 sizeof(*prog->aux->jited_linfo),
0158 GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
0159 if (!prog->aux->jited_linfo)
0160 return -ENOMEM;
0161
0162 return 0;
0163 }
0164
0165 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
0166 {
0167 if (prog->aux->jited_linfo &&
0168 (!prog->jited || !prog->aux->jited_linfo[0])) {
0169 kvfree(prog->aux->jited_linfo);
0170 prog->aux->jited_linfo = NULL;
0171 }
0172
0173 kfree(prog->aux->kfunc_tab);
0174 prog->aux->kfunc_tab = NULL;
0175 }
0176
0200
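/* Fill prog->aux->jited_linfo for this (sub)prog. The JIT provides
 * insn_to_jit_off[], where entry i is the jited byte offset of the end of
 * xlated insn i (relative to this prog). jited_linfo[0] is always the prog
 * start; later entries point at the jited address of the first insn covered
 * by each bpf_line_info, hence the "- 1" indexing below.
 */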
0201 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
0202 const u32 *insn_to_jit_off)
0203 {
0204 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
0205 const struct bpf_line_info *linfo;
0206 void **jited_linfo;
0207
0208 if (!prog->aux->jited_linfo)
0209
0210 return;
0211
0212 linfo_idx = prog->aux->linfo_idx;
0213 linfo = &prog->aux->linfo[linfo_idx];
0214 insn_start = linfo[0].insn_off;
0215 insn_end = insn_start + prog->len;
0216
0217 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
0218 jited_linfo[0] = prog->bpf_func;
0219
0220 nr_linfo = prog->aux->nr_linfo - linfo_idx;
0221
0222 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
0223
0224
0225
0226 jited_linfo[i] = prog->bpf_func +
0227 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
0228 }
0229
0230 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
0231 gfp_t gfp_extra_flags)
0232 {
0233 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
0234 struct bpf_prog *fp;
0235 u32 pages;
0236
0237 size = round_up(size, PAGE_SIZE);
0238 pages = size / PAGE_SIZE;
0239 if (pages <= fp_old->pages)
0240 return fp_old;
0241
0242 fp = __vmalloc(size, gfp_flags);
0243 if (fp) {
0244 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
0245 fp->pages = pages;
0246 fp->aux->prog = fp;
0247
0248
0249
0250
0251 fp_old->aux = NULL;
0252 fp_old->stats = NULL;
0253 fp_old->active = NULL;
0254 __bpf_prog_free(fp_old);
0255 }
0256
0257 return fp;
0258 }
0259
0260 void __bpf_prog_free(struct bpf_prog *fp)
0261 {
0262 if (fp->aux) {
0263 mutex_destroy(&fp->aux->used_maps_mutex);
0264 mutex_destroy(&fp->aux->dst_mutex);
0265 kfree(fp->aux->poke_tab);
0266 kfree(fp->aux);
0267 }
0268 free_percpu(fp->stats);
0269 free_percpu(fp->active);
0270 vfree(fp);
0271 }
0272
0273 int bpf_prog_calc_tag(struct bpf_prog *fp)
0274 {
0275 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
0276 u32 raw_size = bpf_prog_tag_scratch_size(fp);
0277 u32 digest[SHA1_DIGEST_WORDS];
0278 u32 ws[SHA1_WORKSPACE_WORDS];
0279 u32 i, bsize, psize, blocks;
0280 struct bpf_insn *dst;
0281 bool was_ld_map;
0282 u8 *raw, *todo;
0283 __be32 *result;
0284 __be64 *bits;
0285
0286 raw = vmalloc(raw_size);
0287 if (!raw)
0288 return -ENOMEM;
0289
0290 sha1_init(digest);
0291 memset(ws, 0, sizeof(ws));
0292
0293
0294
0295
0296 dst = (void *)raw;
0297 for (i = 0, was_ld_map = false; i < fp->len; i++) {
0298 dst[i] = fp->insnsi[i];
0299 if (!was_ld_map &&
0300 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
0301 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
0302 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
0303 was_ld_map = true;
0304 dst[i].imm = 0;
0305 } else if (was_ld_map &&
0306 dst[i].code == 0 &&
0307 dst[i].dst_reg == 0 &&
0308 dst[i].src_reg == 0 &&
0309 dst[i].off == 0) {
0310 was_ld_map = false;
0311 dst[i].imm = 0;
0312 } else {
0313 was_ld_map = false;
0314 }
0315 }
0316
0317 psize = bpf_prog_insn_size(fp);
0318 memset(&raw[psize], 0, raw_size - psize);
0319 raw[psize++] = 0x80;
0320
0321 bsize = round_up(psize, SHA1_BLOCK_SIZE);
0322 blocks = bsize / SHA1_BLOCK_SIZE;
0323 todo = raw;
0324 if (bsize - psize >= sizeof(__be64)) {
0325 bits = (__be64 *)(todo + bsize - sizeof(__be64));
0326 } else {
0327 bits = (__be64 *)(todo + bsize + bits_offset);
0328 blocks++;
0329 }
0330 *bits = cpu_to_be64((psize - 1) << 3);
0331
0332 while (blocks--) {
0333 sha1_transform(digest, todo, ws);
0334 todo += SHA1_BLOCK_SIZE;
0335 }
0336
0337 result = (__force __be32 *)digest;
0338 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
0339 result[i] = cpu_to_be32(digest[i]);
0340 memcpy(fp->tag, result, sizeof(fp->tag));
0341
0342 vfree(raw);
0343 return 0;
0344 }
0345
0346 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
0347 s32 end_new, s32 curr, const bool probe_pass)
0348 {
0349 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
0350 s32 delta = end_new - end_old;
0351 s64 imm = insn->imm;
0352
0353 if (curr < pos && curr + imm + 1 >= end_old)
0354 imm += delta;
0355 else if (curr >= end_new && curr + imm + 1 < end_new)
0356 imm -= delta;
0357 if (imm < imm_min || imm > imm_max)
0358 return -ERANGE;
0359 if (!probe_pass)
0360 insn->imm = imm;
0361 return 0;
0362 }
0363
0364 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
0365 s32 end_new, s32 curr, const bool probe_pass)
0366 {
0367 const s32 off_min = S16_MIN, off_max = S16_MAX;
0368 s32 delta = end_new - end_old;
0369 s32 off = insn->off;
0370
0371 if (curr < pos && curr + off + 1 >= end_old)
0372 off += delta;
0373 else if (curr >= end_new && curr + off + 1 < end_new)
0374 off -= delta;
0375 if (off < off_min || off > off_max)
0376 return -ERANGE;
0377 if (!probe_pass)
0378 insn->off = off;
0379 return 0;
0380 }
0381
0382 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
0383 s32 end_new, const bool probe_pass)
0384 {
0385 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
0386 struct bpf_insn *insn = prog->insnsi;
0387 int ret = 0;
0388
0389 for (i = 0; i < insn_cnt; i++, insn++) {
0390 u8 code;
0391
0392
0393
0394
0395
0396 if (probe_pass && i == pos) {
0397 i = end_new;
0398 insn = prog->insnsi + end_old;
0399 }
0400 if (bpf_pseudo_func(insn)) {
0401 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
0402 end_new, i, probe_pass);
0403 if (ret)
0404 return ret;
0405 continue;
0406 }
0407 code = insn->code;
0408 if ((BPF_CLASS(code) != BPF_JMP &&
0409 BPF_CLASS(code) != BPF_JMP32) ||
0410 BPF_OP(code) == BPF_EXIT)
0411 continue;
0412
0413 if (BPF_OP(code) == BPF_CALL) {
0414 if (insn->src_reg != BPF_PSEUDO_CALL)
0415 continue;
0416 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
0417 end_new, i, probe_pass);
0418 } else {
0419 ret = bpf_adj_delta_to_off(insn, pos, end_old,
0420 end_new, i, probe_pass);
0421 }
0422 if (ret)
0423 break;
0424 }
0425
0426 return ret;
0427 }
0428
0429 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
0430 {
0431 struct bpf_line_info *linfo;
0432 u32 i, nr_linfo;
0433
0434 nr_linfo = prog->aux->nr_linfo;
0435 if (!nr_linfo || !delta)
0436 return;
0437
0438 linfo = prog->aux->linfo;
0439
0440 for (i = 0; i < nr_linfo; i++)
0441 if (off < linfo[i].insn_off)
0442 break;
0443
0444
0445 for (; i < nr_linfo; i++)
0446 linfo[i].insn_off += delta;
0447 }
0448
0449 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
0450 const struct bpf_insn *patch, u32 len)
0451 {
0452 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
0453 const u32 cnt_max = S16_MAX;
0454 struct bpf_prog *prog_adj;
0455 int err;
0456
0457
0458 if (insn_delta == 0) {
0459 memcpy(prog->insnsi + off, patch, sizeof(*patch));
0460 return prog;
0461 }
0462
0463 insn_adj_cnt = prog->len + insn_delta;
0464
0465
0466
0467
0468
0469
0470 if (insn_adj_cnt > cnt_max &&
0471 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
0472 return ERR_PTR(err);
0473
0474
0475
0476
0477
0478 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
0479 GFP_USER);
0480 if (!prog_adj)
0481 return ERR_PTR(-ENOMEM);
0482
0483 prog_adj->len = insn_adj_cnt;
0484
0485
0486
0487
0488
0489
0490
0491
0492
0493 insn_rest = insn_adj_cnt - off - len;
0494
0495 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
0496 sizeof(*patch) * insn_rest);
0497 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
0498
0499
0500
0501
0502
0503 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
0504
0505 bpf_adj_linfo(prog_adj, off, insn_delta);
0506
0507 return prog_adj;
0508 }
0509
0510 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
0511 {
0512
0513
0514
0515 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
0516 sizeof(struct bpf_insn) * (prog->len - off - cnt));
0517 prog->len -= cnt;
0518
0519 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
0520 }
0521
0522 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
0523 {
0524 int i;
0525
0526 for (i = 0; i < fp->aux->func_cnt; i++)
0527 bpf_prog_kallsyms_del(fp->aux->func[i]);
0528 }
0529
0530 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
0531 {
0532 bpf_prog_kallsyms_del_subprogs(fp);
0533 bpf_prog_kallsyms_del(fp);
0534 }
0535
0536 #ifdef CONFIG_BPF_JIT
0537
0538 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
0539 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
0540 int bpf_jit_harden __read_mostly;
0541 long bpf_jit_limit __read_mostly;
0542 long bpf_jit_limit_max __read_mostly;
0543
0544 static void
0545 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
0546 {
0547 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
0548
0549 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
0550 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len;
0551 }
0552
0553 static void
0554 bpf_prog_ksym_set_name(struct bpf_prog *prog)
0555 {
0556 char *sym = prog->aux->ksym.name;
0557 const char *end = sym + KSYM_NAME_LEN;
0558 const struct btf_type *type;
0559 const char *func_name;
0560
0561 BUILD_BUG_ON(sizeof("bpf_prog_") +
0562 sizeof(prog->tag) * 2 +
0563
0564
0565
0566
0567
0568
0569
0570 sizeof(prog->aux->name) > KSYM_NAME_LEN);
0571
0572 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
0573 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
0574
0575
0576 if (prog->aux->func_info_cnt) {
0577 type = btf_type_by_id(prog->aux->btf,
0578 prog->aux->func_info[prog->aux->func_idx].type_id);
0579 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
0580 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
0581 return;
0582 }
0583
0584 if (prog->aux->name[0])
0585 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
0586 else
0587 *sym = 0;
0588 }
0589
0590 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
0591 {
0592 return container_of(n, struct bpf_ksym, tnode)->start;
0593 }
0594
0595 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
0596 struct latch_tree_node *b)
0597 {
0598 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
0599 }
0600
0601 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
0602 {
0603 unsigned long val = (unsigned long)key;
0604 const struct bpf_ksym *ksym;
0605
0606 ksym = container_of(n, struct bpf_ksym, tnode);
0607
0608 if (val < ksym->start)
0609 return -1;
0610 if (val >= ksym->end)
0611 return 1;
0612
0613 return 0;
0614 }
0615
0616 static const struct latch_tree_ops bpf_tree_ops = {
0617 .less = bpf_tree_less,
0618 .comp = bpf_tree_comp,
0619 };
0620
0621 static DEFINE_SPINLOCK(bpf_lock);
0622 static LIST_HEAD(bpf_kallsyms);
0623 static struct latch_tree_root bpf_tree __cacheline_aligned;
0624
0625 void bpf_ksym_add(struct bpf_ksym *ksym)
0626 {
0627 spin_lock_bh(&bpf_lock);
0628 WARN_ON_ONCE(!list_empty(&ksym->lnode));
0629 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
0630 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
0631 spin_unlock_bh(&bpf_lock);
0632 }
0633
0634 static void __bpf_ksym_del(struct bpf_ksym *ksym)
0635 {
0636 if (list_empty(&ksym->lnode))
0637 return;
0638
0639 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
0640 list_del_rcu(&ksym->lnode);
0641 }
0642
0643 void bpf_ksym_del(struct bpf_ksym *ksym)
0644 {
0645 spin_lock_bh(&bpf_lock);
0646 __bpf_ksym_del(ksym);
0647 spin_unlock_bh(&bpf_lock);
0648 }
0649
0650 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
0651 {
0652 return fp->jited && !bpf_prog_was_classic(fp);
0653 }
0654
0655 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
0656 {
0657 if (!bpf_prog_kallsyms_candidate(fp) ||
0658 !bpf_capable())
0659 return;
0660
0661 bpf_prog_ksym_set_addr(fp);
0662 bpf_prog_ksym_set_name(fp);
0663 fp->aux->ksym.prog = true;
0664
0665 bpf_ksym_add(&fp->aux->ksym);
0666 }
0667
0668 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
0669 {
0670 if (!bpf_prog_kallsyms_candidate(fp))
0671 return;
0672
0673 bpf_ksym_del(&fp->aux->ksym);
0674 }
0675
0676 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
0677 {
0678 struct latch_tree_node *n;
0679
0680 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
0681 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
0682 }
0683
0684 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
0685 unsigned long *off, char *sym)
0686 {
0687 struct bpf_ksym *ksym;
0688 char *ret = NULL;
0689
0690 rcu_read_lock();
0691 ksym = bpf_ksym_find(addr);
0692 if (ksym) {
0693 unsigned long symbol_start = ksym->start;
0694 unsigned long symbol_end = ksym->end;
0695
0696 strncpy(sym, ksym->name, KSYM_NAME_LEN);
0697
0698 ret = sym;
0699 if (size)
0700 *size = symbol_end - symbol_start;
0701 if (off)
0702 *off = addr - symbol_start;
0703 }
0704 rcu_read_unlock();
0705
0706 return ret;
0707 }
0708
0709 bool is_bpf_text_address(unsigned long addr)
0710 {
0711 bool ret;
0712
0713 rcu_read_lock();
0714 ret = bpf_ksym_find(addr) != NULL;
0715 rcu_read_unlock();
0716
0717 return ret;
0718 }
0719
0720 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
0721 {
0722 struct bpf_ksym *ksym = bpf_ksym_find(addr);
0723
0724 return ksym && ksym->prog ?
0725 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
0726 NULL;
0727 }
0728
0729 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
0730 {
0731 const struct exception_table_entry *e = NULL;
0732 struct bpf_prog *prog;
0733
0734 rcu_read_lock();
0735 prog = bpf_prog_ksym_find(addr);
0736 if (!prog)
0737 goto out;
0738 if (!prog->aux->num_exentries)
0739 goto out;
0740
0741 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
0742 out:
0743 rcu_read_unlock();
0744 return e;
0745 }
0746
0747 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
0748 char *sym)
0749 {
0750 struct bpf_ksym *ksym;
0751 unsigned int it = 0;
0752 int ret = -ERANGE;
0753
0754 if (!bpf_jit_kallsyms_enabled())
0755 return ret;
0756
0757 rcu_read_lock();
0758 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
0759 if (it++ != symnum)
0760 continue;
0761
0762 strncpy(sym, ksym->name, KSYM_NAME_LEN);
0763
0764 *value = ksym->start;
0765 *type = BPF_SYM_ELF_TYPE;
0766
0767 ret = 0;
0768 break;
0769 }
0770 rcu_read_unlock();
0771
0772 return ret;
0773 }
0774
0775 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
0776 struct bpf_jit_poke_descriptor *poke)
0777 {
0778 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
0779 static const u32 poke_tab_max = 1024;
0780 u32 slot = prog->aux->size_poke_tab;
0781 u32 size = slot + 1;
0782
0783 if (size > poke_tab_max)
0784 return -ENOSPC;
0785 if (poke->tailcall_target || poke->tailcall_target_stable ||
0786 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
0787 return -EINVAL;
0788
0789 switch (poke->reason) {
0790 case BPF_POKE_REASON_TAIL_CALL:
0791 if (!poke->tail_call.map)
0792 return -EINVAL;
0793 break;
0794 default:
0795 return -EINVAL;
0796 }
0797
0798 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
0799 if (!tab)
0800 return -ENOMEM;
0801
0802 memcpy(&tab[slot], poke, sizeof(*poke));
0803 prog->aux->size_poke_tab = size;
0804 prog->aux->poke_tab = tab;
0805
0806 return slot;
0807 }
0808
0817
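/* BPF program pack allocator.
 *
 * Most BPF programs are small, and giving each one its own page both wastes
 * memory and adds instruction-TLB pressure. The pack allocator instead
 * carves executable memory out of larger shared allocations in 64-byte
 * chunks, tracked by the bitmap below.
 */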
0818 #define BPF_PROG_CHUNK_SHIFT 6
0819 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
0820 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
0821
0822 struct bpf_prog_pack {
0823 struct list_head list;
0824 void *ptr;
0825 unsigned long bitmap[];
0826 };
0827
0828 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
0829
0830 static DEFINE_MUTEX(pack_mutex);
0831 static LIST_HEAD(pack_list);
0832
0833
0834
0835
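/* PMD_SIZE is not available in some special configs (e.g. ARCH=arm with
 * CONFIG_MMU=n), so fall back to PAGE_SIZE there.
 */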
0836 #ifdef PMD_SIZE
0837 #define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
0838 #else
0839 #define BPF_PROG_PACK_SIZE PAGE_SIZE
0840 #endif
0841
0842 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
0843
0844 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
0845 {
0846 struct bpf_prog_pack *pack;
0847
0848 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
0849 GFP_KERNEL);
0850 if (!pack)
0851 return NULL;
0852 pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
0853 if (!pack->ptr) {
0854 kfree(pack);
0855 return NULL;
0856 }
0857 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
0858 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
0859 list_add_tail(&pack->list, &pack_list);
0860
0861 set_vm_flush_reset_perms(pack->ptr);
0862 set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
0863 set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
0864 return pack;
0865 }
0866
0867 static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
0868 {
0869 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
0870 struct bpf_prog_pack *pack;
0871 unsigned long pos;
0872 void *ptr = NULL;
0873
0874 mutex_lock(&pack_mutex);
0875 if (size > BPF_PROG_PACK_SIZE) {
0876 size = round_up(size, PAGE_SIZE);
0877 ptr = module_alloc(size);
0878 if (ptr) {
0879 bpf_fill_ill_insns(ptr, size);
0880 set_vm_flush_reset_perms(ptr);
0881 set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
0882 set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
0883 }
0884 goto out;
0885 }
0886 list_for_each_entry(pack, &pack_list, list) {
0887 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
0888 nbits, 0);
0889 if (pos < BPF_PROG_CHUNK_COUNT)
0890 goto found_free_area;
0891 }
0892
0893 pack = alloc_new_pack(bpf_fill_ill_insns);
0894 if (!pack)
0895 goto out;
0896
0897 pos = 0;
0898
0899 found_free_area:
0900 bitmap_set(pack->bitmap, pos, nbits);
0901 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
0902
0903 out:
0904 mutex_unlock(&pack_mutex);
0905 return ptr;
0906 }
0907
0908 static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
0909 {
0910 struct bpf_prog_pack *pack = NULL, *tmp;
0911 unsigned int nbits;
0912 unsigned long pos;
0913
0914 mutex_lock(&pack_mutex);
0915 if (hdr->size > BPF_PROG_PACK_SIZE) {
0916 module_memfree(hdr);
0917 goto out;
0918 }
0919
0920 list_for_each_entry(tmp, &pack_list, list) {
0921 if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
0922 pack = tmp;
0923 break;
0924 }
0925 }
0926
0927 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
0928 goto out;
0929
0930 nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
0931 pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
0932
0933 WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
0934 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
0935
0936 bitmap_clear(pack->bitmap, pos, nbits);
0937 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
0938 BPF_PROG_CHUNK_COUNT, 0) == 0) {
0939 list_del(&pack->list);
0940 module_memfree(pack->ptr);
0941 kfree(pack);
0942 }
0943 out:
0944 mutex_unlock(&pack_mutex);
0945 }
0946
0947 static atomic_long_t bpf_jit_current;
0948
0949
0950
0951
0952
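/* Can be overridden by an arch's JIT compiler if it has a custom, dedicated
 * BPF backend memory area; by default the limit is derived from the module
 * (or vmalloc) address space size.
 */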
0953 u64 __weak bpf_jit_alloc_exec_limit(void)
0954 {
0955 #if defined(MODULES_VADDR)
0956 return MODULES_END - MODULES_VADDR;
0957 #else
0958 return VMALLOC_END - VMALLOC_START;
0959 #endif
0960 }
0961
0962 static int __init bpf_jit_charge_init(void)
0963 {
0964
0965 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
0966 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
0967 PAGE_SIZE), LONG_MAX);
0968 return 0;
0969 }
0970 pure_initcall(bpf_jit_charge_init);
0971
0972 int bpf_jit_charge_modmem(u32 size)
0973 {
0974 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
0975 if (!bpf_capable()) {
0976 atomic_long_sub(size, &bpf_jit_current);
0977 return -EPERM;
0978 }
0979 }
0980
0981 return 0;
0982 }
0983
0984 void bpf_jit_uncharge_modmem(u32 size)
0985 {
0986 atomic_long_sub(size, &bpf_jit_current);
0987 }
0988
0989 void *__weak bpf_jit_alloc_exec(unsigned long size)
0990 {
0991 return module_alloc(size);
0992 }
0993
0994 void __weak bpf_jit_free_exec(void *addr)
0995 {
0996 module_memfree(addr);
0997 }
0998
0999 struct bpf_binary_header *
1000 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1001 unsigned int alignment,
1002 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1003 {
1004 struct bpf_binary_header *hdr;
1005 u32 size, hole, start;
1006
1007 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1008 alignment > BPF_IMAGE_ALIGNMENT);
1009
1010
1011
1012
1013
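/* Most BPF filters are really small, but if some of them fill a page,
 * allow at least 128 extra bytes so the start of the image can be
 * randomized within the hole computed below.
 */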
1014 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1015
1016 if (bpf_jit_charge_modmem(size))
1017 return NULL;
1018 hdr = bpf_jit_alloc_exec(size);
1019 if (!hdr) {
1020 bpf_jit_uncharge_modmem(size);
1021 return NULL;
1022 }
1023
1024
1025 bpf_fill_ill_insns(hdr, size);
1026
1027 hdr->size = size;
1028 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1029 PAGE_SIZE - sizeof(*hdr));
1030 start = (get_random_int() % hole) & ~(alignment - 1);
1031
1032
1033 *image_ptr = &hdr->image[start];
1034
1035 return hdr;
1036 }
1037
1038 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1039 {
1040 u32 size = hdr->size;
1041
1042 bpf_jit_free_exec(hdr);
1043 bpf_jit_uncharge_modmem(size);
1044 }
1045
1053
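/* Allocate a jit binary from the bpf_prog_pack allocator. The resulting
 * memory is RO+X, so the JIT engine cannot write into it directly; a RW
 * shadow buffer (rw_header/rw_image) is allocated alongside it and copied
 * into place later by bpf_jit_binary_pack_finalize().
 */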
1054 struct bpf_binary_header *
1055 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1056 unsigned int alignment,
1057 struct bpf_binary_header **rw_header,
1058 u8 **rw_image,
1059 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1060 {
1061 struct bpf_binary_header *ro_header;
1062 u32 size, hole, start;
1063
1064 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1065 alignment > BPF_IMAGE_ALIGNMENT);
1066
1067
1068 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1069
1070 if (bpf_jit_charge_modmem(size))
1071 return NULL;
1072 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1073 if (!ro_header) {
1074 bpf_jit_uncharge_modmem(size);
1075 return NULL;
1076 }
1077
1078 *rw_header = kvmalloc(size, GFP_KERNEL);
1079 if (!*rw_header) {
1080 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1081 bpf_prog_pack_free(ro_header);
1082 bpf_jit_uncharge_modmem(size);
1083 return NULL;
1084 }
1085
1086
1087 bpf_fill_ill_insns(*rw_header, size);
1088 (*rw_header)->size = size;
1089
1090 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1091 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1092 start = (get_random_int() % hole) & ~(alignment - 1);
1093
1094 *image_ptr = &ro_header->image[start];
1095 *rw_image = &(*rw_header)->image[start];
1096
1097 return ro_header;
1098 }
1099
1100
1101 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1102 struct bpf_binary_header *ro_header,
1103 struct bpf_binary_header *rw_header)
1104 {
1105 void *ptr;
1106
1107 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1108
1109 kvfree(rw_header);
1110
1111 if (IS_ERR(ptr)) {
1112 bpf_prog_pack_free(ro_header);
1113 return PTR_ERR(ptr);
1114 }
1115 return 0;
1116 }
1117
1127
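/* Called both when a program is freed and when the JIT engine fails before
 * bpf_jit_binary_pack_finalize(); in the latter case the RW buffer still
 * has to be freed alongside the RO allocation.
 */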
1128 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1129 struct bpf_binary_header *rw_header)
1130 {
1131 u32 size = ro_header->size;
1132
1133 bpf_prog_pack_free(ro_header);
1134 kvfree(rw_header);
1135 bpf_jit_uncharge_modmem(size);
1136 }
1137
1138 struct bpf_binary_header *
1139 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1140 {
1141 unsigned long real_start = (unsigned long)fp->bpf_func;
1142 unsigned long addr;
1143
1144 addr = real_start & BPF_PROG_CHUNK_MASK;
1145 return (void *)addr;
1146 }
1147
1148 static inline struct bpf_binary_header *
1149 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1150 {
1151 unsigned long real_start = (unsigned long)fp->bpf_func;
1152 unsigned long addr;
1153
1154 addr = real_start & PAGE_MASK;
1155 return (void *)addr;
1156 }
1157
1158
1159
1160
1161
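/* This symbol is only overridden by archs that have different requirements.
 * In all other cases the default bpf_jit_free() is fine.
 */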
1162 void __weak bpf_jit_free(struct bpf_prog *fp)
1163 {
1164 if (fp->jited) {
1165 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1166
1167 bpf_jit_binary_free(hdr);
1168 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1169 }
1170
1171 bpf_prog_unlock_free(fp);
1172 }
1173
1174 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1175 const struct bpf_insn *insn, bool extra_pass,
1176 u64 *func_addr, bool *func_addr_fixed)
1177 {
1178 s16 off = insn->off;
1179 s32 imm = insn->imm;
1180 u8 *addr;
1181
1182 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1183 if (!*func_addr_fixed) {
1184
1185
1186
1187
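/* Pseudo call: use a place-holder address until the extra (last) pass has
 * collected all subprog addresses, at which point they can be picked up
 * from prog->aux->func[].
 */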
1188 if (!extra_pass)
1189 addr = NULL;
1190 else if (prog->aux->func &&
1191 off >= 0 && off < prog->aux->func_cnt)
1192 addr = (u8 *)prog->aux->func[off]->bpf_func;
1193 else
1194 return -EINVAL;
1195 } else {
1196
1197
1198
1199
1200
1201 addr = (u8 *)__bpf_call_base + imm;
1202 }
1203
1204 *func_addr = (unsigned long)addr;
1205 return 0;
1206 }
1207
1208 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1209 const struct bpf_insn *aux,
1210 struct bpf_insn *to_buff,
1211 bool emit_zext)
1212 {
1213 struct bpf_insn *to = to_buff;
1214 u32 imm_rnd = get_random_int();
1215 s16 off;
1216
1217 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
1218 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1219
1236
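/* Constant blinding rewrites use BPF_REG_AX as scratch register. AX is not
 * accessible from programs and is mapped by all JITs; the interpreter also
 * uses it as a temporary (e.g. for DIV/MOD), and the verifier may use it
 * for its own rewrites, so instructions that already touch AX are left
 * alone here.
 */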
1237 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1238 goto out;
1239
1240 if (from->imm == 0 &&
1241 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
1242 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1243 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1244 goto out;
1245 }
1246
1247 switch (from->code) {
1248 case BPF_ALU | BPF_ADD | BPF_K:
1249 case BPF_ALU | BPF_SUB | BPF_K:
1250 case BPF_ALU | BPF_AND | BPF_K:
1251 case BPF_ALU | BPF_OR | BPF_K:
1252 case BPF_ALU | BPF_XOR | BPF_K:
1253 case BPF_ALU | BPF_MUL | BPF_K:
1254 case BPF_ALU | BPF_MOV | BPF_K:
1255 case BPF_ALU | BPF_DIV | BPF_K:
1256 case BPF_ALU | BPF_MOD | BPF_K:
1257 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1258 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1259 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1260 break;
1261
1262 case BPF_ALU64 | BPF_ADD | BPF_K:
1263 case BPF_ALU64 | BPF_SUB | BPF_K:
1264 case BPF_ALU64 | BPF_AND | BPF_K:
1265 case BPF_ALU64 | BPF_OR | BPF_K:
1266 case BPF_ALU64 | BPF_XOR | BPF_K:
1267 case BPF_ALU64 | BPF_MUL | BPF_K:
1268 case BPF_ALU64 | BPF_MOV | BPF_K:
1269 case BPF_ALU64 | BPF_DIV | BPF_K:
1270 case BPF_ALU64 | BPF_MOD | BPF_K:
1271 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1272 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1273 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1274 break;
1275
1276 case BPF_JMP | BPF_JEQ | BPF_K:
1277 case BPF_JMP | BPF_JNE | BPF_K:
1278 case BPF_JMP | BPF_JGT | BPF_K:
1279 case BPF_JMP | BPF_JLT | BPF_K:
1280 case BPF_JMP | BPF_JGE | BPF_K:
1281 case BPF_JMP | BPF_JLE | BPF_K:
1282 case BPF_JMP | BPF_JSGT | BPF_K:
1283 case BPF_JMP | BPF_JSLT | BPF_K:
1284 case BPF_JMP | BPF_JSGE | BPF_K:
1285 case BPF_JMP | BPF_JSLE | BPF_K:
1286 case BPF_JMP | BPF_JSET | BPF_K:
1287
1288 off = from->off;
1289 if (off < 0)
1290 off -= 2;
1291 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1292 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1293 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1294 break;
1295
1296 case BPF_JMP32 | BPF_JEQ | BPF_K:
1297 case BPF_JMP32 | BPF_JNE | BPF_K:
1298 case BPF_JMP32 | BPF_JGT | BPF_K:
1299 case BPF_JMP32 | BPF_JLT | BPF_K:
1300 case BPF_JMP32 | BPF_JGE | BPF_K:
1301 case BPF_JMP32 | BPF_JLE | BPF_K:
1302 case BPF_JMP32 | BPF_JSGT | BPF_K:
1303 case BPF_JMP32 | BPF_JSLT | BPF_K:
1304 case BPF_JMP32 | BPF_JSGE | BPF_K:
1305 case BPF_JMP32 | BPF_JSLE | BPF_K:
1306 case BPF_JMP32 | BPF_JSET | BPF_K:
1307
1308 off = from->off;
1309 if (off < 0)
1310 off -= 2;
1311 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1312 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1313 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1314 off);
1315 break;
1316
1317 case BPF_LD | BPF_IMM | BPF_DW:
1318 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1319 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1320 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1321 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1322 break;
1323 case 0:
1324 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1325 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1326 if (emit_zext)
1327 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1328 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1329 break;
1330
1331 case BPF_ST | BPF_MEM | BPF_DW:
1332 case BPF_ST | BPF_MEM | BPF_W:
1333 case BPF_ST | BPF_MEM | BPF_H:
1334 case BPF_ST | BPF_MEM | BPF_B:
1335 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1336 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1337 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1338 break;
1339 }
1340 out:
1341 return to - to_buff;
1342 }
1343
1344 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1345 gfp_t gfp_extra_flags)
1346 {
1347 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1348 struct bpf_prog *fp;
1349
1350 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1351 if (fp != NULL) {
1352
1353
1354
1355
1356 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1357 }
1358
1359 return fp;
1360 }
1361
1362 static void bpf_prog_clone_free(struct bpf_prog *fp)
1363 {
1364
1365
1366
1367
1368
1369
1370
1371 fp->aux = NULL;
1372 fp->stats = NULL;
1373 fp->active = NULL;
1374 __bpf_prog_free(fp);
1375 }
1376
1377 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1378 {
1379
1380
1381
1382 fp->aux->prog = fp;
1383 bpf_prog_clone_free(fp_other);
1384 }
1385
1386 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1387 {
1388 struct bpf_insn insn_buff[16], aux[2];
1389 struct bpf_prog *clone, *tmp;
1390 int insn_delta, insn_cnt;
1391 struct bpf_insn *insn;
1392 int i, rewritten;
1393
1394 if (!prog->blinding_requested || prog->blinded)
1395 return prog;
1396
1397 clone = bpf_prog_clone_create(prog, GFP_USER);
1398 if (!clone)
1399 return ERR_PTR(-ENOMEM);
1400
1401 insn_cnt = clone->len;
1402 insn = clone->insnsi;
1403
1404 for (i = 0; i < insn_cnt; i++, insn++) {
1405 if (bpf_pseudo_func(insn)) {
1406
1407
1408
1409
1410 insn++;
1411 i++;
1412 continue;
1413 }
1414
1415
1416
1417
1418
1419 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1420 insn[1].code == 0)
1421 memcpy(aux, insn, sizeof(aux));
1422
1423 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1424 clone->aux->verifier_zext);
1425 if (!rewritten)
1426 continue;
1427
1428 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1429 if (IS_ERR(tmp)) {
1430
1431
1432
1433
1434 bpf_jit_prog_release_other(prog, clone);
1435 return tmp;
1436 }
1437
1438 clone = tmp;
1439 insn_delta = rewritten - 1;
1440
1441
1442 insn = clone->insnsi + i + insn_delta;
1443 insn_cnt += insn_delta;
1444 i += insn_delta;
1445 }
1446
1447 clone->blinded = 1;
1448 return clone;
1449 }
1450 #endif
1451
1452
1453
1454
1455
1456
1457
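/* Base function for offset calculation. Needs to go into the .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so the naming
 * must not change.
 */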
1458 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1459 {
1460 return 0;
1461 }
1462 EXPORT_SYMBOL_GPL(__bpf_call_base);
1463
1464
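/* All UAPI available opcodes. */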
1465 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1466 \
1467 \
1468 INSN_3(ALU, ADD, X), \
1469 INSN_3(ALU, SUB, X), \
1470 INSN_3(ALU, AND, X), \
1471 INSN_3(ALU, OR, X), \
1472 INSN_3(ALU, LSH, X), \
1473 INSN_3(ALU, RSH, X), \
1474 INSN_3(ALU, XOR, X), \
1475 INSN_3(ALU, MUL, X), \
1476 INSN_3(ALU, MOV, X), \
1477 INSN_3(ALU, ARSH, X), \
1478 INSN_3(ALU, DIV, X), \
1479 INSN_3(ALU, MOD, X), \
1480 INSN_2(ALU, NEG), \
1481 INSN_3(ALU, END, TO_BE), \
1482 INSN_3(ALU, END, TO_LE), \
1483 \
1484 INSN_3(ALU, ADD, K), \
1485 INSN_3(ALU, SUB, K), \
1486 INSN_3(ALU, AND, K), \
1487 INSN_3(ALU, OR, K), \
1488 INSN_3(ALU, LSH, K), \
1489 INSN_3(ALU, RSH, K), \
1490 INSN_3(ALU, XOR, K), \
1491 INSN_3(ALU, MUL, K), \
1492 INSN_3(ALU, MOV, K), \
1493 INSN_3(ALU, ARSH, K), \
1494 INSN_3(ALU, DIV, K), \
1495 INSN_3(ALU, MOD, K), \
1496 \
1497 \
1498 INSN_3(ALU64, ADD, X), \
1499 INSN_3(ALU64, SUB, X), \
1500 INSN_3(ALU64, AND, X), \
1501 INSN_3(ALU64, OR, X), \
1502 INSN_3(ALU64, LSH, X), \
1503 INSN_3(ALU64, RSH, X), \
1504 INSN_3(ALU64, XOR, X), \
1505 INSN_3(ALU64, MUL, X), \
1506 INSN_3(ALU64, MOV, X), \
1507 INSN_3(ALU64, ARSH, X), \
1508 INSN_3(ALU64, DIV, X), \
1509 INSN_3(ALU64, MOD, X), \
1510 INSN_2(ALU64, NEG), \
1511 \
1512 INSN_3(ALU64, ADD, K), \
1513 INSN_3(ALU64, SUB, K), \
1514 INSN_3(ALU64, AND, K), \
1515 INSN_3(ALU64, OR, K), \
1516 INSN_3(ALU64, LSH, K), \
1517 INSN_3(ALU64, RSH, K), \
1518 INSN_3(ALU64, XOR, K), \
1519 INSN_3(ALU64, MUL, K), \
1520 INSN_3(ALU64, MOV, K), \
1521 INSN_3(ALU64, ARSH, K), \
1522 INSN_3(ALU64, DIV, K), \
1523 INSN_3(ALU64, MOD, K), \
1524 \
1525 INSN_2(JMP, CALL), \
1526 \
1527 INSN_2(JMP, EXIT), \
1528 \
1529 \
1530 INSN_3(JMP32, JEQ, X), \
1531 INSN_3(JMP32, JNE, X), \
1532 INSN_3(JMP32, JGT, X), \
1533 INSN_3(JMP32, JLT, X), \
1534 INSN_3(JMP32, JGE, X), \
1535 INSN_3(JMP32, JLE, X), \
1536 INSN_3(JMP32, JSGT, X), \
1537 INSN_3(JMP32, JSLT, X), \
1538 INSN_3(JMP32, JSGE, X), \
1539 INSN_3(JMP32, JSLE, X), \
1540 INSN_3(JMP32, JSET, X), \
1541 \
1542 INSN_3(JMP32, JEQ, K), \
1543 INSN_3(JMP32, JNE, K), \
1544 INSN_3(JMP32, JGT, K), \
1545 INSN_3(JMP32, JLT, K), \
1546 INSN_3(JMP32, JGE, K), \
1547 INSN_3(JMP32, JLE, K), \
1548 INSN_3(JMP32, JSGT, K), \
1549 INSN_3(JMP32, JSLT, K), \
1550 INSN_3(JMP32, JSGE, K), \
1551 INSN_3(JMP32, JSLE, K), \
1552 INSN_3(JMP32, JSET, K), \
1553 \
1554 \
1555 INSN_3(JMP, JEQ, X), \
1556 INSN_3(JMP, JNE, X), \
1557 INSN_3(JMP, JGT, X), \
1558 INSN_3(JMP, JLT, X), \
1559 INSN_3(JMP, JGE, X), \
1560 INSN_3(JMP, JLE, X), \
1561 INSN_3(JMP, JSGT, X), \
1562 INSN_3(JMP, JSLT, X), \
1563 INSN_3(JMP, JSGE, X), \
1564 INSN_3(JMP, JSLE, X), \
1565 INSN_3(JMP, JSET, X), \
1566 \
1567 INSN_3(JMP, JEQ, K), \
1568 INSN_3(JMP, JNE, K), \
1569 INSN_3(JMP, JGT, K), \
1570 INSN_3(JMP, JLT, K), \
1571 INSN_3(JMP, JGE, K), \
1572 INSN_3(JMP, JLE, K), \
1573 INSN_3(JMP, JSGT, K), \
1574 INSN_3(JMP, JSLT, K), \
1575 INSN_3(JMP, JSGE, K), \
1576 INSN_3(JMP, JSLE, K), \
1577 INSN_3(JMP, JSET, K), \
1578 INSN_2(JMP, JA), \
1579 \
1580 \
1581 INSN_3(STX, MEM, B), \
1582 INSN_3(STX, MEM, H), \
1583 INSN_3(STX, MEM, W), \
1584 INSN_3(STX, MEM, DW), \
1585 INSN_3(STX, ATOMIC, W), \
1586 INSN_3(STX, ATOMIC, DW), \
1587 \
1588 INSN_3(ST, MEM, B), \
1589 INSN_3(ST, MEM, H), \
1590 INSN_3(ST, MEM, W), \
1591 INSN_3(ST, MEM, DW), \
1592 \
1593 \
1594 INSN_3(LDX, MEM, B), \
1595 INSN_3(LDX, MEM, H), \
1596 INSN_3(LDX, MEM, W), \
1597 INSN_3(LDX, MEM, DW), \
1598 \
1599 INSN_3(LD, IMM, DW)
1600
1601 bool bpf_opcode_in_insntable(u8 code)
1602 {
1603 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1604 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1605 static const bool public_insntable[256] = {
1606 [0 ... 255] = false,
1607
1608 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1609
1610 [BPF_LD | BPF_ABS | BPF_B] = true,
1611 [BPF_LD | BPF_ABS | BPF_H] = true,
1612 [BPF_LD | BPF_ABS | BPF_W] = true,
1613 [BPF_LD | BPF_IND | BPF_B] = true,
1614 [BPF_LD | BPF_IND | BPF_H] = true,
1615 [BPF_LD | BPF_IND | BPF_W] = true,
1616 };
1617 #undef BPF_INSN_3_TBL
1618 #undef BPF_INSN_2_TBL
1619 return public_insntable[code];
1620 }
1621
1622 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1623 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1624 {
1625 memset(dst, 0, size);
1626 return -EFAULT;
1627 }
1628
1637
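/*
 * ___bpf_prog_run - run eBPF program on a given context
 * @regs: the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 * @insn: the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */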
1638 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1639 {
1640 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1641 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1642 static const void * const jumptable[256] __annotate_jump_table = {
1643 [0 ... 255] = &&default_label,
1644
1645 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1646
1647 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1648 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1649 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1650 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1651 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1652 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1653 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1654 };
1655 #undef BPF_INSN_3_LBL
1656 #undef BPF_INSN_2_LBL
1657 u32 tail_call_cnt = 0;
1658
1659 #define CONT ({ insn++; goto select_insn; })
1660 #define CONT_JMP ({ insn++; goto select_insn; })
1661
1662 select_insn:
1663 goto *jumptable[insn->code];
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677 #define SHT(OPCODE, OP) \
1678 ALU64_##OPCODE##_X: \
1679 DST = DST OP (SRC & 63); \
1680 CONT; \
1681 ALU_##OPCODE##_X: \
1682 DST = (u32) DST OP ((u32) SRC & 31); \
1683 CONT; \
1684 ALU64_##OPCODE##_K: \
1685 DST = DST OP IMM; \
1686 CONT; \
1687 ALU_##OPCODE##_K: \
1688 DST = (u32) DST OP (u32) IMM; \
1689 CONT;
1690
1691 #define ALU(OPCODE, OP) \
1692 ALU64_##OPCODE##_X: \
1693 DST = DST OP SRC; \
1694 CONT; \
1695 ALU_##OPCODE##_X: \
1696 DST = (u32) DST OP (u32) SRC; \
1697 CONT; \
1698 ALU64_##OPCODE##_K: \
1699 DST = DST OP IMM; \
1700 CONT; \
1701 ALU_##OPCODE##_K: \
1702 DST = (u32) DST OP (u32) IMM; \
1703 CONT;
1704 ALU(ADD, +)
1705 ALU(SUB, -)
1706 ALU(AND, &)
1707 ALU(OR, |)
1708 ALU(XOR, ^)
1709 ALU(MUL, *)
1710 SHT(LSH, <<)
1711 SHT(RSH, >>)
1712 #undef SHT
1713 #undef ALU
1714 ALU_NEG:
1715 DST = (u32) -DST;
1716 CONT;
1717 ALU64_NEG:
1718 DST = -DST;
1719 CONT;
1720 ALU_MOV_X:
1721 DST = (u32) SRC;
1722 CONT;
1723 ALU_MOV_K:
1724 DST = (u32) IMM;
1725 CONT;
1726 ALU64_MOV_X:
1727 DST = SRC;
1728 CONT;
1729 ALU64_MOV_K:
1730 DST = IMM;
1731 CONT;
1732 LD_IMM_DW:
1733 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1734 insn++;
1735 CONT;
1736 ALU_ARSH_X:
1737 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1738 CONT;
1739 ALU_ARSH_K:
1740 DST = (u64) (u32) (((s32) DST) >> IMM);
1741 CONT;
1742 ALU64_ARSH_X:
1743 (*(s64 *) &DST) >>= (SRC & 63);
1744 CONT;
1745 ALU64_ARSH_K:
1746 (*(s64 *) &DST) >>= IMM;
1747 CONT;
1748 ALU64_MOD_X:
1749 div64_u64_rem(DST, SRC, &AX);
1750 DST = AX;
1751 CONT;
1752 ALU_MOD_X:
1753 AX = (u32) DST;
1754 DST = do_div(AX, (u32) SRC);
1755 CONT;
1756 ALU64_MOD_K:
1757 div64_u64_rem(DST, IMM, &AX);
1758 DST = AX;
1759 CONT;
1760 ALU_MOD_K:
1761 AX = (u32) DST;
1762 DST = do_div(AX, (u32) IMM);
1763 CONT;
1764 ALU64_DIV_X:
1765 DST = div64_u64(DST, SRC);
1766 CONT;
1767 ALU_DIV_X:
1768 AX = (u32) DST;
1769 do_div(AX, (u32) SRC);
1770 DST = (u32) AX;
1771 CONT;
1772 ALU64_DIV_K:
1773 DST = div64_u64(DST, IMM);
1774 CONT;
1775 ALU_DIV_K:
1776 AX = (u32) DST;
1777 do_div(AX, (u32) IMM);
1778 DST = (u32) AX;
1779 CONT;
1780 ALU_END_TO_BE:
1781 switch (IMM) {
1782 case 16:
1783 DST = (__force u16) cpu_to_be16(DST);
1784 break;
1785 case 32:
1786 DST = (__force u32) cpu_to_be32(DST);
1787 break;
1788 case 64:
1789 DST = (__force u64) cpu_to_be64(DST);
1790 break;
1791 }
1792 CONT;
1793 ALU_END_TO_LE:
1794 switch (IMM) {
1795 case 16:
1796 DST = (__force u16) cpu_to_le16(DST);
1797 break;
1798 case 32:
1799 DST = (__force u32) cpu_to_le32(DST);
1800 break;
1801 case 64:
1802 DST = (__force u64) cpu_to_le64(DST);
1803 break;
1804 }
1805 CONT;
1806
1807
1808 JMP_CALL:
1809
1810
1811
1812
1813 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1814 BPF_R4, BPF_R5);
1815 CONT;
1816
1817 JMP_CALL_ARGS:
1818 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1819 BPF_R3, BPF_R4,
1820 BPF_R5,
1821 insn + insn->off + 1);
1822 CONT;
1823
1824 JMP_TAIL_CALL: {
1825 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1826 struct bpf_array *array = container_of(map, struct bpf_array, map);
1827 struct bpf_prog *prog;
1828 u32 index = BPF_R3;
1829
1830 if (unlikely(index >= array->map.max_entries))
1831 goto out;
1832
1833 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1834 goto out;
1835
1836 tail_call_cnt++;
1837
1838 prog = READ_ONCE(array->ptrs[index]);
1839 if (!prog)
1840 goto out;
1841
1842
1843
1844
1845
1846
1847 insn = prog->insnsi;
1848 goto select_insn;
1849 out:
1850 CONT;
1851 }
1852 JMP_JA:
1853 insn += insn->off;
1854 CONT;
1855 JMP_EXIT:
1856 return BPF_R0;
1857
1858 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
1859 JMP_##OPCODE##_X: \
1860 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1861 insn += insn->off; \
1862 CONT_JMP; \
1863 } \
1864 CONT; \
1865 JMP32_##OPCODE##_X: \
1866 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1867 insn += insn->off; \
1868 CONT_JMP; \
1869 } \
1870 CONT; \
1871 JMP_##OPCODE##_K: \
1872 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1873 insn += insn->off; \
1874 CONT_JMP; \
1875 } \
1876 CONT; \
1877 JMP32_##OPCODE##_K: \
1878 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1879 insn += insn->off; \
1880 CONT_JMP; \
1881 } \
1882 CONT;
1883 COND_JMP(u, JEQ, ==)
1884 COND_JMP(u, JNE, !=)
1885 COND_JMP(u, JGT, >)
1886 COND_JMP(u, JLT, <)
1887 COND_JMP(u, JGE, >=)
1888 COND_JMP(u, JLE, <=)
1889 COND_JMP(u, JSET, &)
1890 COND_JMP(s, JSGT, >)
1891 COND_JMP(s, JSLT, <)
1892 COND_JMP(s, JSGE, >=)
1893 COND_JMP(s, JSLE, <=)
1894 #undef COND_JMP
1895
1896 ST_NOSPEC:
1905
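/* Speculation barrier for mitigating Speculative Store Bypass.
 * On x86 this reuses the Spectre v1 barrier_nospec() (lfence); other
 * architectures rely on their own mitigations, so nothing is emitted here.
 */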
1906 #ifdef CONFIG_X86
1907 barrier_nospec();
1908 #endif
1909 CONT;
1910 #define LDST(SIZEOP, SIZE) \
1911 STX_MEM_##SIZEOP: \
1912 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1913 CONT; \
1914 ST_MEM_##SIZEOP: \
1915 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1916 CONT; \
1917 LDX_MEM_##SIZEOP: \
1918 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1919 CONT; \
1920 LDX_PROBE_MEM_##SIZEOP: \
1921 bpf_probe_read_kernel(&DST, sizeof(SIZE), \
1922 (const void *)(long) (SRC + insn->off)); \
1923 DST = *((SIZE *)&DST); \
1924 CONT;
1925
1926 LDST(B, u8)
1927 LDST(H, u16)
1928 LDST(W, u32)
1929 LDST(DW, u64)
1930 #undef LDST
1931
1932 #define ATOMIC_ALU_OP(BOP, KOP) \
1933 case BOP: \
1934 if (BPF_SIZE(insn->code) == BPF_W) \
1935 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1936 (DST + insn->off)); \
1937 else \
1938 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1939 (DST + insn->off)); \
1940 break; \
1941 case BOP | BPF_FETCH: \
1942 if (BPF_SIZE(insn->code) == BPF_W) \
1943 SRC = (u32) atomic_fetch_##KOP( \
1944 (u32) SRC, \
1945 (atomic_t *)(unsigned long) (DST + insn->off)); \
1946 else \
1947 SRC = (u64) atomic64_fetch_##KOP( \
1948 (u64) SRC, \
1949 (atomic64_t *)(unsigned long) (DST + insn->off)); \
1950 break;
1951
1952 STX_ATOMIC_DW:
1953 STX_ATOMIC_W:
1954 switch (IMM) {
1955 ATOMIC_ALU_OP(BPF_ADD, add)
1956 ATOMIC_ALU_OP(BPF_AND, and)
1957 ATOMIC_ALU_OP(BPF_OR, or)
1958 ATOMIC_ALU_OP(BPF_XOR, xor)
1959 #undef ATOMIC_ALU_OP
1960
1961 case BPF_XCHG:
1962 if (BPF_SIZE(insn->code) == BPF_W)
1963 SRC = (u32) atomic_xchg(
1964 (atomic_t *)(unsigned long) (DST + insn->off),
1965 (u32) SRC);
1966 else
1967 SRC = (u64) atomic64_xchg(
1968 (atomic64_t *)(unsigned long) (DST + insn->off),
1969 (u64) SRC);
1970 break;
1971 case BPF_CMPXCHG:
1972 if (BPF_SIZE(insn->code) == BPF_W)
1973 BPF_R0 = (u32) atomic_cmpxchg(
1974 (atomic_t *)(unsigned long) (DST + insn->off),
1975 (u32) BPF_R0, (u32) SRC);
1976 else
1977 BPF_R0 = (u64) atomic64_cmpxchg(
1978 (atomic64_t *)(unsigned long) (DST + insn->off),
1979 (u64) BPF_R0, (u64) SRC);
1980 break;
1981
1982 default:
1983 goto default_label;
1984 }
1985 CONT;
1986
1987 default_label:
1993
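/* If we ever reach this, we have a bug somewhere. Die hard here instead of
 * just returning 0; we could be somewhere in a subprog, so execution could
 * otherwise continue, which we do not want. Note that the verifier only
 * accepts opcodes listed in bpf_opcode_in_insntable().
 */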
1994 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
1995 insn->code, insn->imm);
1996 BUG_ON(1);
1997 return 0;
1998 }
1999
2000 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2001 #define DEFINE_BPF_PROG_RUN(stack_size) \
2002 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2003 { \
2004 u64 stack[stack_size / sizeof(u64)]; \
2005 u64 regs[MAX_BPF_EXT_REG]; \
2006 \
2007 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2008 ARG1 = (u64) (unsigned long) ctx; \
2009 return ___bpf_prog_run(regs, insn); \
2010 }
2011
2012 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2013 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2014 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2015 const struct bpf_insn *insn) \
2016 { \
2017 u64 stack[stack_size / sizeof(u64)]; \
2018 u64 regs[MAX_BPF_EXT_REG]; \
2019 \
2020 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2021 BPF_R1 = r1; \
2022 BPF_R2 = r2; \
2023 BPF_R3 = r3; \
2024 BPF_R4 = r4; \
2025 BPF_R5 = r5; \
2026 return ___bpf_prog_run(regs, insn); \
2027 }
2028
2029 #define EVAL1(FN, X) FN(X)
2030 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2031 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2032 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2033 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2034 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2035
2036 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2037 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2038 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2039
2040 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2041 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2042 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2043
2044 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2045
2046 static unsigned int (*interpreters[])(const void *ctx,
2047 const struct bpf_insn *insn) = {
2048 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2049 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2050 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2051 };
2052 #undef PROG_NAME_LIST
2053 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2054 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2055 const struct bpf_insn *insn) = {
2056 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2057 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2058 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2059 };
2060 #undef PROG_NAME_LIST
2061
2062 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2063 {
2064 stack_depth = max_t(u32, stack_depth, 1);
2065 insn->off = (s16) insn->imm;
2066 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2067 __bpf_call_base_args;
2068 insn->code = BPF_JMP | BPF_CALL_ARGS;
2069 }
2070
2071 #else
2072 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2073 const struct bpf_insn *insn)
2074 {
2075
2076
2077
2078 WARN_ON_ONCE(1);
2079 return 0;
2080 }
2081 #endif
2082
2083 bool bpf_prog_map_compatible(struct bpf_map *map,
2084 const struct bpf_prog *fp)
2085 {
2086 bool ret;
2087
2088 if (fp->kprobe_override)
2089 return false;
2090
2091 spin_lock(&map->owner.lock);
2092 if (!map->owner.type) {
2093
2094
2095
2096 map->owner.type = fp->type;
2097 map->owner.jited = fp->jited;
2098 map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2099 ret = true;
2100 } else {
2101 ret = map->owner.type == fp->type &&
2102 map->owner.jited == fp->jited &&
2103 map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2104 }
2105 spin_unlock(&map->owner.lock);
2106
2107 return ret;
2108 }
2109
2110 static int bpf_check_tail_call(const struct bpf_prog *fp)
2111 {
2112 struct bpf_prog_aux *aux = fp->aux;
2113 int i, ret = 0;
2114
2115 mutex_lock(&aux->used_maps_mutex);
2116 for (i = 0; i < aux->used_map_cnt; i++) {
2117 struct bpf_map *map = aux->used_maps[i];
2118
2119 if (!map_type_contains_progs(map))
2120 continue;
2121
2122 if (!bpf_prog_map_compatible(map, fp)) {
2123 ret = -EINVAL;
2124 goto out;
2125 }
2126 }
2127
2128 out:
2129 mutex_unlock(&aux->used_maps_mutex);
2130 return ret;
2131 }
2132
2133 static void bpf_prog_select_func(struct bpf_prog *fp)
2134 {
2135 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2136 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2137
2138 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2139 #else
2140 fp->bpf_func = __bpf_prog_ret0_warn;
2141 #endif
2142 }
2143
2144
2154
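/*
 * bpf_prog_select_runtime - select exec runtime for BPF program
 * @fp: bpf_prog populated with the BPF program
 * @err: pointer to error variable
 *
 * Try to JIT the eBPF program; if JIT is unavailable and not strictly
 * required, fall back to the interpreter. The program is then run via
 * bpf_prog_run().
 *
 * Return: the &fp argument along with &err set to 0 on success or a
 * negative errno code on failure.
 */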
2155 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2156 {
2157
2158
2159
2160 bool jit_needed = false;
2161
2162 if (fp->bpf_func)
2163 goto finalize;
2164
2165 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2166 bpf_prog_has_kfunc_call(fp))
2167 jit_needed = true;
2168
2169 bpf_prog_select_func(fp);
2170
2171
2172
2173
2174
2175
2176
2177 if (!bpf_prog_is_dev_bound(fp->aux)) {
2178 *err = bpf_prog_alloc_jited_linfo(fp);
2179 if (*err)
2180 return fp;
2181
2182 fp = bpf_int_jit_compile(fp);
2183 bpf_prog_jit_attempt_done(fp);
2184 if (!fp->jited && jit_needed) {
2185 *err = -ENOTSUPP;
2186 return fp;
2187 }
2188 } else {
2189 *err = bpf_prog_offload_compile(fp);
2190 if (*err)
2191 return fp;
2192 }
2193
2194 finalize:
2195 bpf_prog_lock_ro(fp);
2196
2197
2198
2199
2200
2201
2202 *err = bpf_check_tail_call(fp);
2203
2204 return fp;
2205 }
2206 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2207
2208 static unsigned int __bpf_prog_ret1(const void *ctx,
2209 const struct bpf_insn *insn)
2210 {
2211 return 1;
2212 }
2213
2214 static struct bpf_prog_dummy {
2215 struct bpf_prog prog;
2216 } dummy_bpf_prog = {
2217 .prog = {
2218 .bpf_func = __bpf_prog_ret1,
2219 },
2220 };
2221
2222 struct bpf_empty_prog_array bpf_empty_prog_array = {
2223 .null_prog = NULL,
2224 };
2225 EXPORT_SYMBOL(bpf_empty_prog_array);
2226
2227 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2228 {
2229 if (prog_cnt)
2230 return kzalloc(sizeof(struct bpf_prog_array) +
2231 sizeof(struct bpf_prog_array_item) *
2232 (prog_cnt + 1),
2233 flags);
2234
2235 return &bpf_empty_prog_array.hdr;
2236 }
2237
2238 void bpf_prog_array_free(struct bpf_prog_array *progs)
2239 {
2240 if (!progs || progs == &bpf_empty_prog_array.hdr)
2241 return;
2242 kfree_rcu(progs, rcu);
2243 }
2244
2245 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2246 {
2247 struct bpf_prog_array *progs;
2248
2249 progs = container_of(rcu, struct bpf_prog_array, rcu);
2250 kfree_rcu(progs, rcu);
2251 }
2252
2253 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2254 {
2255 if (!progs || progs == &bpf_empty_prog_array.hdr)
2256 return;
2257 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2258 }
2259
2260 int bpf_prog_array_length(struct bpf_prog_array *array)
2261 {
2262 struct bpf_prog_array_item *item;
2263 u32 cnt = 0;
2264
2265 for (item = array->items; item->prog; item++)
2266 if (item->prog != &dummy_bpf_prog.prog)
2267 cnt++;
2268 return cnt;
2269 }
2270
2271 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2272 {
2273 struct bpf_prog_array_item *item;
2274
2275 for (item = array->items; item->prog; item++)
2276 if (item->prog != &dummy_bpf_prog.prog)
2277 return false;
2278 return true;
2279 }
2280
2281 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2282 u32 *prog_ids,
2283 u32 request_cnt)
2284 {
2285 struct bpf_prog_array_item *item;
2286 int i = 0;
2287
2288 for (item = array->items; item->prog; item++) {
2289 if (item->prog == &dummy_bpf_prog.prog)
2290 continue;
2291 prog_ids[i] = item->prog->aux->id;
2292 if (++i == request_cnt) {
2293 item++;
2294 break;
2295 }
2296 }
2297
2298 return !!(item->prog);
2299 }
2300
2301 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2302 __u32 __user *prog_ids, u32 cnt)
2303 {
2304 unsigned long err = 0;
2305 bool nospc;
2306 u32 *ids;
2307
2308
2309
2310
2311
2312
2313
2314 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2315 if (!ids)
2316 return -ENOMEM;
2317 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2318 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2319 kfree(ids);
2320 if (err)
2321 return -EFAULT;
2322 if (nospc)
2323 return -ENOSPC;
2324 return 0;
2325 }
2326
void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

/**
 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
 *                                   index into the program array with
 *                                   a dummy no-op program.
 * @array: a bpf_prog_array
 * @index: the index of the program to replace
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to replace.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- The program to replace is missing.
 */
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
{
	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
}

/**
 * bpf_prog_array_update_at() - Updates the program at the given index
 *                              into the program array.
 * @array: a bpf_prog_array
 * @index: the index of the program to update
 * @prog: the program to insert into the array
 *
 * Skips over dummy programs, by not counting them, when calculating
 * the position of the program to update.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- The program to update is missing.
 */
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog)
{
	struct bpf_prog_array_item *item;

	if (unlikely(index < 0))
		return -EINVAL;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		if (!index) {
			WRITE_ONCE(item->prog, prog);
			return 0;
		}
		index--;
	}
	return -ENOENT;
}

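/* Build a new prog array from @old_array: carry over every real entry except
 * @exclude_prog, then append @include_prog with @bpf_cookie. Returns -ENOENT
 * if @exclude_prog was requested but not found, and -EEXIST if @include_prog
 * is already present.
 */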
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing, *new;
	struct bpf_prog_array *array;
	bool found_exclude = false;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	new = array->items;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog ||
			    existing->prog == &dummy_bpf_prog.prog)
				continue;

			new->prog = existing->prog;
			new->bpf_cookie = existing->bpf_cookie;
			new++;
		}
	}
	if (include_prog) {
		new->prog = include_prog;
		new->bpf_cookie = bpf_cookie;
		new++;
	}
	new->prog = NULL;
	*new_array = array;
	return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count */
	if (!request_cnt || !cnt)
		return 0;

	/* report -ENOSPC when there are more programs than requested */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								     : 0;
}

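/* Drop the references a program holds on its used maps, untracking any
 * registered poke descriptors first.
 */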
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	u32 i;

	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len)
{
#ifdef CONFIG_BPF_SYSCALL
	struct btf_mod_pair *btf_mod;
	u32 i;

	for (i = 0; i < len; i++) {
		btf_mod = &used_btfs[i];
		if (btf_mod->module)
			module_put(btf_mod->module);
		btf_put(btf_mod->btf);
	}
#endif
}

static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
{
	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
	kfree(aux->used_btfs);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
#ifdef CONFIG_BPF_SYSCALL
	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
#endif
#ifdef CONFIG_CGROUP_BPF
	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
		bpf_cgroup_atype_put(aux->cgroup_atype);
#endif
	bpf_free_used_maps(aux);
	bpf_free_used_btfs(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	if (aux->dst_trampoline)
		bpf_trampoline_put(aux->dst_trampoline);
	for (i = 0; i < aux->func_cnt; i++) {
		/* We can just unlink the subprog poke descriptor table as
		 * it was originally linked to the main program and is also
		 * released along with it.
		 */
		aux->func[i]->aux->poke_tab = NULL;
		bpf_jit_free(aux->func[i]);
	}
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

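/* The actual teardown (dropping maps, BTFs, trampolines and the JIT image)
 * may sleep, so bpf_prog_free() only queues bpf_prog_free_deferred() on a
 * workqueue.
 */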
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->dst_prog)
		bpf_prog_put(aux->dst_prog);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space, with state separate from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, so no assumptions about the context can be made
	 * here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

/* Weak definitions of helper functions, in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
const struct bpf_func_proto bpf_set_retval_proto __weak;
const struct bpf_func_proto bpf_get_retval_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}

/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
bool __weak bpf_jit_supports_subprog_tailcalls(void)
{
	return false;
}

bool __weak bpf_jit_supports_kfunc_call(void)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	return ERR_PTR(-ENOTSUPP);
}

int __weak bpf_arch_text_invalidate(void *dst, size_t len)
{
	return -ENOTSUPP;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);