0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /* Copyright (c) 2017 Facebook
0003  */
0004 #include <linux/bpf.h>
0005 #include <linux/btf.h>
0006 #include <linux/btf_ids.h>
0007 #include <linux/slab.h>
0008 #include <linux/init.h>
0009 #include <linux/vmalloc.h>
0010 #include <linux/etherdevice.h>
0011 #include <linux/filter.h>
0012 #include <linux/rcupdate_trace.h>
0013 #include <linux/sched/signal.h>
0014 #include <net/bpf_sk_storage.h>
0015 #include <net/sock.h>
0016 #include <net/tcp.h>
0017 #include <net/net_namespace.h>
0018 #include <net/page_pool.h>
0019 #include <linux/error-injection.h>
0020 #include <linux/smp.h>
0021 #include <linux/sock_diag.h>
0022 #include <net/xdp.h>
0023 
0024 #define CREATE_TRACE_POINTS
0025 #include <trace/events/bpf_test_run.h>
0026 
0027 struct bpf_test_timer {
0028     enum { NO_PREEMPT, NO_MIGRATE } mode;
0029     u32 i;
0030     u64 time_start, time_spent;
0031 };
0032 
0033 static void bpf_test_timer_enter(struct bpf_test_timer *t)
0034     __acquires(rcu)
0035 {
0036     rcu_read_lock();
0037     if (t->mode == NO_PREEMPT)
0038         preempt_disable();
0039     else
0040         migrate_disable();
0041 
0042     t->time_start = ktime_get_ns();
0043 }
0044 
0045 static void bpf_test_timer_leave(struct bpf_test_timer *t)
0046     __releases(rcu)
0047 {
0048     t->time_start = 0;
0049 
0050     if (t->mode == NO_PREEMPT)
0051         preempt_enable();
0052     else
0053         migrate_enable();
0054     rcu_read_unlock();
0055 }
0056 
0057 static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
0058                     u32 repeat, int *err, u32 *duration)
0059     __must_hold(rcu)
0060 {
0061     t->i += iterations;
0062     if (t->i >= repeat) {
0063         /* We're done. */
0064         t->time_spent += ktime_get_ns() - t->time_start;
0065         do_div(t->time_spent, t->i);
0066         *duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
0067         *err = 0;
0068         goto reset;
0069     }
0070 
0071     if (signal_pending(current)) {
0072         /* During iteration: we've been cancelled, abort. */
0073         *err = -EINTR;
0074         goto reset;
0075     }
0076 
0077     if (need_resched()) {
0078         /* During iteration: we need to reschedule between runs. */
0079         t->time_spent += ktime_get_ns() - t->time_start;
0080         bpf_test_timer_leave(t);
0081         cond_resched();
0082         bpf_test_timer_enter(t);
0083     }
0084 
0085     /* Do another round. */
0086     return true;
0087 
0088 reset:
0089     t->i = 0;
0090     return false;
0091 }
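
/* Illustrative use of the timer helpers above (a sketch only; the real call
 * sites are bpf_test_run(), bpf_test_run_xdp_live() and the flow dissector
 * and sk_lookup runners further down in this file):
 *
 *	struct bpf_test_timer t = { NO_MIGRATE };
 *	u32 duration;
 *	int err;
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		...run the program once, or one batch of 'iterations' runs...
 *	} while (bpf_test_timer_continue(&t, 1, repeat, &err, &duration));
 *	bpf_test_timer_leave(&t);
 *
 * When the requested number of repetitions is reached, bpf_test_timer_continue()
 * stores the average per-iteration runtime in 'duration' and 0 in 'err'; a
 * pending signal aborts the loop with -EINTR instead.
 */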
0092 
0093 /* We put this struct at the head of each page with a context and frame
0094  * initialised when the page is allocated, so we don't have to do this on each
0095  * repetition of the test run.
0096  */
0097 struct xdp_page_head {
0098     struct xdp_buff orig_ctx;
0099     struct xdp_buff ctx;
0100     struct xdp_frame frm;
0101     u8 data[];
0102 };
0103 
0104 struct xdp_test_data {
0105     struct xdp_buff *orig_ctx;
0106     struct xdp_rxq_info rxq;
0107     struct net_device *dev;
0108     struct page_pool *pp;
0109     struct xdp_frame **frames;
0110     struct sk_buff **skbs;
0111     struct xdp_mem_info mem;
0112     u32 batch_size;
0113     u32 frame_cnt;
0114 };
0115 
0116 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
0117 #define TEST_XDP_MAX_BATCH 256
0118 
0119 static void xdp_test_run_init_page(struct page *page, void *arg)
0120 {
0121     struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
0122     struct xdp_buff *new_ctx, *orig_ctx;
0123     u32 headroom = XDP_PACKET_HEADROOM;
0124     struct xdp_test_data *xdp = arg;
0125     size_t frm_len, meta_len;
0126     struct xdp_frame *frm;
0127     void *data;
0128 
0129     orig_ctx = xdp->orig_ctx;
0130     frm_len = orig_ctx->data_end - orig_ctx->data_meta;
0131     meta_len = orig_ctx->data - orig_ctx->data_meta;
0132     headroom -= meta_len;
0133 
0134     new_ctx = &head->ctx;
0135     frm = &head->frm;
0136     data = &head->data;
0137     memcpy(data + headroom, orig_ctx->data_meta, frm_len);
0138 
0139     xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
0140     xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
0141     new_ctx->data = new_ctx->data_meta + meta_len;
0142 
0143     xdp_update_frame_from_buff(new_ctx, frm);
0144     frm->mem = new_ctx->rxq->mem;
0145 
0146     memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
0147 }
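
/* Rough per-page layout after xdp_test_run_init_page() (a sketch only; the
 * headroom below is XDP_PACKET_HEADROOM minus the original context's metadata
 * length):
 *
 *	| struct xdp_page_head | headroom | data_meta | data ... data_end | tailroom |
 *	^ page start            ^ head->data[]
 *
 * head->orig_ctx keeps a pristine copy of the prepared xdp_buff so that
 * reset_ctx() can cheaply restore the packet bounds between iterations.
 */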
0148 
0149 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
0150 {
0151     struct page_pool *pp;
0152     int err = -ENOMEM;
0153     struct page_pool_params pp_params = {
0154         .order = 0,
0155         .flags = 0,
0156         .pool_size = xdp->batch_size,
0157         .nid = NUMA_NO_NODE,
0158         .init_callback = xdp_test_run_init_page,
0159         .init_arg = xdp,
0160     };
0161 
0162     xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
0163     if (!xdp->frames)
0164         return -ENOMEM;
0165 
0166     xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
0167     if (!xdp->skbs)
0168         goto err_skbs;
0169 
0170     pp = page_pool_create(&pp_params);
0171     if (IS_ERR(pp)) {
0172         err = PTR_ERR(pp);
0173         goto err_pp;
0174     }
0175 
0176     /* will copy 'mem.id' into pp->xdp_mem_id */
0177     err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
0178     if (err)
0179         goto err_mmodel;
0180 
0181     xdp->pp = pp;
0182 
0183     /* We create a 'fake' RXQ referencing the original dev, but with an
0184      * xdp_mem_info pointing to our page_pool
0185      */
0186     xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
0187     xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
0188     xdp->rxq.mem.id = pp->xdp_mem_id;
0189     xdp->dev = orig_ctx->rxq->dev;
0190     xdp->orig_ctx = orig_ctx;
0191 
0192     return 0;
0193 
0194 err_mmodel:
0195     page_pool_destroy(pp);
0196 err_pp:
0197     kvfree(xdp->skbs);
0198 err_skbs:
0199     kvfree(xdp->frames);
0200     return err;
0201 }
0202 
0203 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
0204 {
0205     xdp_unreg_mem_model(&xdp->mem);
0206     page_pool_destroy(xdp->pp);
0207     kvfree(xdp->frames);
0208     kvfree(xdp->skbs);
0209 }
0210 
0211 static bool ctx_was_changed(struct xdp_page_head *head)
0212 {
0213     return head->orig_ctx.data != head->ctx.data ||
0214         head->orig_ctx.data_meta != head->ctx.data_meta ||
0215         head->orig_ctx.data_end != head->ctx.data_end;
0216 }
0217 
0218 static void reset_ctx(struct xdp_page_head *head)
0219 {
0220     if (likely(!ctx_was_changed(head)))
0221         return;
0222 
0223     head->ctx.data = head->orig_ctx.data;
0224     head->ctx.data_meta = head->orig_ctx.data_meta;
0225     head->ctx.data_end = head->orig_ctx.data_end;
0226     xdp_update_frame_from_buff(&head->ctx, &head->frm);
0227 }
0228 
0229 static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
0230                struct sk_buff **skbs,
0231                struct net_device *dev)
0232 {
0233     gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
0234     int i, n;
0235     LIST_HEAD(list);
0236 
0237     n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
0238     if (unlikely(n == 0)) {
0239         for (i = 0; i < nframes; i++)
0240             xdp_return_frame(frames[i]);
0241         return -ENOMEM;
0242     }
0243 
0244     for (i = 0; i < nframes; i++) {
0245         struct xdp_frame *xdpf = frames[i];
0246         struct sk_buff *skb = skbs[i];
0247 
0248         skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
0249         if (!skb) {
0250             xdp_return_frame(xdpf);
0251             continue;
0252         }
0253 
0254         list_add_tail(&skb->list, &list);
0255     }
0256     netif_receive_skb_list(&list);
0257 
0258     return 0;
0259 }
0260 
0261 static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
0262                   u32 repeat)
0263 {
0264     struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
0265     int err = 0, act, ret, i, nframes = 0, batch_sz;
0266     struct xdp_frame **frames = xdp->frames;
0267     struct xdp_page_head *head;
0268     struct xdp_frame *frm;
0269     bool redirect = false;
0270     struct xdp_buff *ctx;
0271     struct page *page;
0272 
0273     batch_sz = min_t(u32, repeat, xdp->batch_size);
0274 
0275     local_bh_disable();
0276     xdp_set_return_frame_no_direct();
0277 
0278     for (i = 0; i < batch_sz; i++) {
0279         page = page_pool_dev_alloc_pages(xdp->pp);
0280         if (!page) {
0281             err = -ENOMEM;
0282             goto out;
0283         }
0284 
0285         head = phys_to_virt(page_to_phys(page));
0286         reset_ctx(head);
0287         ctx = &head->ctx;
0288         frm = &head->frm;
0289         xdp->frame_cnt++;
0290 
0291         act = bpf_prog_run_xdp(prog, ctx);
0292 
0293         /* if program changed pkt bounds we need to update the xdp_frame */
0294         if (unlikely(ctx_was_changed(head))) {
0295             ret = xdp_update_frame_from_buff(ctx, frm);
0296             if (ret) {
0297                 xdp_return_buff(ctx);
0298                 continue;
0299             }
0300         }
0301 
0302         switch (act) {
0303         case XDP_TX:
0304             /* we can't do a real XDP_TX since we're not in the
0305              * driver, so turn it into a REDIRECT back to the same
0306              * index
0307              */
0308             ri->tgt_index = xdp->dev->ifindex;
0309             ri->map_id = INT_MAX;
0310             ri->map_type = BPF_MAP_TYPE_UNSPEC;
0311             fallthrough;
0312         case XDP_REDIRECT:
0313             redirect = true;
0314             ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
0315             if (ret)
0316                 xdp_return_buff(ctx);
0317             break;
0318         case XDP_PASS:
0319             frames[nframes++] = frm;
0320             break;
0321         default:
0322             bpf_warn_invalid_xdp_action(NULL, prog, act);
0323             fallthrough;
0324         case XDP_DROP:
0325             xdp_return_buff(ctx);
0326             break;
0327         }
0328     }
0329 
0330 out:
0331     if (redirect)
0332         xdp_do_flush();
0333     if (nframes) {
0334         ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
0335         if (ret)
0336             err = ret;
0337     }
0338 
0339     xdp_clear_return_frame_no_direct();
0340     local_bh_enable();
0341     return err;
0342 }
0343 
0344 static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
0345                  u32 repeat, u32 batch_size, u32 *time)
0346 
0347 {
0348     struct xdp_test_data xdp = { .batch_size = batch_size };
0349     struct bpf_test_timer t = { .mode = NO_MIGRATE };
0350     int ret;
0351 
0352     if (!repeat)
0353         repeat = 1;
0354 
0355     ret = xdp_test_run_setup(&xdp, ctx);
0356     if (ret)
0357         return ret;
0358 
0359     bpf_test_timer_enter(&t);
0360     do {
0361         xdp.frame_cnt = 0;
0362         ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
0363         if (unlikely(ret < 0))
0364             break;
0365     } while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
0366     bpf_test_timer_leave(&t);
0367 
0368     xdp_test_run_teardown(&xdp);
0369     return ret;
0370 }
0371 
0372 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
0373             u32 *retval, u32 *time, bool xdp)
0374 {
0375     struct bpf_prog_array_item item = {.prog = prog};
0376     struct bpf_run_ctx *old_ctx;
0377     struct bpf_cg_run_ctx run_ctx;
0378     struct bpf_test_timer t = { NO_MIGRATE };
0379     enum bpf_cgroup_storage_type stype;
0380     int ret;
0381 
0382     for_each_cgroup_storage_type(stype) {
0383         item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
0384         if (IS_ERR(item.cgroup_storage[stype])) {
0385             item.cgroup_storage[stype] = NULL;
0386             for_each_cgroup_storage_type(stype)
0387                 bpf_cgroup_storage_free(item.cgroup_storage[stype]);
0388             return -ENOMEM;
0389         }
0390     }
0391 
0392     if (!repeat)
0393         repeat = 1;
0394 
0395     bpf_test_timer_enter(&t);
0396     old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
0397     do {
0398         run_ctx.prog_item = &item;
0399         if (xdp)
0400             *retval = bpf_prog_run_xdp(prog, ctx);
0401         else
0402             *retval = bpf_prog_run(prog, ctx);
0403     } while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
0404     bpf_reset_run_ctx(old_ctx);
0405     bpf_test_timer_leave(&t);
0406 
0407     for_each_cgroup_storage_type(stype)
0408         bpf_cgroup_storage_free(item.cgroup_storage[stype]);
0409 
0410     return ret;
0411 }
0412 
0413 static int bpf_test_finish(const union bpf_attr *kattr,
0414                union bpf_attr __user *uattr, const void *data,
0415                struct skb_shared_info *sinfo, u32 size,
0416                u32 retval, u32 duration)
0417 {
0418     void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
0419     int err = -EFAULT;
0420     u32 copy_size = size;
0421 
0422     /* Clamp the copy if the user has provided a size hint; otherwise copy
0423      * the full buffer to retain the old behaviour.
0424      */
0425     if (kattr->test.data_size_out &&
0426         copy_size > kattr->test.data_size_out) {
0427         copy_size = kattr->test.data_size_out;
0428         err = -ENOSPC;
0429     }
0430 
0431     if (data_out) {
0432         int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
0433 
0434         if (len < 0) {
0435             err = -ENOSPC;
0436             goto out;
0437         }
0438 
0439         if (copy_to_user(data_out, data, len))
0440             goto out;
0441 
0442         if (sinfo) {
0443             int i, offset = len;
0444             u32 data_len;
0445 
0446             for (i = 0; i < sinfo->nr_frags; i++) {
0447                 skb_frag_t *frag = &sinfo->frags[i];
0448 
0449                 if (offset >= copy_size) {
0450                     err = -ENOSPC;
0451                     break;
0452                 }
0453 
0454                 data_len = min_t(u32, copy_size - offset,
0455                          skb_frag_size(frag));
0456 
0457                 if (copy_to_user(data_out + offset,
0458                          skb_frag_address(frag),
0459                          data_len))
0460                     goto out;
0461 
0462                 offset += data_len;
0463             }
0464         }
0465     }
0466 
0467     if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
0468         goto out;
0469     if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
0470         goto out;
0471     if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
0472         goto out;
0473     if (err != -ENOSPC)
0474         err = 0;
0475 out:
0476     trace_bpf_test_finish(&err);
0477     return err;
0478 }
0479 
0480 /* Integer types of various sizes and pointer combinations cover a variety
0481  * of architecture-dependent calling conventions. 7+ arguments can be
0482  * supported in the future.
0483  */
0484 __diag_push();
0485 __diag_ignore_all("-Wmissing-prototypes",
0486           "Global functions as their definitions will be in vmlinux BTF");
0487 int noinline bpf_fentry_test1(int a)
0488 {
0489     return a + 1;
0490 }
0491 EXPORT_SYMBOL_GPL(bpf_fentry_test1);
0492 ALLOW_ERROR_INJECTION(bpf_fentry_test1, ERRNO);
0493 
0494 int noinline bpf_fentry_test2(int a, u64 b)
0495 {
0496     return a + b;
0497 }
0498 
0499 int noinline bpf_fentry_test3(char a, int b, u64 c)
0500 {
0501     return a + b + c;
0502 }
0503 
0504 int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
0505 {
0506     return (long)a + b + c + d;
0507 }
0508 
0509 int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
0510 {
0511     return a + (long)b + c + d + e;
0512 }
0513 
0514 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
0515 {
0516     return a + (long)b + c + d + (long)e + f;
0517 }
0518 
0519 struct bpf_fentry_test_t {
0520     struct bpf_fentry_test_t *a;
0521 };
0522 
0523 int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
0524 {
0525     return (long)arg;
0526 }
0527 
0528 int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
0529 {
0530     return (long)arg->a;
0531 }
0532 
0533 int noinline bpf_modify_return_test(int a, int *b)
0534 {
0535     *b += 1;
0536     return a + *b;
0537 }
0538 
0539 u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
0540 {
0541     return a + b + c + d;
0542 }
0543 
0544 int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
0545 {
0546     return a + b;
0547 }
0548 
0549 struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
0550 {
0551     return sk;
0552 }
0553 
0554 struct prog_test_member1 {
0555     int a;
0556 };
0557 
0558 struct prog_test_member {
0559     struct prog_test_member1 m;
0560     int c;
0561 };
0562 
0563 struct prog_test_ref_kfunc {
0564     int a;
0565     int b;
0566     struct prog_test_member memb;
0567     struct prog_test_ref_kfunc *next;
0568     refcount_t cnt;
0569 };
0570 
0571 static struct prog_test_ref_kfunc prog_test_struct = {
0572     .a = 42,
0573     .b = 108,
0574     .next = &prog_test_struct,
0575     .cnt = REFCOUNT_INIT(1),
0576 };
0577 
0578 noinline struct prog_test_ref_kfunc *
0579 bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
0580 {
0581     refcount_inc(&prog_test_struct.cnt);
0582     return &prog_test_struct;
0583 }
0584 
0585 noinline struct prog_test_member *
0586 bpf_kfunc_call_memb_acquire(void)
0587 {
0588     WARN_ON_ONCE(1);
0589     return NULL;
0590 }
0591 
0592 noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
0593 {
0594     if (!p)
0595         return;
0596 
0597     refcount_dec(&p->cnt);
0598 }
0599 
0600 noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
0601 {
0602 }
0603 
0604 noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
0605 {
0606     WARN_ON_ONCE(1);
0607 }
0608 
0609 noinline struct prog_test_ref_kfunc *
0610 bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
0611 {
0612     struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
0613 
0614     if (!p)
0615         return NULL;
0616     refcount_inc(&p->cnt);
0617     return p;
0618 }
0619 
0620 struct prog_test_pass1 {
0621     int x0;
0622     struct {
0623         int x1;
0624         struct {
0625             int x2;
0626             struct {
0627                 int x3;
0628             };
0629         };
0630     };
0631 };
0632 
0633 struct prog_test_pass2 {
0634     int len;
0635     short arr1[4];
0636     struct {
0637         char arr2[4];
0638         unsigned long arr3[8];
0639     } x;
0640 };
0641 
0642 struct prog_test_fail1 {
0643     void *p;
0644     int x;
0645 };
0646 
0647 struct prog_test_fail2 {
0648     int x8;
0649     struct prog_test_pass1 x;
0650 };
0651 
0652 struct prog_test_fail3 {
0653     int len;
0654     char arr1[2];
0655     char arr2[];
0656 };
0657 
0658 noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
0659 {
0660 }
0661 
0662 noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
0663 {
0664 }
0665 
0666 noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
0667 {
0668 }
0669 
0670 noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
0671 {
0672 }
0673 
0674 noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
0675 {
0676 }
0677 
0678 noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
0679 {
0680 }
0681 
0682 noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
0683 {
0684 }
0685 
0686 noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
0687 {
0688 }
0689 
0690 noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
0691 {
0692 }
0693 
0694 noinline void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
0695 {
0696 }
0697 
0698 __diag_pop();
0699 
0700 ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
0701 
0702 BTF_SET8_START(test_sk_check_kfunc_ids)
0703 BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
0704 BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
0705 BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
0706 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
0707 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
0708 BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
0709 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
0710 BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
0711 BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
0712 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
0713 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
0714 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
0715 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
0716 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
0717 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
0718 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
0719 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
0720 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
0721 BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
0722 BTF_SET8_END(test_sk_check_kfunc_ids)
0723 
0724 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
0725                u32 size, u32 headroom, u32 tailroom)
0726 {
0727     void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
0728     void *data;
0729 
0730     if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
0731         return ERR_PTR(-EINVAL);
0732 
0733     if (user_size > size)
0734         return ERR_PTR(-EMSGSIZE);
0735 
0736     data = kzalloc(size + headroom + tailroom, GFP_USER);
0737     if (!data)
0738         return ERR_PTR(-ENOMEM);
0739 
0740     if (copy_from_user(data + headroom, data_in, user_size)) {
0741         kfree(data);
0742         return ERR_PTR(-EFAULT);
0743     }
0744 
0745     return data;
0746 }
0747 
0748 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
0749                   const union bpf_attr *kattr,
0750                   union bpf_attr __user *uattr)
0751 {
0752     struct bpf_fentry_test_t arg = {};
0753     u16 side_effect = 0, ret = 0;
0754     int b = 2, err = -EFAULT;
0755     u32 retval = 0;
0756 
0757     if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
0758         return -EINVAL;
0759 
0760     switch (prog->expected_attach_type) {
0761     case BPF_TRACE_FENTRY:
0762     case BPF_TRACE_FEXIT:
0763         if (bpf_fentry_test1(1) != 2 ||
0764             bpf_fentry_test2(2, 3) != 5 ||
0765             bpf_fentry_test3(4, 5, 6) != 15 ||
0766             bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
0767             bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
0768             bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
0769             bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
0770             bpf_fentry_test8(&arg) != 0)
0771             goto out;
0772         break;
0773     case BPF_MODIFY_RETURN:
0774         ret = bpf_modify_return_test(1, &b);
0775         if (b != 2)
0776             side_effect = 1;
0777         break;
0778     default:
0779         goto out;
0780     }
0781 
0782     retval = ((u32)side_effect << 16) | ret;
0783     if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
0784         goto out;
0785 
0786     err = 0;
0787 out:
0788     trace_bpf_test_finish(&err);
0789     return err;
0790 }
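
/* A minimal userspace sketch for this runner (assuming libbpf; error handling
 * omitted). For a program attached to one of the bpf_fentry_test*() functions
 * above, a test run with no data makes the kernel call the whole
 * bpf_fentry_test*() series once:
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, topts);
 *
 *	err = bpf_prog_test_run_opts(prog_fd, &topts);
 *
 * topts.retval then packs 'side_effect' in the upper 16 bits and 'ret' in the
 * lower 16 bits (both zero for plain fentry/fexit programs).
 */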
0791 
0792 struct bpf_raw_tp_test_run_info {
0793     struct bpf_prog *prog;
0794     void *ctx;
0795     u32 retval;
0796 };
0797 
0798 static void
0799 __bpf_prog_test_run_raw_tp(void *data)
0800 {
0801     struct bpf_raw_tp_test_run_info *info = data;
0802 
0803     rcu_read_lock();
0804     info->retval = bpf_prog_run(info->prog, info->ctx);
0805     rcu_read_unlock();
0806 }
0807 
0808 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
0809                  const union bpf_attr *kattr,
0810                  union bpf_attr __user *uattr)
0811 {
0812     void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
0813     __u32 ctx_size_in = kattr->test.ctx_size_in;
0814     struct bpf_raw_tp_test_run_info info;
0815     int cpu = kattr->test.cpu, err = 0;
0816     int current_cpu;
0817 
0818     /* doesn't support data_in/out, ctx_out, duration, repeat or batch_size */
0819     if (kattr->test.data_in || kattr->test.data_out ||
0820         kattr->test.ctx_out || kattr->test.duration ||
0821         kattr->test.repeat || kattr->test.batch_size)
0822         return -EINVAL;
0823 
0824     if (ctx_size_in < prog->aux->max_ctx_offset ||
0825         ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
0826         return -EINVAL;
0827 
0828     if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
0829         return -EINVAL;
0830 
0831     if (ctx_size_in) {
0832         info.ctx = memdup_user(ctx_in, ctx_size_in);
0833         if (IS_ERR(info.ctx))
0834             return PTR_ERR(info.ctx);
0835     } else {
0836         info.ctx = NULL;
0837     }
0838 
0839     info.prog = prog;
0840 
0841     current_cpu = get_cpu();
0842     if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
0843         cpu == current_cpu) {
0844         __bpf_prog_test_run_raw_tp(&info);
0845     } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
0846         /* smp_call_function_single() also checks cpu_online()
0847          * after csd_lock(). However, since cpu is from user
0848          * space, let's do an extra quick check to filter out
0849          * invalid value before smp_call_function_single().
0850          */
0851         err = -ENXIO;
0852     } else {
0853         err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
0854                            &info, 1);
0855     }
0856     put_cpu();
0857 
0858     if (!err &&
0859         copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
0860         err = -EFAULT;
0861 
0862     kfree(info.ctx);
0863     return err;
0864 }
0865 
0866 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
0867 {
0868     void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
0869     void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
0870     u32 size = kattr->test.ctx_size_in;
0871     void *data;
0872     int err;
0873 
0874     if (!data_in && !data_out)
0875         return NULL;
0876 
0877     data = kzalloc(max_size, GFP_USER);
0878     if (!data)
0879         return ERR_PTR(-ENOMEM);
0880 
0881     if (data_in) {
0882         err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
0883         if (err) {
0884             kfree(data);
0885             return ERR_PTR(err);
0886         }
0887 
0888         size = min_t(u32, max_size, size);
0889         if (copy_from_user(data, data_in, size)) {
0890             kfree(data);
0891             return ERR_PTR(-EFAULT);
0892         }
0893     }
0894     return data;
0895 }
0896 
0897 static int bpf_ctx_finish(const union bpf_attr *kattr,
0898               union bpf_attr __user *uattr, const void *data,
0899               u32 size)
0900 {
0901     void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
0902     int err = -EFAULT;
0903     u32 copy_size = size;
0904 
0905     if (!data || !data_out)
0906         return 0;
0907 
0908     if (copy_size > kattr->test.ctx_size_out) {
0909         copy_size = kattr->test.ctx_size_out;
0910         err = -ENOSPC;
0911     }
0912 
0913     if (copy_to_user(data_out, data, copy_size))
0914         goto out;
0915     if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
0916         goto out;
0917     if (err != -ENOSPC)
0918         err = 0;
0919 out:
0920     return err;
0921 }
0922 
0923 /**
0924  * range_is_zero - test whether buffer is initialized
0925  * @buf: buffer to check
0926  * @from: check from this position
0927  * @to: check up until (excluding) this position
0928  *
0929  * This function returns true if there is no non-zero byte
0930  * in the buf in the range [from,to).
0931  */
0932 static inline bool range_is_zero(void *buf, size_t from, size_t to)
0933 {
0934     return !memchr_inv((u8 *)buf + from, 0, to - from);
0935 }
0936 
0937 static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
0938 {
0939     struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
0940 
0941     if (!skb->len)
0942         return -EINVAL;
0943 
0944     if (!__skb)
0945         return 0;
0946 
0947     /* make sure the fields we don't use are zeroed */
0948     if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
0949         return -EINVAL;
0950 
0951     /* mark is allowed */
0952 
0953     if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
0954                offsetof(struct __sk_buff, priority)))
0955         return -EINVAL;
0956 
0957     /* priority is allowed */
0958     /* ingress_ifindex is allowed */
0959     /* ifindex is allowed */
0960 
0961     if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
0962                offsetof(struct __sk_buff, cb)))
0963         return -EINVAL;
0964 
0965     /* cb is allowed */
0966 
0967     if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
0968                offsetof(struct __sk_buff, tstamp)))
0969         return -EINVAL;
0970 
0971     /* tstamp is allowed */
0972     /* wire_len is allowed */
0973     /* gso_segs is allowed */
0974 
0975     if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
0976                offsetof(struct __sk_buff, gso_size)))
0977         return -EINVAL;
0978 
0979     /* gso_size is allowed */
0980 
0981     if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
0982                offsetof(struct __sk_buff, hwtstamp)))
0983         return -EINVAL;
0984 
0985     /* hwtstamp is allowed */
0986 
0987     if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
0988                sizeof(struct __sk_buff)))
0989         return -EINVAL;
0990 
0991     skb->mark = __skb->mark;
0992     skb->priority = __skb->priority;
0993     skb->skb_iif = __skb->ingress_ifindex;
0994     skb->tstamp = __skb->tstamp;
0995     memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
0996 
0997     if (__skb->wire_len == 0) {
0998         cb->pkt_len = skb->len;
0999     } else {
1000         if (__skb->wire_len < skb->len ||
1001             __skb->wire_len > GSO_LEGACY_MAX_SIZE)
1002             return -EINVAL;
1003         cb->pkt_len = __skb->wire_len;
1004     }
1005 
1006     if (__skb->gso_segs > GSO_MAX_SEGS)
1007         return -EINVAL;
1008     skb_shinfo(skb)->gso_segs = __skb->gso_segs;
1009     skb_shinfo(skb)->gso_size = __skb->gso_size;
1010     skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
1011 
1012     return 0;
1013 }
1014 
1015 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
1016 {
1017     struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
1018 
1019     if (!__skb)
1020         return;
1021 
1022     __skb->mark = skb->mark;
1023     __skb->priority = skb->priority;
1024     __skb->ingress_ifindex = skb->skb_iif;
1025     __skb->ifindex = skb->dev->ifindex;
1026     __skb->tstamp = skb->tstamp;
1027     memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
1028     __skb->wire_len = cb->pkt_len;
1029     __skb->gso_segs = skb_shinfo(skb)->gso_segs;
1030     __skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
1031 }
1032 
1033 static struct proto bpf_dummy_proto = {
1034     .name   = "bpf_dummy",
1035     .owner  = THIS_MODULE,
1036     .obj_size = sizeof(struct sock),
1037 };
1038 
1039 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1040               union bpf_attr __user *uattr)
1041 {
1042     bool is_l2 = false, is_direct_pkt_access = false;
1043     struct net *net = current->nsproxy->net_ns;
1044     struct net_device *dev = net->loopback_dev;
1045     u32 size = kattr->test.data_size_in;
1046     u32 repeat = kattr->test.repeat;
1047     struct __sk_buff *ctx = NULL;
1048     u32 retval, duration;
1049     int hh_len = ETH_HLEN;
1050     struct sk_buff *skb;
1051     struct sock *sk;
1052     void *data;
1053     int ret;
1054 
1055     if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1056         return -EINVAL;
1057 
1058     data = bpf_test_init(kattr, kattr->test.data_size_in,
1059                  size, NET_SKB_PAD + NET_IP_ALIGN,
1060                  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1061     if (IS_ERR(data))
1062         return PTR_ERR(data);
1063 
1064     ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
1065     if (IS_ERR(ctx)) {
1066         kfree(data);
1067         return PTR_ERR(ctx);
1068     }
1069 
1070     switch (prog->type) {
1071     case BPF_PROG_TYPE_SCHED_CLS:
1072     case BPF_PROG_TYPE_SCHED_ACT:
1073         is_l2 = true;
1074         fallthrough;
1075     case BPF_PROG_TYPE_LWT_IN:
1076     case BPF_PROG_TYPE_LWT_OUT:
1077     case BPF_PROG_TYPE_LWT_XMIT:
1078         is_direct_pkt_access = true;
1079         break;
1080     default:
1081         break;
1082     }
1083 
1084     sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
1085     if (!sk) {
1086         kfree(data);
1087         kfree(ctx);
1088         return -ENOMEM;
1089     }
1090     sock_init_data(NULL, sk);
1091 
1092     skb = build_skb(data, 0);
1093     if (!skb) {
1094         kfree(data);
1095         kfree(ctx);
1096         sk_free(sk);
1097         return -ENOMEM;
1098     }
1099     skb->sk = sk;
1100 
1101     skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1102     __skb_put(skb, size);
1103     if (ctx && ctx->ifindex > 1) {
1104         dev = dev_get_by_index(net, ctx->ifindex);
1105         if (!dev) {
1106             ret = -ENODEV;
1107             goto out;
1108         }
1109     }
1110     skb->protocol = eth_type_trans(skb, dev);
1111     skb_reset_network_header(skb);
1112 
1113     switch (skb->protocol) {
1114     case htons(ETH_P_IP):
1115         sk->sk_family = AF_INET;
1116         if (sizeof(struct iphdr) <= skb_headlen(skb)) {
1117             sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
1118             sk->sk_daddr = ip_hdr(skb)->daddr;
1119         }
1120         break;
1121 #if IS_ENABLED(CONFIG_IPV6)
1122     case htons(ETH_P_IPV6):
1123         sk->sk_family = AF_INET6;
1124         if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
1125             sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
1126             sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
1127         }
1128         break;
1129 #endif
1130     default:
1131         break;
1132     }
1133 
1134     if (is_l2)
1135         __skb_push(skb, hh_len);
1136     if (is_direct_pkt_access)
1137         bpf_compute_data_pointers(skb);
1138     ret = convert___skb_to_skb(skb, ctx);
1139     if (ret)
1140         goto out;
1141     ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
1142     if (ret)
1143         goto out;
1144     if (!is_l2) {
1145         if (skb_headroom(skb) < hh_len) {
1146             int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1147 
1148             if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1149                 ret = -ENOMEM;
1150                 goto out;
1151             }
1152         }
1153         memset(__skb_push(skb, hh_len), 0, hh_len);
1154     }
1155     convert_skb_to___skb(skb, ctx);
1156 
1157     size = skb->len;
1158     /* bpf program can never convert linear skb to non-linear */
1159     if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1160         size = skb_headlen(skb);
1161     ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1162                   duration);
1163     if (!ret)
1164         ret = bpf_ctx_finish(kattr, uattr, ctx,
1165                      sizeof(struct __sk_buff));
1166 out:
1167     if (dev && dev != net->loopback_dev)
1168         dev_put(dev);
1169     kfree_skb(skb);
1170     sk_free(sk);
1171     kfree(ctx);
1172     return ret;
1173 }
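
/* A minimal userspace sketch for this runner (assuming libbpf; buffer names
 * and sizes are illustrative, error handling omitted):
 *
 *	char pkt_in[128], pkt_out[256];
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.data_in = pkt_in,
 *		.data_size_in = sizeof(pkt_in),
 *		.data_out = pkt_out,
 *		.data_size_out = sizeof(pkt_out),
 *		.repeat = 1,
 *	);
 *
 *	err = bpf_prog_test_run_opts(prog_fd, &topts);
 *
 * On return, topts.retval and topts.duration are filled in and
 * topts.data_size_out reports the full output size; -ENOSPC means the output
 * was truncated to fit the supplied buffer (see bpf_test_finish() above).
 */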
1174 
1175 static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
1176 {
1177     unsigned int ingress_ifindex, rx_queue_index;
1178     struct netdev_rx_queue *rxqueue;
1179     struct net_device *device;
1180 
1181     if (!xdp_md)
1182         return 0;
1183 
1184     if (xdp_md->egress_ifindex != 0)
1185         return -EINVAL;
1186 
1187     ingress_ifindex = xdp_md->ingress_ifindex;
1188     rx_queue_index = xdp_md->rx_queue_index;
1189 
1190     if (!ingress_ifindex && rx_queue_index)
1191         return -EINVAL;
1192 
1193     if (ingress_ifindex) {
1194         device = dev_get_by_index(current->nsproxy->net_ns,
1195                       ingress_ifindex);
1196         if (!device)
1197             return -ENODEV;
1198 
1199         if (rx_queue_index >= device->real_num_rx_queues)
1200             goto free_dev;
1201 
1202         rxqueue = __netif_get_rx_queue(device, rx_queue_index);
1203 
1204         if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
1205             goto free_dev;
1206 
1207         xdp->rxq = &rxqueue->xdp_rxq;
1208         /* The device is now tracked in the xdp->rxq for later
1209          * dev_put()
1210          */
1211     }
1212 
1213     xdp->data = xdp->data_meta + xdp_md->data;
1214     return 0;
1215 
1216 free_dev:
1217     dev_put(device);
1218     return -EINVAL;
1219 }
1220 
1221 static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
1222 {
1223     if (!xdp_md)
1224         return;
1225 
1226     xdp_md->data = xdp->data - xdp->data_meta;
1227     xdp_md->data_end = xdp->data_end - xdp->data_meta;
1228 
1229     if (xdp_md->ingress_ifindex)
1230         dev_put(xdp->rxq->dev);
1231 }
1232 
1233 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1234               union bpf_attr __user *uattr)
1235 {
1236     bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
1237     u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1238     u32 batch_size = kattr->test.batch_size;
1239     u32 retval = 0, duration, max_data_sz;
1240     u32 size = kattr->test.data_size_in;
1241     u32 headroom = XDP_PACKET_HEADROOM;
1242     u32 repeat = kattr->test.repeat;
1243     struct netdev_rx_queue *rxqueue;
1244     struct skb_shared_info *sinfo;
1245     struct xdp_buff xdp = {};
1246     int i, ret = -EINVAL;
1247     struct xdp_md *ctx;
1248     void *data;
1249 
1250     if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
1251         prog->expected_attach_type == BPF_XDP_CPUMAP)
1252         return -EINVAL;
1253 
1254     if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
1255         return -EINVAL;
1256 
1257     if (do_live) {
1258         if (!batch_size)
1259             batch_size = NAPI_POLL_WEIGHT;
1260         else if (batch_size > TEST_XDP_MAX_BATCH)
1261             return -E2BIG;
1262 
1263         headroom += sizeof(struct xdp_page_head);
1264     } else if (batch_size) {
1265         return -EINVAL;
1266     }
1267 
1268     ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
1269     if (IS_ERR(ctx))
1270         return PTR_ERR(ctx);
1271 
1272     if (ctx) {
1273         /* There can't be user provided data before the meta data */
1274         if (ctx->data_meta || ctx->data_end != size ||
1275             ctx->data > ctx->data_end ||
1276             unlikely(xdp_metalen_invalid(ctx->data)) ||
1277             (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
1278             goto free_ctx;
1279         /* Meta data is allocated from the headroom */
1280         headroom -= ctx->data;
1281     }
1282 
1283     max_data_sz = 4096 - headroom - tailroom;
1284     if (size > max_data_sz) {
1285         /* disallow live data mode for jumbo frames */
1286         if (do_live)
1287             goto free_ctx;
1288         size = max_data_sz;
1289     }
1290 
1291     data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
1292     if (IS_ERR(data)) {
1293         ret = PTR_ERR(data);
1294         goto free_ctx;
1295     }
1296 
1297     rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
1298     rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
1299     xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
1300     xdp_prepare_buff(&xdp, data, headroom, size, true);
1301     sinfo = xdp_get_shared_info_from_buff(&xdp);
1302 
1303     ret = xdp_convert_md_to_buff(ctx, &xdp);
1304     if (ret)
1305         goto free_data;
1306 
1307     if (unlikely(kattr->test.data_size_in > size)) {
1308         void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1309 
1310         while (size < kattr->test.data_size_in) {
1311             struct page *page;
1312             skb_frag_t *frag;
1313             u32 data_len;
1314 
1315             if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1316                 ret = -ENOMEM;
1317                 goto out;
1318             }
1319 
1320             page = alloc_page(GFP_KERNEL);
1321             if (!page) {
1322                 ret = -ENOMEM;
1323                 goto out;
1324             }
1325 
1326             frag = &sinfo->frags[sinfo->nr_frags++];
1327             __skb_frag_set_page(frag, page);
1328 
1329             data_len = min_t(u32, kattr->test.data_size_in - size,
1330                      PAGE_SIZE);
1331             skb_frag_size_set(frag, data_len);
1332 
1333             if (copy_from_user(page_address(page), data_in + size,
1334                        data_len)) {
1335                 ret = -EFAULT;
1336                 goto out;
1337             }
1338             sinfo->xdp_frags_size += data_len;
1339             size += data_len;
1340         }
1341         xdp_buff_set_frags_flag(&xdp);
1342     }
1343 
1344     if (repeat > 1)
1345         bpf_prog_change_xdp(NULL, prog);
1346 
1347     if (do_live)
1348         ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
1349     else
1350         ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
1351     /* We convert the xdp_buff back to an xdp_md before checking the return
1352      * code so the reference count of any held netdevice will be decremented
1353      * even if the test run failed.
1354      */
1355     xdp_convert_buff_to_md(&xdp, ctx);
1356     if (ret)
1357         goto out;
1358 
1359     size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1360     ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1361                   retval, duration);
1362     if (!ret)
1363         ret = bpf_ctx_finish(kattr, uattr, ctx,
1364                      sizeof(struct xdp_md));
1365 
1366 out:
1367     if (repeat > 1)
1368         bpf_prog_change_xdp(prog, NULL);
1369 free_data:
1370     for (i = 0; i < sinfo->nr_frags; i++)
1371         __free_page(skb_frag_page(&sinfo->frags[i]));
1372     kfree(data);
1373 free_ctx:
1374     kfree(ctx);
1375     return ret;
1376 }
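
/* Live-frames mode sketch (assuming libbpf). With BPF_F_TEST_XDP_LIVE_FRAMES
 * the runner pushes frames through the batch path above, so XDP_TX and
 * XDP_REDIRECT are really acted on instead of being reported back, and retval
 * stays 0:
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.data_in = pkt,
 *		.data_size_in = sizeof(pkt),
 *		.repeat = 1 << 20,
 *		.batch_size = 64,
 *		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
 *	);
 *
 *	err = bpf_prog_test_run_opts(prog_fd, &topts);
 *
 * topts.duration reports the average per-frame runtime. Because redirected
 * frames reach real devices in the caller's network namespace, it is safest
 * to run this from a dedicated namespace.
 */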
1377 
1378 static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
1379 {
1380     /* make sure the fields we don't use are zeroed */
1381     if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
1382         return -EINVAL;
1383 
1384     /* flags is allowed */
1385 
1386     if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
1387                sizeof(struct bpf_flow_keys)))
1388         return -EINVAL;
1389 
1390     return 0;
1391 }
1392 
1393 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1394                      const union bpf_attr *kattr,
1395                      union bpf_attr __user *uattr)
1396 {
1397     struct bpf_test_timer t = { NO_PREEMPT };
1398     u32 size = kattr->test.data_size_in;
1399     struct bpf_flow_dissector ctx = {};
1400     u32 repeat = kattr->test.repeat;
1401     struct bpf_flow_keys *user_ctx;
1402     struct bpf_flow_keys flow_keys;
1403     const struct ethhdr *eth;
1404     unsigned int flags = 0;
1405     u32 retval, duration;
1406     void *data;
1407     int ret;
1408 
1409     if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1410         return -EINVAL;
1411 
1412     if (size < ETH_HLEN)
1413         return -EINVAL;
1414 
1415     data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
1416     if (IS_ERR(data))
1417         return PTR_ERR(data);
1418 
1419     eth = (struct ethhdr *)data;
1420 
1421     if (!repeat)
1422         repeat = 1;
1423 
1424     user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
1425     if (IS_ERR(user_ctx)) {
1426         kfree(data);
1427         return PTR_ERR(user_ctx);
1428     }
1429     if (user_ctx) {
1430         ret = verify_user_bpf_flow_keys(user_ctx);
1431         if (ret)
1432             goto out;
1433         flags = user_ctx->flags;
1434     }
1435 
1436     ctx.flow_keys = &flow_keys;
1437     ctx.data = data;
1438     ctx.data_end = (__u8 *)data + size;
1439 
1440     bpf_test_timer_enter(&t);
1441     do {
1442         retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
1443                       size, flags);
1444     } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1445     bpf_test_timer_leave(&t);
1446 
1447     if (ret < 0)
1448         goto out;
1449 
1450     ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1451                   sizeof(flow_keys), retval, duration);
1452     if (!ret)
1453         ret = bpf_ctx_finish(kattr, uattr, user_ctx,
1454                      sizeof(struct bpf_flow_keys));
1455 
1456 out:
1457     kfree(user_ctx);
1458     kfree(data);
1459     return ret;
1460 }
1461 
1462 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
1463                 union bpf_attr __user *uattr)
1464 {
1465     struct bpf_test_timer t = { NO_PREEMPT };
1466     struct bpf_prog_array *progs = NULL;
1467     struct bpf_sk_lookup_kern ctx = {};
1468     u32 repeat = kattr->test.repeat;
1469     struct bpf_sk_lookup *user_ctx;
1470     u32 retval, duration;
1471     int ret = -EINVAL;
1472 
1473     if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1474         return -EINVAL;
1475 
1476     if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
1477         kattr->test.data_size_out)
1478         return -EINVAL;
1479 
1480     if (!repeat)
1481         repeat = 1;
1482 
1483     user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
1484     if (IS_ERR(user_ctx))
1485         return PTR_ERR(user_ctx);
1486 
1487     if (!user_ctx)
1488         return -EINVAL;
1489 
1490     if (user_ctx->sk)
1491         goto out;
1492 
1493     if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
1494         goto out;
1495 
1496     if (user_ctx->local_port > U16_MAX) {
1497         ret = -ERANGE;
1498         goto out;
1499     }
1500 
1501     ctx.family = (u16)user_ctx->family;
1502     ctx.protocol = (u16)user_ctx->protocol;
1503     ctx.dport = (u16)user_ctx->local_port;
1504     ctx.sport = user_ctx->remote_port;
1505 
1506     switch (ctx.family) {
1507     case AF_INET:
1508         ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
1509         ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
1510         break;
1511 
1512 #if IS_ENABLED(CONFIG_IPV6)
1513     case AF_INET6:
1514         ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
1515         ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
1516         break;
1517 #endif
1518 
1519     default:
1520         ret = -EAFNOSUPPORT;
1521         goto out;
1522     }
1523 
1524     progs = bpf_prog_array_alloc(1, GFP_KERNEL);
1525     if (!progs) {
1526         ret = -ENOMEM;
1527         goto out;
1528     }
1529 
1530     progs->items[0].prog = prog;
1531 
1532     bpf_test_timer_enter(&t);
1533     do {
1534         ctx.selected_sk = NULL;
1535         retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
1536     } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1537     bpf_test_timer_leave(&t);
1538 
1539     if (ret < 0)
1540         goto out;
1541 
1542     user_ctx->cookie = 0;
1543     if (ctx.selected_sk) {
1544         if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
1545             ret = -EOPNOTSUPP;
1546             goto out;
1547         }
1548 
1549         user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
1550     }
1551 
1552     ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1553     if (!ret)
1554         ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
1555 
1556 out:
1557     bpf_prog_array_free(progs);
1558     kfree(user_ctx);
1559     return ret;
1560 }
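
/* Userspace sketch for the sk_lookup runner (assuming libbpf). No packet data
 * is used; the test takes a struct bpf_sk_lookup through ctx_in/ctx_out and
 * reports the program's selected socket via the 'cookie' field. Field values
 * below are illustrative; everything past 'local_port' must stay zero to pass
 * the checks above:
 *
 *	struct bpf_sk_lookup user_ctx = {
 *		.family      = AF_INET,
 *		.protocol    = IPPROTO_TCP,
 *		.local_port  = 8080,            (host byte order)
 *	};
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.ctx_in = &user_ctx,
 *		.ctx_size_in = sizeof(user_ctx),
 *		.ctx_out = &user_ctx,
 *		.ctx_size_out = sizeof(user_ctx),
 *	);
 *
 *	err = bpf_prog_test_run_opts(prog_fd, &topts);
 *
 * On success, user_ctx.cookie holds the socket cookie of the selected socket,
 * or 0 if the program selected none.
 */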
1561 
1562 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1563                   const union bpf_attr *kattr,
1564                   union bpf_attr __user *uattr)
1565 {
1566     void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
1567     __u32 ctx_size_in = kattr->test.ctx_size_in;
1568     void *ctx = NULL;
1569     u32 retval;
1570     int err = 0;
1571 
1572     /* doesn't support data_in/out, ctx_out, duration, repeat, flags or batch_size */
1573     if (kattr->test.data_in || kattr->test.data_out ||
1574         kattr->test.ctx_out || kattr->test.duration ||
1575         kattr->test.repeat || kattr->test.flags ||
1576         kattr->test.batch_size)
1577         return -EINVAL;
1578 
1579     if (ctx_size_in < prog->aux->max_ctx_offset ||
1580         ctx_size_in > U16_MAX)
1581         return -EINVAL;
1582 
1583     if (ctx_size_in) {
1584         ctx = memdup_user(ctx_in, ctx_size_in);
1585         if (IS_ERR(ctx))
1586             return PTR_ERR(ctx);
1587     }
1588 
1589     rcu_read_lock_trace();
1590     retval = bpf_prog_run_pin_on_cpu(prog, ctx);
1591     rcu_read_unlock_trace();
1592 
1593     if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
1594         err = -EFAULT;
1595         goto out;
1596     }
1597     if (ctx_size_in)
1598         if (copy_to_user(ctx_in, ctx, ctx_size_in))
1599             err = -EFAULT;
1600 out:
1601     kfree(ctx);
1602     return err;
1603 }
1604 
1605 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
1606     .owner = THIS_MODULE,
1607     .set   = &test_sk_check_kfunc_ids,
1608 };
1609 
1610 BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
1611 BTF_ID(struct, prog_test_ref_kfunc)
1612 BTF_ID(func, bpf_kfunc_call_test_release)
1613 BTF_ID(struct, prog_test_member)
1614 BTF_ID(func, bpf_kfunc_call_memb_release)
1615 
1616 static int __init bpf_prog_test_run_init(void)
1617 {
1618     const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
1619         {
1620           .btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
1621           .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
1622         },
1623         {
1624           .btf_id   = bpf_prog_test_dtor_kfunc_ids[2],
1625           .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
1626         },
1627     };
1628     int ret;
1629 
1630     ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
1631     ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
1632     return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
1633                           ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
1634                           THIS_MODULE);
1635 }
1636 late_initcall(bpf_prog_test_run_init);