Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /* Copyright (c) 2020 Facebook */
0003 
0004 #include <linux/fs.h>
0005 #include <linux/anon_inodes.h>
0006 #include <linux/filter.h>
0007 #include <linux/bpf.h>
0008 #include <linux/rcupdate_trace.h>
0009 
/* One registered iterator target; lives on the global 'targets' list
 * (protected by targets_mutex).
 */
struct bpf_iter_target_info {
    struct list_head list;      /* entry in the global 'targets' list */
    const struct bpf_iter_reg *reg_info;    /* registration data supplied by the target */
    u32 btf_id; /* cached value, set lazily by cache_btf_id() */
};
0015 
/* An attached iterator: a bpf_link plus per-link target state. */
struct bpf_iter_link {
    struct bpf_link link;       /* embedded generic link (must be first for container_of) */
    struct bpf_iter_aux_info aux;   /* per-link data filled by the target's attach_target() */
    struct bpf_iter_target_info *tinfo; /* the target this link iterates over */
};
0021 
/* Per-open-seq_file state.  seq->private points at target_private, so
 * helpers recover this container via container_of().
 */
struct bpf_iter_priv_data {
    struct bpf_iter_target_info *tinfo; /* target being iterated */
    const struct bpf_iter_seq_info *seq_info;   /* seq ops + private-size info in use */
    struct bpf_prog *prog;      /* prog reference held for the seq_file's lifetime */
    u64 session_id;     /* unique id of this open file, from the global counter */
    u64 seq_num;        /* number of objects already visited in this session */
    bool done_stop;     /* true once the prog ran for the final NULL object in stop() */
    u8 target_private[] __aligned(8);   /* seq_priv_size bytes owned by the target */
};
0031 
/* All registered iterator targets; protected by targets_mutex. */
static struct list_head targets = LIST_HEAD_INIT(targets);
static DEFINE_MUTEX(targets_mutex);

/* protect bpf_iter_link changes */
static DEFINE_MUTEX(link_mutex);

/* incremented on every opened seq_file */
static atomic64_t session_id;

/* forward declaration; definition is near the bottom of this file */
static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
                const struct bpf_iter_seq_info *seq_info);
0043 
0044 static void bpf_iter_inc_seq_num(struct seq_file *seq)
0045 {
0046     struct bpf_iter_priv_data *iter_priv;
0047 
0048     iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
0049                  target_private);
0050     iter_priv->seq_num++;
0051 }
0052 
0053 static void bpf_iter_dec_seq_num(struct seq_file *seq)
0054 {
0055     struct bpf_iter_priv_data *iter_priv;
0056 
0057     iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
0058                  target_private);
0059     iter_priv->seq_num--;
0060 }
0061 
0062 static void bpf_iter_done_stop(struct seq_file *seq)
0063 {
0064     struct bpf_iter_priv_data *iter_priv;
0065 
0066     iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
0067                  target_private);
0068     iter_priv->done_stop = true;
0069 }
0070 
0071 static inline bool bpf_iter_target_support_resched(const struct bpf_iter_target_info *tinfo)
0072 {
0073     return tinfo->reg_info->feature & BPF_ITER_RESCHED;
0074 }
0075 
0076 static bool bpf_iter_support_resched(struct seq_file *seq)
0077 {
0078     struct bpf_iter_priv_data *iter_priv;
0079 
0080     iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
0081                  target_private);
0082     return bpf_iter_target_support_resched(iter_priv->tinfo);
0083 }
0084 
/* maximum visited objects before bailing out, bounding the time one
 * read() spends under seq->lock
 */
#define MAX_ITER_OBJECTS    1000000
0087 
/* bpf_seq_read, a customized and simpler version for bpf iterator.
 * The following are differences from seq_read():
 *  . fixed buffer size (PAGE_SIZE << 3, see the kvmalloc below)
 *  . assuming NULL ->llseek()
 *  . stop() may call bpf program, handling potential overflow there
 */
static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
                loff_t *ppos)
{
    struct seq_file *seq = file->private_data;
    size_t n, offs, copied = 0;
    int err = 0, num_objs = 0;
    bool can_resched;
    void *p;

    mutex_lock(&seq->lock);

    /* allocate the output buffer lazily on first read */
    if (!seq->buf) {
        seq->size = PAGE_SIZE << 3;
        seq->buf = kvmalloc(seq->size, GFP_KERNEL);
        if (!seq->buf) {
            err = -ENOMEM;
            goto done;
        }
    }

    /* leftover data from a previous read: drain it before producing more */
    if (seq->count) {
        n = min(seq->count, size);
        err = copy_to_user(buf, seq->buf + seq->from, n);
        if (err) {
            err = -EFAULT;
            goto done;
        }
        seq->count -= n;
        seq->from += n;
        copied = n;
        goto done;
    }

    seq->from = 0;
    p = seq->op->start(seq, &seq->index);
    if (!p)
        goto stop;
    if (IS_ERR(p)) {
        err = PTR_ERR(p);
        seq->op->stop(seq, p);
        seq->count = 0;
        goto done;
    }

    /* show the first object outside the loop */
    err = seq->op->show(seq, p);
    if (err > 0) {
        /* object is skipped, decrease seq_num, so next
         * valid object can reuse the same seq_num.
         */
        bpf_iter_dec_seq_num(seq);
        seq->count = 0;
    } else if (err < 0 || seq_has_overflowed(seq)) {
        if (!err)
            err = -E2BIG;
        seq->op->stop(seq, p);
        seq->count = 0;
        goto done;
    }

    can_resched = bpf_iter_support_resched(seq);
    while (1) {
        loff_t pos = seq->index;

        num_objs++;
        offs = seq->count;  /* buffer watermark before this object */
        p = seq->op->next(seq, p, &seq->index);
        if (pos == seq->index) {
            pr_info_ratelimited("buggy seq_file .next function %ps "
                "did not updated position index\n",
                seq->op->next);
            seq->index++;
        }

        if (IS_ERR_OR_NULL(p))
            break;

        /* got a valid next object, increase seq_num */
        bpf_iter_inc_seq_num(seq);

        if (seq->count >= size)
            break;

        /* bail out after visiting too many objects; -EAGAIN only if
         * nothing has been buffered yet, otherwise return what we have
         */
        if (num_objs >= MAX_ITER_OBJECTS) {
            if (offs == 0) {
                err = -EAGAIN;
                seq->op->stop(seq, p);
                goto done;
            }
            break;
        }

        err = seq->op->show(seq, p);
        if (err > 0) {
            /* object skipped: roll back both seq_num and buffer */
            bpf_iter_dec_seq_num(seq);
            seq->count = offs;
        } else if (err < 0 || seq_has_overflowed(seq)) {
            seq->count = offs;
            if (offs == 0) {
                if (!err)
                    err = -E2BIG;
                seq->op->stop(seq, p);
                goto done;
            }
            /* buffer full: deliver what fit so far */
            break;
        }

        if (can_resched)
            cond_resched();
    }
stop:
    offs = seq->count;
    /* bpf program called if !p */
    seq->op->stop(seq, p);
    if (!p) {
        if (!seq_has_overflowed(seq)) {
            /* natural end of iteration; remember stop() ran the prog */
            bpf_iter_done_stop(seq);
        } else {
            /* stop() output overflowed; drop it and retry next read */
            seq->count = offs;
            if (offs == 0) {
                err = -E2BIG;
                goto done;
            }
        }
    }

    n = min(seq->count, size);
    err = copy_to_user(buf, seq->buf, n);
    if (err) {
        err = -EFAULT;
        goto done;
    }
    copied = n;
    seq->count -= n;
    seq->from = n;
done:
    if (!copied)
        copied = err;
    else
        *ppos += copied;
    mutex_unlock(&seq->lock);
    return copied;
}
0236 
0237 static const struct bpf_iter_seq_info *
0238 __get_seq_info(struct bpf_iter_link *link)
0239 {
0240     const struct bpf_iter_seq_info *seq_info;
0241 
0242     if (link->aux.map) {
0243         seq_info = link->aux.map->ops->iter_seq_info;
0244         if (seq_info)
0245             return seq_info;
0246     }
0247 
0248     return link->tinfo->reg_info->seq_info;
0249 }
0250 
0251 static int iter_open(struct inode *inode, struct file *file)
0252 {
0253     struct bpf_iter_link *link = inode->i_private;
0254 
0255     return prepare_seq_file(file, link, __get_seq_info(link));
0256 }
0257 
/* release() for iterator files: tear down the target's private state,
 * drop the prog reference taken in prepare_seq_file(), then free the
 * whole private area via seq_release_private().
 */
static int iter_release(struct inode *inode, struct file *file)
{
    struct bpf_iter_priv_data *iter_priv;
    struct seq_file *seq;

    seq = file->private_data;
    /* prepare_seq_file() may have failed; nothing to release then */
    if (!seq)
        return 0;

    iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
                 target_private);

    if (iter_priv->seq_info->fini_seq_private)
        iter_priv->seq_info->fini_seq_private(seq->private);

    bpf_prog_put(iter_priv->prog);
    /* point seq->private back at the container so seq_release_private()
     * frees the whole bpf_iter_priv_data, not just target_private
     */
    seq->private = iter_priv;

    return seq_release_private(inode, file);
}
0278 
/* file_operations backing both anon-inode and pinned bpf_iter files */
const struct file_operations bpf_iter_fops = {
    .open       = iter_open,
    .llseek     = no_llseek,
    .read       = bpf_seq_read,
    .release    = iter_release,
};
0285 
0286 /* The argument reg_info will be cached in bpf_iter_target_info.
0287  * The common practice is to declare target reg_info as
0288  * a const static variable and passed as an argument to
0289  * bpf_iter_reg_target().
0290  */
0291 int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
0292 {
0293     struct bpf_iter_target_info *tinfo;
0294 
0295     tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);
0296     if (!tinfo)
0297         return -ENOMEM;
0298 
0299     tinfo->reg_info = reg_info;
0300     INIT_LIST_HEAD(&tinfo->list);
0301 
0302     mutex_lock(&targets_mutex);
0303     list_add(&tinfo->list, &targets);
0304     mutex_unlock(&targets_mutex);
0305 
0306     return 0;
0307 }
0308 
0309 void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info)
0310 {
0311     struct bpf_iter_target_info *tinfo;
0312     bool found = false;
0313 
0314     mutex_lock(&targets_mutex);
0315     list_for_each_entry(tinfo, &targets, list) {
0316         if (reg_info == tinfo->reg_info) {
0317             list_del(&tinfo->list);
0318             kfree(tinfo);
0319             found = true;
0320             break;
0321         }
0322     }
0323     mutex_unlock(&targets_mutex);
0324 
0325     WARN_ON(found == false);
0326 }
0327 
/* Cache the prog's attach BTF id in the target so later lookups can
 * match on btf_id instead of comparing attach function names.
 */
static void cache_btf_id(struct bpf_iter_target_info *tinfo,
             struct bpf_prog *prog)
{
    tinfo->btf_id = prog->aux->attach_btf_id;
}
0333 
/* Check whether @prog attaches to a registered iterator target.
 * The attach function name must start with BPF_ITER_FUNC_PREFIX; the
 * remainder is the target name.  On a name match the target's btf_id
 * is cached so subsequent calls hit the fast path.  On success the
 * prog inherits the target's ctx_arg_info for verification.
 */
bool bpf_iter_prog_supported(struct bpf_prog *prog)
{
    const char *attach_fname = prog->aux->attach_func_name;
    struct bpf_iter_target_info *tinfo = NULL, *iter;
    u32 prog_btf_id = prog->aux->attach_btf_id;
    const char *prefix = BPF_ITER_FUNC_PREFIX;
    int prefix_len = strlen(prefix);

    if (strncmp(attach_fname, prefix, prefix_len))
        return false;

    mutex_lock(&targets_mutex);
    list_for_each_entry(iter, &targets, list) {
        /* fast path: btf_id already cached for this target */
        if (iter->btf_id && iter->btf_id == prog_btf_id) {
            tinfo = iter;
            break;
        }
        /* slow path: compare the target name after the prefix */
        if (!strcmp(attach_fname + prefix_len, iter->reg_info->target)) {
            cache_btf_id(iter, prog);
            tinfo = iter;
            break;
        }
    }
    mutex_unlock(&targets_mutex);

    if (tinfo) {
        prog->aux->ctx_arg_info_size = tinfo->reg_info->ctx_arg_info_size;
        prog->aux->ctx_arg_info = tinfo->reg_info->ctx_arg_info;
    }

    return tinfo != NULL;
}
0366 
0367 const struct bpf_func_proto *
0368 bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
0369 {
0370     const struct bpf_iter_target_info *tinfo;
0371     const struct bpf_func_proto *fn = NULL;
0372 
0373     mutex_lock(&targets_mutex);
0374     list_for_each_entry(tinfo, &targets, list) {
0375         if (tinfo->btf_id == prog->aux->attach_btf_id) {
0376             const struct bpf_iter_reg *reg_info;
0377 
0378             reg_info = tinfo->reg_info;
0379             if (reg_info->get_func_proto)
0380                 fn = reg_info->get_func_proto(func_id, prog);
0381             break;
0382         }
0383     }
0384     mutex_unlock(&targets_mutex);
0385 
0386     return fn;
0387 }
0388 
0389 static void bpf_iter_link_release(struct bpf_link *link)
0390 {
0391     struct bpf_iter_link *iter_link =
0392         container_of(link, struct bpf_iter_link, link);
0393 
0394     if (iter_link->tinfo->reg_info->detach_target)
0395         iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
0396 }
0397 
0398 static void bpf_iter_link_dealloc(struct bpf_link *link)
0399 {
0400     struct bpf_iter_link *iter_link =
0401         container_of(link, struct bpf_iter_link, link);
0402 
0403     kfree(iter_link);
0404 }
0405 
/* link ->update_prog (BPF_LINK_UPDATE): swap the attached prog.
 * With an expected @old_prog, it must still be the attached one
 * (-EPERM otherwise).  The new prog must match type, expected attach
 * type and attach btf_id exactly (-EINVAL otherwise).
 */
static int bpf_iter_link_replace(struct bpf_link *link,
                 struct bpf_prog *new_prog,
                 struct bpf_prog *old_prog)
{
    int ret = 0;

    mutex_lock(&link_mutex);
    if (old_prog && link->prog != old_prog) {
        ret = -EPERM;
        goto out_unlock;
    }

    if (link->prog->type != new_prog->type ||
        link->prog->expected_attach_type != new_prog->expected_attach_type ||
        link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) {
        ret = -EINVAL;
        goto out_unlock;
    }

    /* publish the new prog, then drop the reference on the old one */
    old_prog = xchg(&link->prog, new_prog);
    bpf_prog_put(old_prog);

out_unlock:
    mutex_unlock(&link_mutex);
    return ret;
}
0432 
0433 static void bpf_iter_link_show_fdinfo(const struct bpf_link *link,
0434                       struct seq_file *seq)
0435 {
0436     struct bpf_iter_link *iter_link =
0437         container_of(link, struct bpf_iter_link, link);
0438     bpf_iter_show_fdinfo_t show_fdinfo;
0439 
0440     seq_printf(seq,
0441            "target_name:\t%s\n",
0442            iter_link->tinfo->reg_info->target);
0443 
0444     show_fdinfo = iter_link->tinfo->reg_info->show_fdinfo;
0445     if (show_fdinfo)
0446         show_fdinfo(&iter_link->aux, seq);
0447 }
0448 
/* link ->fill_link_info (BPF_OBJ_GET_INFO_BY_FD): copy the target name
 * to the user buffer and report the required length.  If the buffer is
 * too small, a truncated NUL-terminated prefix is copied and -ENOSPC
 * returned.  Targets may append their own info via fill_link_info.
 */
static int bpf_iter_link_fill_link_info(const struct bpf_link *link,
                    struct bpf_link_info *info)
{
    struct bpf_iter_link *iter_link =
        container_of(link, struct bpf_iter_link, link);
    char __user *ubuf = u64_to_user_ptr(info->iter.target_name);
    bpf_iter_fill_link_info_t fill_link_info;
    u32 ulen = info->iter.target_name_len;
    const char *target_name;
    u32 target_len;

    /* buffer pointer and length must be both set or both clear */
    if (!ulen ^ !ubuf)
        return -EINVAL;

    target_name = iter_link->tinfo->reg_info->target;
    target_len =  strlen(target_name);
    /* always report the size needed, including the NUL terminator */
    info->iter.target_name_len = target_len + 1;

    if (ubuf) {
        if (ulen >= target_len + 1) {
            if (copy_to_user(ubuf, target_name, target_len + 1))
                return -EFAULT;
        } else {
            char zero = '\0';

            /* buffer too small: copy a truncated prefix + NUL.
             * ulen >= 1 here because of the !ulen ^ !ubuf check.
             */
            if (copy_to_user(ubuf, target_name, ulen - 1))
                return -EFAULT;
            if (put_user(zero, ubuf + ulen - 1))
                return -EFAULT;
            return -ENOSPC;
        }
    }

    fill_link_info = iter_link->tinfo->reg_info->fill_link_info;
    if (fill_link_info)
        return fill_link_info(&iter_link->aux, info);

    return 0;
}
0488 
/* bpf_link ops shared by all iterator links; also used as the identity
 * marker in bpf_link_is_iter()
 */
static const struct bpf_link_ops bpf_iter_link_lops = {
    .release = bpf_iter_link_release,
    .dealloc = bpf_iter_link_dealloc,
    .update_prog = bpf_iter_link_replace,
    .show_fdinfo = bpf_iter_link_show_fdinfo,
    .fill_link_info = bpf_iter_link_fill_link_info,
};
0496 
/* True if @link is an iterator link, identified by its ops table. */
bool bpf_link_is_iter(struct bpf_link *link)
{
    return link->ops == &bpf_iter_link_lops;
}
0501 
/* Create an iterator link for @prog (BPF_LINK_CREATE).  Optional
 * iter_info is copied in and passed to the target's attach_target()
 * callback, e.g. to bind a map-element iterator to a specific map.
 * Returns a new link fd on success or a negative error.
 */
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
             struct bpf_prog *prog)
{
    struct bpf_iter_target_info *tinfo = NULL, *iter;
    struct bpf_link_primer link_primer;
    union bpf_iter_link_info linfo;
    struct bpf_iter_link *link;
    u32 prog_btf_id, linfo_len;
    bpfptr_t ulinfo;
    int err;

    if (attr->link_create.target_fd || attr->link_create.flags)
        return -EINVAL;

    memset(&linfo, 0, sizeof(union bpf_iter_link_info));

    ulinfo = make_bpfptr(attr->link_create.iter_info, uattr.is_kernel);
    linfo_len = attr->link_create.iter_info_len;
    /* iter_info pointer and length must be both set or both clear */
    if (bpfptr_is_null(ulinfo) ^ !linfo_len)
        return -EINVAL;

    if (!bpfptr_is_null(ulinfo)) {
        /* reject non-zero bytes beyond the kernel's struct size */
        err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
                           linfo_len);
        if (err)
            return err;
        linfo_len = min_t(u32, linfo_len, sizeof(linfo));
        if (copy_from_bpfptr(&linfo, ulinfo, linfo_len))
            return -EFAULT;
    }

    /* find the target this prog attaches to, by cached btf_id */
    prog_btf_id = prog->aux->attach_btf_id;
    mutex_lock(&targets_mutex);
    list_for_each_entry(iter, &targets, list) {
        if (iter->btf_id == prog_btf_id) {
            tinfo = iter;
            break;
        }
    }
    mutex_unlock(&targets_mutex);
    if (!tinfo)
        return -ENOENT;

    /* Only allow sleepable program for resched-able iterator */
    if (prog->aux->sleepable && !bpf_iter_target_support_resched(tinfo))
        return -EINVAL;

    link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
    if (!link)
        return -ENOMEM;

    bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog);
    link->tinfo = tinfo;

    err = bpf_link_prime(&link->link, &link_primer);
    if (err) {
        kfree(link);
        return err;
    }

    if (tinfo->reg_info->attach_target) {
        err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
        if (err) {
            /* abandon the primed link; cleanup frees it for us */
            bpf_link_cleanup(&link_primer);
            return err;
        }
    }

    return bpf_link_settle(&link_primer);
}
0572 
0573 static void init_seq_meta(struct bpf_iter_priv_data *priv_data,
0574               struct bpf_iter_target_info *tinfo,
0575               const struct bpf_iter_seq_info *seq_info,
0576               struct bpf_prog *prog)
0577 {
0578     priv_data->tinfo = tinfo;
0579     priv_data->seq_info = seq_info;
0580     priv_data->prog = prog;
0581     priv_data->session_id = atomic64_inc_return(&session_id);
0582     priv_data->seq_num = 0;
0583     priv_data->done_stop = false;
0584 }
0585 
/* Open a seq_file on @file for the given iterator link: take a prog
 * reference, allocate bpf_iter_priv_data with seq_priv_size extra
 * bytes of target-private storage, run the target's init callback,
 * and point seq->private at the target area (iter_release() undoes
 * all of this).  Returns 0 or a negative error.
 */
static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
                const struct bpf_iter_seq_info *seq_info)
{
    struct bpf_iter_priv_data *priv_data;
    struct bpf_iter_target_info *tinfo;
    struct bpf_prog *prog;
    u32 total_priv_dsize;
    struct seq_file *seq;
    int err = 0;

    /* link_mutex serializes against bpf_iter_link_replace() */
    mutex_lock(&link_mutex);
    prog = link->link.prog;
    bpf_prog_inc(prog);
    mutex_unlock(&link_mutex);

    tinfo = link->tinfo;
    total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) +
               seq_info->seq_priv_size;
    priv_data = __seq_open_private(file, seq_info->seq_ops,
                       total_priv_dsize);
    if (!priv_data) {
        err = -ENOMEM;
        goto release_prog;
    }

    if (seq_info->init_seq_private) {
        err = seq_info->init_seq_private(priv_data->target_private, &link->aux);
        if (err)
            goto release_seq_file;
    }

    init_seq_meta(priv_data, tinfo, seq_info, prog);
    seq = file->private_data;
    /* seq->private is the target area, not the container; helpers
     * recover the container via container_of()
     */
    seq->private = priv_data->target_private;

    return 0;

release_seq_file:
    seq_release_private(file->f_inode, file);
    file->private_data = NULL;
release_prog:
    bpf_prog_put(prog);
    return err;
}
0630 
/* Create a new read-only anon-inode fd for reading the iterator
 * attached to @link (BPF_ITER_CREATE).  Returns the fd or a negative
 * error; -EINVAL if @link is not an iterator link.
 */
int bpf_iter_new_fd(struct bpf_link *link)
{
    struct bpf_iter_link *iter_link;
    struct file *file;
    unsigned int flags;
    int err, fd;

    if (link->ops != &bpf_iter_link_lops)
        return -EINVAL;

    flags = O_RDONLY | O_CLOEXEC;
    fd = get_unused_fd_flags(flags);
    if (fd < 0)
        return fd;

    file = anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags);
    if (IS_ERR(file)) {
        err = PTR_ERR(file);
        goto free_fd;
    }

    iter_link = container_of(link, struct bpf_iter_link, link);
    err = prepare_seq_file(file, iter_link, __get_seq_info(iter_link));
    if (err)
        goto free_file;

    /* publish the fd only after setup fully succeeded */
    fd_install(fd, file);
    return fd;

free_file:
    fput(file);
free_fd:
    put_unused_fd(fd);
    return err;
}
0666 
/* Called by targets from their seq_ops callbacks: fill @meta with
 * session_id/seq_num and return the prog to run.  Returns NULL if the
 * seq_file is not a bpf_iter file, or when called from stop() after
 * done_stop was set (the prog runs at most once with a NULL object).
 */
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop)
{
    struct bpf_iter_priv_data *iter_priv;
    struct seq_file *seq;
    void *seq_priv;

    seq = meta->seq;
    /* identify bpf_iter files by their file_operations table */
    if (seq->file->f_op != &bpf_iter_fops)
        return NULL;

    seq_priv = seq->private;
    iter_priv = container_of(seq_priv, struct bpf_iter_priv_data,
                 target_private);

    if (in_stop && iter_priv->done_stop)
        return NULL;

    meta->session_id = iter_priv->session_id;
    meta->seq_num = iter_priv->seq_num;

    return iter_priv->prog;
}
0689 
/* Run the iterator prog for one object (or the final NULL object from
 * stop()).  Sleepable progs run under rcu_read_lock_trace() and may
 * fault; non-sleepable progs run under plain RCU.
 */
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
{
    int ret;

    if (prog->aux->sleepable) {
        rcu_read_lock_trace();
        migrate_disable();
        might_fault();
        ret = bpf_prog_run(prog, ctx);
        migrate_enable();
        rcu_read_unlock_trace();
    } else {
        rcu_read_lock();
        migrate_disable();
        ret = bpf_prog_run(prog, ctx);
        migrate_enable();
        rcu_read_unlock();
    }

    /* bpf program can only return 0 or 1:
     *  0 : okay
     *  1 : retry the same object
     * The bpf_iter_run_prog() return value
     * will be seq_ops->show() return value.
     */
    return ret == 0 ? 0 : -EAGAIN;
}
0717 
/* bpf_for_each_map_elem() helper: delegate element iteration to the
 * map's own map_for_each_callback() implementation.
 */
BPF_CALL_4(bpf_for_each_map_elem, struct bpf_map *, map, void *, callback_fn,
       void *, callback_ctx, u64, flags)
{
    return map->ops->map_for_each_callback(map, callback_fn, callback_ctx, flags);
}
0723 
/* helper proto for bpf_for_each_map_elem() */
const struct bpf_func_proto bpf_for_each_map_elem_proto = {
    .func       = bpf_for_each_map_elem,
    .gpl_only   = false,
    .ret_type   = RET_INTEGER,
    .arg1_type  = ARG_CONST_MAP_PTR,
    .arg2_type  = ARG_PTR_TO_FUNC,
    .arg3_type  = ARG_PTR_TO_STACK_OR_NULL,
    .arg4_type  = ARG_ANYTHING,
};
0733 
0734 BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx,
0735        u64, flags)
0736 {
0737     bpf_callback_t callback = (bpf_callback_t)callback_fn;
0738     u64 ret;
0739     u32 i;
0740 
0741     /* Note: these safety checks are also verified when bpf_loop
0742      * is inlined, be careful to modify this code in sync. See
0743      * function verifier.c:inline_bpf_loop.
0744      */
0745     if (flags)
0746         return -EINVAL;
0747     if (nr_loops > BPF_MAX_LOOPS)
0748         return -E2BIG;
0749 
0750     for (i = 0; i < nr_loops; i++) {
0751         ret = callback((u64)i, (u64)(long)callback_ctx, 0, 0, 0);
0752         /* return value: 0 - continue, 1 - stop and return */
0753         if (ret)
0754             return i + 1;
0755     }
0756 
0757     return i;
0758 }
0759 
/* helper proto for bpf_loop() */
const struct bpf_func_proto bpf_loop_proto = {
    .func       = bpf_loop,
    .gpl_only   = false,
    .ret_type   = RET_INTEGER,
    .arg1_type  = ARG_ANYTHING,
    .arg2_type  = ARG_PTR_TO_FUNC,
    .arg3_type  = ARG_PTR_TO_STACK_OR_NULL,
    .arg4_type  = ARG_ANYTHING,
};