/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
    const struct bpf_prog_offload_ops *ops;
    struct list_head netdevs;
    void *priv;
};

struct bpf_offload_netdev {
    struct rhash_head l;
    struct net_device *netdev;
    struct bpf_offload_dev *offdev;
    struct list_head progs;
    struct list_head maps;
    struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
    .nelem_hint             = 4,
    .key_len                = sizeof(struct net_device *),
    .key_offset             = offsetof(struct bpf_offload_netdev, netdev),
    .head_offset            = offsetof(struct bpf_offload_netdev, l),
    .automatic_shrinking    = true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

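/* A netdev can be an offload target only if its driver implements
 * the ndo_bpf callback.
 */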
static int bpf_dev_offload_check(struct net_device *netdev)
{
    if (!netdev)
        return -EINVAL;
    if (!netdev->netdev_ops->ndo_bpf)
        return -EOPNOTSUPP;
    return 0;
}

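/* Look up the offload state of a netdev; caller must hold
 * bpf_devs_lock. Returns NULL if the hashtable was never initialized.
 */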
static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
    lockdep_assert_held(&bpf_devs_lock);

    if (!offdevs_inited)
        return NULL;
    return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

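/* Set up the offload state for a program that requested device
 * binding (attr->prog_ifindex). Only SCHED_CLS and XDP programs may
 * be offloaded, and the target netdev must have been registered for
 * offload beforehand.
 */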
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
    struct bpf_offload_netdev *ondev;
    struct bpf_prog_offload *offload;
    int err;

    if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
        attr->prog_type != BPF_PROG_TYPE_XDP)
        return -EINVAL;

    if (attr->prog_flags)
        return -EINVAL;

    offload = kzalloc(sizeof(*offload), GFP_USER);
    if (!offload)
        return -ENOMEM;

    offload->prog = prog;

    offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
                       attr->prog_ifindex);
    err = bpf_dev_offload_check(offload->netdev);
    if (err)
        goto err_maybe_put;

    down_write(&bpf_devs_lock);
    ondev = bpf_offload_find_netdev(offload->netdev);
    if (!ondev) {
        err = -EINVAL;
        goto err_unlock;
    }
    offload->offdev = ondev->offdev;
    prog->aux->offload = offload;
    list_add_tail(&offload->offloads, &ondev->progs);
    dev_put(offload->netdev);
    up_write(&bpf_devs_lock);

    return 0;
err_unlock:
    up_write(&bpf_devs_lock);
err_maybe_put:
    if (offload->netdev)
        dev_put(offload->netdev);
    kfree(offload);
    return err;
}

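/* Let the driver set up its verifier state for the program.
 * dev_state records whether prepare() succeeded, so that destroy()
 * is only called for programs the driver actually accepted.
 */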
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
    struct bpf_prog_offload *offload;
    int ret = -ENODEV;

    down_read(&bpf_devs_lock);
    offload = prog->aux->offload;
    if (offload) {
        ret = offload->offdev->ops->prepare(prog);
        offload->dev_state = !ret;
    }
    up_read(&bpf_devs_lock);

    return ret;
}

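/* Per-instruction verifier hook, forwarded to the driver so it can
 * inspect the program as the verifier walks it.
 */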
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
                 int insn_idx, int prev_insn_idx)
{
    struct bpf_prog_offload *offload;
    int ret = -ENODEV;

    down_read(&bpf_devs_lock);
    offload = env->prog->aux->offload;
    if (offload)
        ret = offload->offdev->ops->insn_hook(env, insn_idx,
                              prev_insn_idx);
    up_read(&bpf_devs_lock);

    return ret;
}

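/* Called when verification completes. The finalize callback is
 * optional; drivers without one get a default of success.
 */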
int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
    struct bpf_prog_offload *offload;
    int ret = -ENODEV;

    down_read(&bpf_devs_lock);
    offload = env->prog->aux->offload;
    if (offload) {
        if (offload->offdev->ops->finalize)
            ret = offload->offdev->ops->finalize(env);
        else
            ret = 0;
    }
    up_read(&bpf_devs_lock);

    return ret;
}

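/* Instruction rewriting hooks called by the verifier during
 * optimization. If the driver cannot mirror a rewrite (or lacks the
 * callback), opt_failed is set and later rewrites are not attempted.
 */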
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
                  struct bpf_insn *insn)
{
    const struct bpf_prog_offload_ops *ops;
    struct bpf_prog_offload *offload;
    int ret = -EOPNOTSUPP;

    down_read(&bpf_devs_lock);
    offload = env->prog->aux->offload;
    if (offload) {
        ops = offload->offdev->ops;
        if (!offload->opt_failed && ops->replace_insn)
            ret = ops->replace_insn(env, off, insn);
        offload->opt_failed |= ret;
    }
    up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
    struct bpf_prog_offload *offload;
    int ret = -EOPNOTSUPP;

    down_read(&bpf_devs_lock);
    offload = env->prog->aux->offload;
    if (offload) {
        if (!offload->opt_failed && offload->offdev->ops->remove_insns)
            ret = offload->offdev->ops->remove_insns(env, off, cnt);
        offload->opt_failed |= ret;
    }
    up_read(&bpf_devs_lock);
}

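/* Tear down a program's offload state; the double-underscore variant
 * requires bpf_devs_lock to be held for writing.
 */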
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
    struct bpf_prog_offload *offload = prog->aux->offload;

    if (offload->dev_state)
        offload->offdev->ops->destroy(prog);

    /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
    bpf_prog_free_id(prog, true);

    list_del_init(&offload->offloads);
    kfree(offload);
    prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
    down_write(&bpf_devs_lock);
    if (prog->aux->offload)
        __bpf_prog_offload_destroy(prog);
    up_write(&bpf_devs_lock);
}

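/* Ask the driver to translate the verified program into device code. */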
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
    struct bpf_prog_offload *offload;
    int ret = -ENODEV;

    down_read(&bpf_devs_lock);
    offload = prog->aux->offload;
    if (offload)
        ret = offload->offdev->ops->translate(prog);
    up_read(&bpf_devs_lock);

    return ret;
}

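/* Device-bound programs must never run on the host; install a stub
 * bpf_func which warns if that ever happens.
 */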
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
                      const struct bpf_insn *insn)
{
    WARN(1, "attempt to execute device eBPF program on the host!");
    return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
    prog->bpf_func = bpf_prog_warn_on_exec;

    return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
    struct bpf_prog *prog;
    struct bpf_prog_info *info;
};

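/* ns_get_path_cb() callback: report the network namespace of the
 * bound netdev so user space can identify the device by
 * (netns_dev, netns_ino, ifindex).
 */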
static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
    struct ns_get_path_bpf_prog_args *args = private_data;
    struct bpf_prog_aux *aux = args->prog->aux;
    struct ns_common *ns;
    struct net *net;

    rtnl_lock();
    down_read(&bpf_devs_lock);

    if (aux->offload) {
        args->info->ifindex = aux->offload->netdev->ifindex;
        net = dev_net(aux->offload->netdev);
        get_net(net);
        ns = &net->ns;
    } else {
        args->info->ifindex = 0;
        ns = NULL;
    }

    up_read(&bpf_devs_lock);
    rtnl_unlock();

    return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
                   struct bpf_prog *prog)
{
    struct ns_get_path_bpf_prog_args args = {
        .prog   = prog,
        .info   = info,
    };
    struct bpf_prog_aux *aux = prog->aux;
    struct inode *ns_inode;
    struct path ns_path;
    char __user *uinsns;
    int res;
    u32 ulen;

    res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
    if (res) {
        if (!info->ifindex)
            return -ENODEV;
        return res;
    }

    down_read(&bpf_devs_lock);

    if (!aux->offload) {
        up_read(&bpf_devs_lock);
        return -ENODEV;
    }

    ulen = info->jited_prog_len;
    info->jited_prog_len = aux->offload->jited_len;
    if (info->jited_prog_len && ulen) {
        uinsns = u64_to_user_ptr(info->jited_prog_insns);
        ulen = min_t(u32, info->jited_prog_len, ulen);
        if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
            up_read(&bpf_devs_lock);
            return -EFAULT;
        }
    }

    up_read(&bpf_devs_lock);

    ns_inode = ns_path.dentry->d_inode;
    info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
    info->netns_ino = ns_inode->i_ino;
    path_put(&ns_path);

    return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

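/* Issue a map offload command to the device driver via ndo_bpf;
 * RTNL must be held.
 */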
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
                   enum bpf_netdev_command cmd)
{
    struct netdev_bpf data = {};
    struct net_device *netdev;

    ASSERT_RTNL();

    data.command = cmd;
    data.offmap = offmap;
    /* Caller must make sure netdev is valid */
    netdev = offmap->netdev;

    return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

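/* Create a device-bound map. Requires CAP_SYS_ADMIN; only array and
 * hash maps may be offloaded. The netdev is resolved under RTNL so
 * it cannot go away while the driver allocates the map.
 */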
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
    struct net *net = current->nsproxy->net_ns;
    struct bpf_offload_netdev *ondev;
    struct bpf_offloaded_map *offmap;
    int err;

    if (!capable(CAP_SYS_ADMIN))
        return ERR_PTR(-EPERM);
    if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
        attr->map_type != BPF_MAP_TYPE_HASH)
        return ERR_PTR(-EINVAL);

    offmap = kzalloc(sizeof(*offmap), GFP_USER);
    if (!offmap)
        return ERR_PTR(-ENOMEM);

    bpf_map_init_from_attr(&offmap->map, attr);

    rtnl_lock();
    down_write(&bpf_devs_lock);
    offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
    err = bpf_dev_offload_check(offmap->netdev);
    if (err)
        goto err_unlock;

    ondev = bpf_offload_find_netdev(offmap->netdev);
    if (!ondev) {
        err = -EINVAL;
        goto err_unlock;
    }

    err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
    if (err)
        goto err_unlock;

    list_add_tail(&offmap->offloads, &ondev->maps);
    up_write(&bpf_devs_lock);
    rtnl_unlock();

    return &offmap->map;

err_unlock:
    up_write(&bpf_devs_lock);
    rtnl_unlock();
    kfree(offmap);
    return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
    WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
    /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
    bpf_map_free_id(&offmap->map, true);
    list_del_init(&offmap->offloads);
    offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
    struct bpf_offloaded_map *offmap = map_to_offmap(map);

    rtnl_lock();
    down_write(&bpf_devs_lock);
    if (offmap->netdev)
        __bpf_map_offload_destroy(offmap);
    up_write(&bpf_devs_lock);
    rtnl_unlock();

    kfree(offmap);
}

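/* Map element accessors. Each checks, under bpf_devs_lock, that the
 * map is still bound to a netdev before calling into the driver.
 */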
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
    struct bpf_offloaded_map *offmap = map_to_offmap(map);
    int ret = -ENODEV;

    down_read(&bpf_devs_lock);
    if (offmap->netdev)
        ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
    up_read(&bpf_devs_lock);

    return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
                void *key, void *value, u64 flags)
{
    struct bpf_offloaded_map *offmap = map_to_offmap(map);
    int ret = -ENODEV;

    if (unlikely(flags > BPF_EXIST))
        return -EINVAL;

    down_read(&bpf_devs_lock);
    if (offmap->netdev)
        ret = offmap->dev_ops->map_update_elem(offmap, key, value,
                               flags);
    up_read(&bpf_devs_lock);

    return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
    struct bpf_offloaded_map *offmap = map_to_offmap(map);
    int ret = -ENODEV;

    down_read(&bpf_devs_lock);
    if (offmap->netdev)
        ret = offmap->dev_ops->map_delete_elem(offmap, key);
    up_read(&bpf_devs_lock);

    return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
    struct bpf_offloaded_map *offmap = map_to_offmap(map);
    int ret = -ENODEV;

    down_read(&bpf_devs_lock);
    if (offmap->netdev)
        ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
    up_read(&bpf_devs_lock);

    return ret;
}

struct ns_get_path_bpf_map_args {
    struct bpf_offloaded_map *offmap;
    struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
    struct ns_get_path_bpf_map_args *args = private_data;
    struct ns_common *ns;
    struct net *net;

    rtnl_lock();
    down_read(&bpf_devs_lock);

    if (args->offmap->netdev) {
        args->info->ifindex = args->offmap->netdev->ifindex;
        net = dev_net(args->offmap->netdev);
        get_net(net);
        ns = &net->ns;
    } else {
        args->info->ifindex = 0;
        ns = NULL;
    }

    up_read(&bpf_devs_lock);
    rtnl_unlock();

    return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
    struct ns_get_path_bpf_map_args args = {
        .offmap = map_to_offmap(map),
        .info   = info,
    };
    struct inode *ns_inode;
    struct path ns_path;
    int res;

    res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
    if (res) {
        if (!info->ifindex)
            return -ENODEV;
        return res;
    }

    ns_inode = ns_path.dentry->d_inode;
    info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
    info->netns_ino = ns_inode->i_ino;
    path_put(&ns_path);

    return 0;
}

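/* A program matches a netdev if it is bound to that exact netdev or
 * to another netdev belonging to the same offload device.
 */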
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
                    struct net_device *netdev)
{
    struct bpf_offload_netdev *ondev1, *ondev2;
    struct bpf_prog_offload *offload;

    if (!bpf_prog_is_dev_bound(prog->aux))
        return false;

    offload = prog->aux->offload;
    if (!offload)
        return false;
    if (offload->netdev == netdev)
        return true;

    ondev1 = bpf_offload_find_netdev(offload->netdev);
    ondev2 = bpf_offload_find_netdev(netdev);

    return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
    bool ret;

    down_read(&bpf_devs_lock);
    ret = __bpf_offload_dev_match(prog, netdev);
    up_read(&bpf_devs_lock);

    return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
    struct bpf_offloaded_map *offmap;
    bool ret;

    if (!bpf_map_is_dev_bound(map))
        return bpf_map_offload_neutral(map);
    offmap = map_to_offmap(map);

    down_read(&bpf_devs_lock);
    ret = __bpf_offload_dev_match(prog, offmap->netdev);
    up_read(&bpf_devs_lock);

    return ret;
}

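/* Register a netdev as an offload target and link it to its offload
 * device's list of netdevs.
 */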
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
                    struct net_device *netdev)
{
    struct bpf_offload_netdev *ondev;
    int err;

    ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
    if (!ondev)
        return -ENOMEM;

    ondev->netdev = netdev;
    ondev->offdev = offdev;
    INIT_LIST_HEAD(&ondev->progs);
    INIT_LIST_HEAD(&ondev->maps);

    down_write(&bpf_devs_lock);
    err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
    if (err) {
        netdev_warn(netdev, "failed to register for BPF offload\n");
        goto err_unlock_free;
    }

    list_add(&ondev->offdev_netdevs, &offdev->netdevs);
    up_write(&bpf_devs_lock);
    return 0;

err_unlock_free:
    up_write(&bpf_devs_lock);
    kfree(ondev);
    return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

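/* Unregister a netdev. Programs and maps bound to it are moved to
 * another netdev of the same offload device if one exists, otherwise
 * they are destroyed.
 */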
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
                       struct net_device *netdev)
{
    struct bpf_offload_netdev *ondev, *altdev;
    struct bpf_offloaded_map *offmap, *mtmp;
    struct bpf_prog_offload *offload, *ptmp;

    ASSERT_RTNL();

    down_write(&bpf_devs_lock);
    ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
    if (WARN_ON(!ondev))
        goto unlock;

    WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
    list_del(&ondev->offdev_netdevs);

    /* Try to move the objects to another netdev of the device */
    altdev = list_first_entry_or_null(&offdev->netdevs,
                      struct bpf_offload_netdev,
                      offdev_netdevs);
    if (altdev) {
        list_for_each_entry(offload, &ondev->progs, offloads)
            offload->netdev = altdev->netdev;
        list_splice_init(&ondev->progs, &altdev->progs);

        list_for_each_entry(offmap, &ondev->maps, offloads)
            offmap->netdev = altdev->netdev;
        list_splice_init(&ondev->maps, &altdev->maps);
    } else {
        list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
            __bpf_prog_offload_destroy(offload->prog);
        list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
            __bpf_map_offload_destroy(offmap);
    }

    WARN_ON(!list_empty(&ondev->progs));
    WARN_ON(!list_empty(&ondev->maps));
    kfree(ondev);
unlock:
    up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

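/* Allocate an offload device; the shared netdev hashtable is
 * initialized lazily on first use.
 */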
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
    struct bpf_offload_dev *offdev;
    int err;

    down_write(&bpf_devs_lock);
    if (!offdevs_inited) {
        err = rhashtable_init(&offdevs, &offdevs_params);
        if (err) {
            up_write(&bpf_devs_lock);
            return ERR_PTR(err);
        }
        offdevs_inited = true;
    }
    up_write(&bpf_devs_lock);

    offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
    if (!offdev)
        return ERR_PTR(-ENOMEM);

    offdev->ops = ops;
    offdev->priv = priv;
    INIT_LIST_HEAD(&offdev->netdevs);

    return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
    WARN_ON(!list_empty(&offdev->netdevs));
    kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
    return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);