// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>

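/* A struct_ops map's kernel value moves through three states:
 * INIT (no successful update yet), INUSE (registered with a
 * subsystem) and TOBEFREE (unregistered by delete_elem and
 * waiting to be freed).
 */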
enum bpf_struct_ops_state {
    BPF_STRUCT_OPS_STATE_INIT,
    BPF_STRUCT_OPS_STATE_INUSE,
    BPF_STRUCT_OPS_STATE_TOBEFREE,
};

#define BPF_STRUCT_OPS_COMMON_VALUE         \
    refcount_t refcnt;              \
    enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
    BPF_STRUCT_OPS_COMMON_VALUE;
    char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
    struct bpf_map map;
    struct rcu_head rcu;
    const struct bpf_struct_ops *st_ops;
    /* protect map_update */
    struct mutex lock;
    /* links[] has all the bpf_links that are populated
     * to the func ptrs of the kernel's struct
     * (in kvalue.data).
     */
    struct bpf_link **links;
    /* image is a page that has all the trampolines
     * that store the func args before calling the bpf_prog.
     * A PAGE_SIZE "image" is enough to store all the
     * trampolines for "links[]".
     */
    void *image;
    /* uvalue->data stores the kernel struct
     * (e.g. tcp_congestion_ops) that is more useful
     * to userspace than the kvalue.  For example,
     * the bpf_prog's id is stored instead of the kernel
     * address of a func ptr.
     */
    struct bpf_struct_ops_value *uvalue;
    /* kvalue.data stores the actual kernel struct
     * (e.g. tcp_congestion_ops) that will be
     * registered to the kernel subsystem.
     */
    struct bpf_struct_ops_value kvalue;
};

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to userspace, and its btf-type-id is
 * stored in map->btf_vmlinux_value_type_id.
 */
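/* bpf_struct_ops_types.h has one BPF_STRUCT_OPS_TYPE(_name) entry per
 * supported struct_ops, e.g. BPF_STRUCT_OPS_TYPE(tcp_congestion_ops).
 */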
#define BPF_STRUCT_OPS_TYPE(_name)              \
extern struct bpf_struct_ops bpf_##_name;           \
                                \
struct bpf_struct_ops_##_name {                     \
    BPF_STRUCT_OPS_COMMON_VALUE;                \
    struct _name data ____cacheline_aligned_in_smp;     \
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
    __NR_BPF_STRUCT_OPS_TYPE,
};

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)              \
    [BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
    .test_run = bpf_struct_ops_test_run,
#endif
};

static const struct btf_type *module_type;

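/* Called while btf_vmlinux is being parsed.  For each struct_ops in
 * bpf_struct_ops[], find its BTF type and its VALUE_PREFIX-ed value
 * type, reject unsupported members (anonymous members and bitfields),
 * and distill a btf_func_model for every func ptr member so that a
 * trampoline can be prepared for it later.
 */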
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
    s32 type_id, value_id, module_id;
    const struct btf_member *member;
    struct bpf_struct_ops *st_ops;
    const struct btf_type *t;
    char value_name[128];
    const char *mname;
    u32 i, j;

    /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

    module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
    if (module_id < 0) {
        pr_warn("Cannot find struct module in btf_vmlinux\n");
        return;
    }
    module_type = btf_type_by_id(btf, module_id);

    for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
        st_ops = bpf_struct_ops[i];

        if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
            sizeof(value_name)) {
            pr_warn("struct_ops name %s is too long\n",
                st_ops->name);
            continue;
        }
        sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

        value_id = btf_find_by_name_kind(btf, value_name,
                         BTF_KIND_STRUCT);
        if (value_id < 0) {
            pr_warn("Cannot find struct %s in btf_vmlinux\n",
                value_name);
            continue;
        }

        type_id = btf_find_by_name_kind(btf, st_ops->name,
                        BTF_KIND_STRUCT);
        if (type_id < 0) {
            pr_warn("Cannot find struct %s in btf_vmlinux\n",
                st_ops->name);
            continue;
        }
        t = btf_type_by_id(btf, type_id);
        if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
            pr_warn("Cannot support #%u members in struct %s\n",
                btf_type_vlen(t), st_ops->name);
            continue;
        }

        for_each_member(j, t, member) {
            const struct btf_type *func_proto;

            mname = btf_name_by_offset(btf, member->name_off);
            if (!*mname) {
                pr_warn("anon member in struct %s is not supported\n",
                    st_ops->name);
                break;
            }

            if (__btf_member_bitfield_size(t, member)) {
                pr_warn("bit field member %s in struct %s is not supported\n",
                    mname, st_ops->name);
                break;
            }

            func_proto = btf_type_resolve_func_ptr(btf,
                                   member->type,
                                   NULL);
            if (func_proto &&
                btf_distill_func_proto(log, btf,
                           func_proto, mname,
                           &st_ops->func_models[j])) {
                pr_warn("Error in parsing func ptr %s in struct %s\n",
                    mname, st_ops->name);
                break;
            }
        }

        if (j == btf_type_vlen(t)) {
            if (st_ops->init(btf)) {
                pr_warn("Error in init bpf_struct_ops %s\n",
                    st_ops->name);
            } else {
                st_ops->type_id = type_id;
                st_ops->type = t;
                st_ops->value_id = value_id;
                st_ops->value_type = btf_type_by_id(btf,
                                    value_id);
            }
        }
    }
}

extern struct btf *btf_vmlinux;

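/* Find a registered struct_ops by the BTF id of its value type
 * (i.e. struct bpf_struct_ops_##_name).  bpf_struct_ops_find() below
 * looks up by the BTF id of the kernel struct itself.
 */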
static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
    unsigned int i;

    if (!value_id || !btf_vmlinux)
        return NULL;

    for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
        if (bpf_struct_ops[i]->value_id == value_id)
            return bpf_struct_ops[i];
    }

    return NULL;
}

const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
    unsigned int i;

    if (!type_id || !btf_vmlinux)
        return NULL;

    for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
        if (bpf_struct_ops[i]->type_id == type_id)
            return bpf_struct_ops[i];
    }

    return NULL;
}

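/* A struct_ops map holds exactly one element, at key 0 */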
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
                       void *next_key)
{
    if (key && *(u32 *)key == 0)
        return -ENOENT;

    *(u32 *)next_key = 0;
    return 0;
}

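/* Lookup from the syscall side: copy out the userspace-facing uvalue
 * together with a snapshot of the current state and refcnt.
 */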
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
                       void *value)
{
    struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
    struct bpf_struct_ops_value *uvalue, *kvalue;
    enum bpf_struct_ops_state state;

    if (unlikely(*(u32 *)key != 0))
        return -ENOENT;

    kvalue = &st_map->kvalue;
    /* Pair with smp_store_release() during map_update */
    state = smp_load_acquire(&kvalue->state);
    if (state == BPF_STRUCT_OPS_STATE_INIT) {
        memset(value, 0, map->value_size);
        return 0;
    }

    /* No lock is needed.  state and refcnt do not need to be
     * read together as a consistent pair.
     */
    uvalue = value;
    memcpy(uvalue, st_map->uvalue, map->value_size);
    uvalue->state = state;
    refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));

    return 0;
}

/* Lookup from a BPF program is not supported for a struct_ops map;
 * the syscall side uses bpf_struct_ops_map_sys_lookup_elem() instead.
 */
static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
    return ERR_PTR(-EINVAL);
}

/* Drop the links (and thus the progs) installed by a previous update */
static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
    const struct btf_type *t = st_map->st_ops->type;
    u32 i;

    for (i = 0; i < btf_type_vlen(t); i++) {
        if (st_map->links[i]) {
            bpf_link_put(st_map->links[i]);
            st_map->links[i] = NULL;
        }
    }
}

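/* Verify that every hole between members, and the tail padding, of
 * the value coming from userspace is zeroed, so no unknown bytes can
 * sneak into the kernel struct.
 */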
static int check_zero_holes(const struct btf_type *t, void *data)
{
    const struct btf_member *member;
    u32 i, moff, msize, prev_mend = 0;
    const struct btf_type *mtype;

    for_each_member(i, t, member) {
        moff = __btf_member_bit_offset(t, member) / 8;
        if (moff > prev_mend &&
            memchr_inv(data + prev_mend, 0, moff - prev_mend))
            return -EINVAL;

        mtype = btf_type_by_id(btf_vmlinux, member->type);
        mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
        if (IS_ERR(mtype))
            return PTR_ERR(mtype);
        prev_mend = moff + msize;
    }

    if (t->size > prev_mend &&
        memchr_inv(data + prev_mend, 0, t->size - prev_mend))
        return -EINVAL;

    return 0;
}

static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
    struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

    kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
    .release = bpf_struct_ops_link_release,
    .dealloc = bpf_struct_ops_link_dealloc,
};

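/* Prepare the trampoline for one func ptr member: the bpf_prog behind
 * "link" is attached as a lone FENTRY, and BPF_TRAMP_F_RET_FENTRY_RET
 * makes the trampoline return the prog's return value when the member
 * itself returns one.
 */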
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
                      struct bpf_tramp_link *link,
                      const struct btf_func_model *model,
                      void *image, void *image_end)
{
    u32 flags;

    tlinks[BPF_TRAMP_FENTRY].links[0] = link;
    tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
    /* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
     * and it must be used alone.
     */
    flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
    return arch_prepare_bpf_trampoline(NULL, image, image_end,
                       model, flags, tlinks, NULL);
}

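/* A successful update transitions the map from INIT to INUSE:
 *
 * 1. Reject values with non-zero holes or a pre-set state/refcnt.
 * 2. For each member, let st_ops->init_member() handle it, or require
 *    a zeroed non-func-ptr member, or treat the value as a prog fd
 *    and write a trampoline for that prog into st_map->image.
 * 3. Make the image read-only and executable, then hand the kernel
 *    struct to the subsystem via st_ops->reg().
 */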
static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
                      void *value, u64 flags)
{
    struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
    const struct bpf_struct_ops *st_ops = st_map->st_ops;
    struct bpf_struct_ops_value *uvalue, *kvalue;
    const struct btf_member *member;
    const struct btf_type *t = st_ops->type;
    struct bpf_tramp_links *tlinks = NULL;
    void *udata, *kdata;
    int prog_fd, err = 0;
    void *image, *image_end;
    u32 i;

    if (flags)
        return -EINVAL;

    if (*(u32 *)key != 0)
        return -E2BIG;

    err = check_zero_holes(st_ops->value_type, value);
    if (err)
        return err;

    uvalue = value;
    err = check_zero_holes(t, uvalue->data);
    if (err)
        return err;

    if (uvalue->state || refcount_read(&uvalue->refcnt))
        return -EINVAL;

    tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
    if (!tlinks)
        return -ENOMEM;

    uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
    kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

    mutex_lock(&st_map->lock);

    if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
        err = -EBUSY;
        goto unlock;
    }

    memcpy(uvalue, value, map->value_size);

    udata = &uvalue->data;
    kdata = &kvalue->data;
    image = st_map->image;
    image_end = st_map->image + PAGE_SIZE;

    for_each_member(i, t, member) {
        const struct btf_type *mtype, *ptype;
        struct bpf_prog *prog;
        struct bpf_tramp_link *link;
        u32 moff;

        moff = __btf_member_bit_offset(t, member) / 8;
        ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
        if (ptype == module_type) {
            if (*(void **)(udata + moff)) {
                /* userspace must leave the owner ptr NULL */
                err = -EINVAL;
                goto reset_unlock;
            }
            *(void **)(kdata + moff) = BPF_MODULE_OWNER;
            continue;
        }

        err = st_ops->init_member(t, member, kdata, udata);
        if (err < 0)
            goto reset_unlock;

        /* The ->init_member() has handled this member */
        if (err > 0)
            continue;

        /* If st_ops->init_member does not handle it,
         * we will only handle func ptrs and zeroed members
         * here.  Reject everything else.
         */

        /* All non-func-ptr members must be 0 */
        if (!ptype || !btf_type_is_func_proto(ptype)) {
            u32 msize;

            mtype = btf_type_by_id(btf_vmlinux, member->type);
            mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
            if (IS_ERR(mtype)) {
                err = PTR_ERR(mtype);
                goto reset_unlock;
            }

            if (memchr_inv(udata + moff, 0, msize)) {
                err = -EINVAL;
                goto reset_unlock;
            }

            continue;
        }

        prog_fd = (int)(*(unsigned long *)(udata + moff));
        /* Similar check as the attr->attach_prog_fd */
        if (!prog_fd)
            continue;

        prog = bpf_prog_get(prog_fd);
        if (IS_ERR(prog)) {
            err = PTR_ERR(prog);
            goto reset_unlock;
        }

        if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
            prog->aux->attach_btf_id != st_ops->type_id ||
            prog->expected_attach_type != i) {
            bpf_prog_put(prog);
            err = -EINVAL;
            goto reset_unlock;
        }

        link = kzalloc(sizeof(*link), GFP_USER);
        if (!link) {
            bpf_prog_put(prog);
            err = -ENOMEM;
            goto reset_unlock;
        }
        bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
                  &bpf_struct_ops_link_lops, prog);
        st_map->links[i] = &link->link;

        err = bpf_struct_ops_prepare_trampoline(tlinks, link,
                            &st_ops->func_models[i],
                            image, image_end);
        if (err < 0)
            goto reset_unlock;

        *(void **)(kdata + moff) = image;
        image += err;

        /* store the bpf_prog's id in udata for userspace */
        *(unsigned long *)(udata + moff) = prog->aux->id;
    }

    refcount_set(&kvalue->refcnt, 1);
    bpf_map_inc(map);

    set_memory_ro((long)st_map->image, 1);
    set_memory_x((long)st_map->image, 1);
    err = st_ops->reg(kdata);
    if (likely(!err)) {
        /* Pair with smp_load_acquire() during lookup_elem().
         * It ensures the above udata updates (e.g. prog->aux->id)
         * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
         */
        smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
        goto unlock;
    }

    /* Error during st_ops->reg().  Can happen if this struct_ops
     * needs to be verified as a whole, after all init_member() calls.
     * Can also happen if there was a race in registering the
     * struct_ops (under the same name) to a subsystem through
     * different struct_ops maps.
     */
    set_memory_nx((long)st_map->image, 1);
    set_memory_rw((long)st_map->image, 1);
    bpf_map_put(map);

reset_unlock:
    bpf_struct_ops_map_put_progs(st_map);
    memset(uvalue, 0, map->value_size);
    memset(kvalue, 0, map->value_size);
unlock:
    kfree(tlinks);
    mutex_unlock(&st_map->lock);
    return err;
}

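/* Unregister the struct_ops from its subsystem.  The cmpxchg() on the
 * state guarantees only one deleter does the unreg; the map itself is
 * put once the kvalue refcnt drops to zero.
 */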
static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
    enum bpf_struct_ops_state prev_state;
    struct bpf_struct_ops_map *st_map;

    st_map = (struct bpf_struct_ops_map *)map;
    prev_state = cmpxchg(&st_map->kvalue.state,
                 BPF_STRUCT_OPS_STATE_INUSE,
                 BPF_STRUCT_OPS_STATE_TOBEFREE);
    switch (prev_state) {
    case BPF_STRUCT_OPS_STATE_INUSE:
        st_map->st_ops->unreg(&st_map->kvalue.data);
        if (refcount_dec_and_test(&st_map->kvalue.refcnt))
            bpf_map_put(map);
        return 0;
    case BPF_STRUCT_OPS_STATE_TOBEFREE:
        return -EINPROGRESS;
    case BPF_STRUCT_OPS_STATE_INIT:
        return -ENOENT;
    default:
        WARN_ON_ONCE(1);
        /* Should never happen.  Treat it as not found. */
        return -ENOENT;
    }
}

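/* Show the element through BTF, e.g. when a pinned struct_ops map is
 * read from bpffs.
 */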
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
                         struct seq_file *m)
{
    void *value;
    int err;

    value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
    if (!value)
        return;

    err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
    if (!err) {
        btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
                  value, m);
        seq_puts(m, "\n");
    }

    kfree(value);
}

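/* Free everything the map owns: the links (and thus the progs), the
 * trampoline image, the uvalue and the map itself.
 */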
static void bpf_struct_ops_map_free(struct bpf_map *map)
{
    struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

    if (st_map->links)
        bpf_struct_ops_map_put_progs(st_map);
    bpf_map_area_free(st_map->links);
    bpf_jit_free_exec(st_map->image);
    bpf_map_area_free(st_map->uvalue);
    bpf_map_area_free(st_map);
}

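/* A struct_ops map takes a u32 key, a single entry, no flags and a
 * mandatory btf_vmlinux_value_type_id.
 */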
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
    if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
        attr->map_flags || !attr->btf_vmlinux_value_type_id)
        return -EINVAL;
    return 0;
}

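/* Allocate the map and its companions: uvalue (what userspace sees),
 * links[] (one slot per member) and one executable page for the
 * trampolines.  kvalue is embedded in st_map itself, hence the
 * st_map_size arithmetic below.
 */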
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
    const struct bpf_struct_ops *st_ops;
    size_t st_map_size;
    struct bpf_struct_ops_map *st_map;
    const struct btf_type *t, *vt;
    struct bpf_map *map;

    if (!bpf_capable())
        return ERR_PTR(-EPERM);

    st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
    if (!st_ops)
        return ERR_PTR(-ENOTSUPP);

    vt = st_ops->value_type;
    if (attr->value_size != vt->size)
        return ERR_PTR(-EINVAL);

    t = st_ops->type;

    st_map_size = sizeof(*st_map) +
        /* kvalue stores the
         * struct bpf_struct_ops_tcp_congestion_ops
         */
        (vt->size - sizeof(struct bpf_struct_ops_value));

    st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
    if (!st_map)
        return ERR_PTR(-ENOMEM);

    st_map->st_ops = st_ops;
    map = &st_map->map;

    st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
    st_map->links =
        bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_link *),
                   NUMA_NO_NODE);
    st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
    if (!st_map->uvalue || !st_map->links || !st_map->image) {
        bpf_struct_ops_map_free(map);
        return ERR_PTR(-ENOMEM);
    }

    mutex_init(&st_map->lock);
    set_vm_flush_reset_perms(st_map->image);
    bpf_map_init_from_attr(map, attr);

    return map;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
    .map_alloc_check = bpf_struct_ops_map_alloc_check,
    .map_alloc = bpf_struct_ops_map_alloc,
    .map_free = bpf_struct_ops_map_free,
    .map_get_next_key = bpf_struct_ops_map_get_next_key,
    .map_lookup_elem = bpf_struct_ops_map_lookup_elem,
    .map_delete_elem = bpf_struct_ops_map_delete_elem,
    .map_update_elem = bpf_struct_ops_map_update_elem,
    .map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
    .map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

/* "const void *" because some subsystems pass a const pointer
 * (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
    struct bpf_struct_ops_value *kvalue;

    kvalue = container_of(kdata, struct bpf_struct_ops_value, data);

    return refcount_inc_not_zero(&kvalue->refcnt);
}

static void bpf_struct_ops_put_rcu(struct rcu_head *head)
{
    struct bpf_struct_ops_map *st_map;

    st_map = container_of(head, struct bpf_struct_ops_map, rcu);
    bpf_map_put(&st_map->map);
}

void bpf_struct_ops_put(const void *kdata)
{
    struct bpf_struct_ops_value *kvalue;

    kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
    if (refcount_dec_and_test(&kvalue->refcnt)) {
        struct bpf_struct_ops_map *st_map;

        st_map = container_of(kvalue, struct bpf_struct_ops_map,
                      kvalue);
        /* A struct_ops' function may switch to another struct_ops.
         *
         * For example, bpf_tcp_cc_x->init() may switch to
         * another tcp_cc_y by calling
         * setsockopt(TCP_CONGESTION, "tcp_cc_y").
         * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
         * and its map->refcnt may reach 0 which then frees its
         * trampoline image while tcp_cc_x is still running.
         *
         * Thus, an RCU grace period is needed here.
         */
        call_rcu(&st_map->rcu, bpf_struct_ops_put_rcu);
    }
}