// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/btf_ids.h>
#include <linux/buildid.h>
#include "percpu_freelist.h"
#include "mmap_unlock_work.h"

#define STACK_CREATE_FLAG_MASK                  \
    (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |    \
     BPF_F_STACK_BUILD_ID)

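/*
 * One stored stack trace. @hash is the jhash of the trace, @nr the number
 * of entries in @data, and @data holds either raw instruction pointers or
 * struct bpf_stack_build_id records, depending on the map flags.
 */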
struct stack_map_bucket {
    struct pcpu_freelist_node fnode;
    u32 hash;
    u32 nr;
    u64 data[];
};

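/*
 * The stack map itself: a power-of-two array of bucket pointers plus a
 * preallocated per-cpu freelist of buckets (see prealloc_elems_and_freelist()).
 */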
struct bpf_stack_map {
    struct bpf_map map;
    void *elems;
    struct pcpu_freelist freelist;
    u32 n_buckets;
    struct stack_map_bucket *buckets[];
};

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
    return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

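/* Size of one stored stack entry: a build_id record or a plain u64 ip. */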
static inline int stack_map_data_size(struct bpf_map *map)
{
    return stack_map_use_build_id(map) ?
        sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

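/*
 * Preallocate all buckets in one contiguous area and thread them onto the
 * per-cpu freelist, so bpf_get_stackid() never allocates at run time.
 */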
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
    u64 elem_size = sizeof(struct stack_map_bucket) +
            (u64)smap->map.value_size;
    int err;

    smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
                     smap->map.numa_node);
    if (!smap->elems)
        return -ENOMEM;

    err = pcpu_freelist_init(&smap->freelist);
    if (err)
        goto free_elems;

    pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
                   smap->map.max_entries);
    return 0;

free_elems:
    bpf_map_area_free(smap->elems);
    return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
    u32 value_size = attr->value_size;
    struct bpf_stack_map *smap;
    u64 cost, n_buckets;
    int err;

    if (!bpf_capable())
        return ERR_PTR(-EPERM);

    if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
        return ERR_PTR(-EINVAL);

    /* check sanity of attributes */
    if (attr->max_entries == 0 || attr->key_size != 4 ||
        value_size < 8 || value_size % 8)
        return ERR_PTR(-EINVAL);

    BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
    if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
        if (value_size % sizeof(struct bpf_stack_build_id) ||
            value_size / sizeof(struct bpf_stack_build_id)
            > sysctl_perf_event_max_stack)
            return ERR_PTR(-EINVAL);
    } else if (value_size / 8 > sysctl_perf_event_max_stack)
        return ERR_PTR(-EINVAL);

    /* hash table size must be power of 2 */
    n_buckets = roundup_pow_of_two(attr->max_entries);
    if (!n_buckets)
        return ERR_PTR(-E2BIG);

    cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
    smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
    if (!smap)
        return ERR_PTR(-ENOMEM);

    bpf_map_init_from_attr(&smap->map, attr);
    smap->n_buckets = n_buckets;

    err = get_callchain_buffers(sysctl_perf_event_max_stack);
    if (err)
        goto free_smap;

    err = prealloc_elems_and_freelist(smap);
    if (err)
        goto put_buffers;

    return &smap->map;

put_buffers:
    put_callchain_buffers();
free_smap:
    bpf_map_area_free(smap);
    return ERR_PTR(err);
}

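/*
 * Translate raw user-space ips[] into (build_id, file offset) pairs by
 * walking current->mm under mmap_read_trylock(). If the mm cannot be
 * accessed, or the trace is a kernel trace, fall back to storing the raw
 * ips with status BPF_STACK_BUILD_ID_IP.
 */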
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
                      u64 *ips, u32 trace_nr, bool user)
{
    int i;
    struct mmap_unlock_irq_work *work = NULL;
    bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
    struct vm_area_struct *vma, *prev_vma = NULL;
    const char *prev_build_id;

    /* If the irq_work is in use, fall back to report ips. Same
     * fallback is used for kernel stack (!user) on a stackmap with
     * build_id.
     */
    if (!user || !current || !current->mm || irq_work_busy ||
        !mmap_read_trylock(current->mm)) {
        /* cannot access current->mm, fall back to ips */
        for (i = 0; i < trace_nr; i++) {
            id_offs[i].status = BPF_STACK_BUILD_ID_IP;
            id_offs[i].ip = ips[i];
            memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
        }
        return;
    }

    for (i = 0; i < trace_nr; i++) {
        if (range_in_vma(prev_vma, ips[i], ips[i])) {
            vma = prev_vma;
            memcpy(id_offs[i].build_id, prev_build_id,
                   BUILD_ID_SIZE_MAX);
            goto build_id_valid;
        }
        vma = find_vma(current->mm, ips[i]);
        if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {
            /* per entry fall back to ips */
            id_offs[i].status = BPF_STACK_BUILD_ID_IP;
            id_offs[i].ip = ips[i];
            memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
            continue;
        }
build_id_valid:
        id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
            - vma->vm_start;
        id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
        prev_vma = vma;
        prev_build_id = id_offs[i].build_id;
    }
    bpf_mmap_unlock_mm(work, current->mm);
}

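/*
 * Capture a kernel stack trace for an arbitrary task via
 * stack_trace_save_tsk(). Returns NULL when CONFIG_STACKTRACE is not set
 * or no callchain entry is available.
 */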
static struct perf_callchain_entry *
get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
{
#ifdef CONFIG_STACKTRACE
    struct perf_callchain_entry *entry;
    int rctx;

    entry = get_callchain_entry(&rctx);

    if (!entry)
        return NULL;

    entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
                     max_depth, 0);

    /* stack_trace_save_tsk() works on unsigned long array, while
     * perf_callchain_entry uses u64 array. For 32-bit systems, it is
     * necessary to fix this mismatch.
     */
    if (__BITS_PER_LONG != 64) {
        unsigned long *from = (unsigned long *) entry->ip;
        u64 *to = entry->ip;
        int i;

        /* copy data from the end to avoid using extra buffer */
        for (i = entry->nr - 1; i >= 0; i--)
            to[i] = (u64)(from[i]);
    }

    put_callchain_entry(rctx);

    return entry;
#else /* CONFIG_STACKTRACE */
    return NULL;
#endif
}

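/*
 * Hash the (skip-adjusted) trace into a bucket id. Depending on the flags,
 * either return the id of a matching existing bucket, refuse to overwrite
 * a colliding one (-EEXIST unless BPF_F_REUSE_STACKID), or install a
 * freshly popped bucket and recycle the old one.
 */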
static long __bpf_get_stackid(struct bpf_map *map,
                  struct perf_callchain_entry *trace, u64 flags)
{
    struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
    struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
    u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
    u32 hash, id, trace_nr, trace_len;
    bool user = flags & BPF_F_USER_STACK;
    u64 *ips;
    bool hash_matches;

    if (trace->nr <= skip)
        /* skipping more than usable stack trace */
        return -EFAULT;

    trace_nr = trace->nr - skip;
    trace_len = trace_nr * sizeof(u64);
    ips = trace->ip + skip;
    hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
    id = hash & (smap->n_buckets - 1);
    bucket = READ_ONCE(smap->buckets[id]);

    hash_matches = bucket && bucket->hash == hash;
    /* fast cmp */
    if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
        return id;

    if (stack_map_use_build_id(map)) {
        /* for build_id+offset, pop a bucket before slow cmp */
        new_bucket = (struct stack_map_bucket *)
            pcpu_freelist_pop(&smap->freelist);
        if (unlikely(!new_bucket))
            return -ENOMEM;
        new_bucket->nr = trace_nr;
        stack_map_get_build_id_offset(
            (struct bpf_stack_build_id *)new_bucket->data,
            ips, trace_nr, user);
        trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
        if (hash_matches && bucket->nr == trace_nr &&
            memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
            pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
            return id;
        }
        if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
            pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
            return -EEXIST;
        }
    } else {
        if (hash_matches && bucket->nr == trace_nr &&
            memcmp(bucket->data, ips, trace_len) == 0)
            return id;
        if (bucket && !(flags & BPF_F_REUSE_STACKID))
            return -EEXIST;

        new_bucket = (struct stack_map_bucket *)
            pcpu_freelist_pop(&smap->freelist);
        if (unlikely(!new_bucket))
            return -ENOMEM;
        memcpy(new_bucket->data, ips, trace_len);
    }

    new_bucket->hash = hash;
    new_bucket->nr = trace_nr;

    old_bucket = xchg(&smap->buckets[id], new_bucket);
    if (old_bucket)
        pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
    return id;
}

BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
       u64, flags)
{
    u32 max_depth = map->value_size / stack_map_data_size(map);
    u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
    bool user = flags & BPF_F_USER_STACK;
    struct perf_callchain_entry *trace;
    bool kernel = !user;

    if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                   BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
        return -EINVAL;

    max_depth += skip;
    if (max_depth > sysctl_perf_event_max_stack)
        max_depth = sysctl_perf_event_max_stack;

    trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
                   false, false);

    if (unlikely(!trace))
        /* couldn't fetch the stack trace */
        return -EFAULT;

    return __bpf_get_stackid(map, trace, flags);
}

const struct bpf_func_proto bpf_get_stackid_proto = {
    .func       = bpf_get_stackid,
    .gpl_only   = true,
    .ret_type   = RET_INTEGER,
    .arg1_type  = ARG_PTR_TO_CTX,
    .arg2_type  = ARG_CONST_MAP_PTR,
    .arg3_type  = ARG_ANYTHING,
};

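/*
 * Number of leading kernel entries in a mixed callchain; entries from the
 * PERF_CONTEXT_USER marker onward belong to the user stack.
 */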
static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
{
    __u64 nr_kernel = 0;

    while (nr_kernel < trace->nr) {
        if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
            break;
        nr_kernel++;
    }
    return nr_kernel;
}

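/*
 * bpf_get_stackid() flavour for perf_event programs whose sample already
 * carries a callchain: reuse ctx->data->callchain instead of unwinding
 * again, trimming it to the kernel part or skipping past it for user
 * stacks.
 */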
BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
       struct bpf_map *, map, u64, flags)
{
    struct perf_event *event = ctx->event;
    struct perf_callchain_entry *trace;
    bool kernel, user;
    __u64 nr_kernel;
    int ret;

    /* perf_sample_data doesn't have callchain, use bpf_get_stackid */
    if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
        return bpf_get_stackid((unsigned long)(ctx->regs),
                       (unsigned long) map, flags, 0, 0);

    if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                   BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
        return -EINVAL;

    user = flags & BPF_F_USER_STACK;
    kernel = !user;

    trace = ctx->data->callchain;
    if (unlikely(!trace))
        return -EFAULT;

    nr_kernel = count_kernel_ip(trace);

    if (kernel) {
        __u64 nr = trace->nr;

        trace->nr = nr_kernel;
        ret = __bpf_get_stackid(map, trace, flags);

        /* restore nr */
        trace->nr = nr;
    } else { /* user */
        u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

        skip += nr_kernel;
        if (skip > BPF_F_SKIP_FIELD_MASK)
            return -EFAULT;

        flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
        ret = __bpf_get_stackid(map, trace, flags);
    }
    return ret;
}

const struct bpf_func_proto bpf_get_stackid_proto_pe = {
    .func       = bpf_get_stackid_pe,
    .gpl_only   = false,
    .ret_type   = RET_INTEGER,
    .arg1_type  = ARG_PTR_TO_CTX,
    .arg2_type  = ARG_CONST_MAP_PTR,
    .arg3_type  = ARG_ANYTHING,
};

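/*
 * Common backend for the bpf_get_stack() helpers: copy up to @size bytes
 * of stack trace (raw ips or build_id records) into @buf, zero the rest,
 * and return the number of bytes copied or a negative error. @trace_in,
 * @task and @regs select where the trace comes from.
 */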
static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                struct perf_callchain_entry *trace_in,
                void *buf, u32 size, u64 flags)
{
    u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
    bool user_build_id = flags & BPF_F_USER_BUILD_ID;
    u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
    bool user = flags & BPF_F_USER_STACK;
    struct perf_callchain_entry *trace;
    bool kernel = !user;
    int err = -EINVAL;
    u64 *ips;

    if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                   BPF_F_USER_BUILD_ID)))
        goto clear;
    if (kernel && user_build_id)
        goto clear;

    elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
                        : sizeof(u64);
    if (unlikely(size % elem_size))
        goto clear;

    /* cannot get valid user stack for task without user_mode regs */
    if (task && user && !user_mode(regs))
        goto err_fault;

    num_elem = size / elem_size;
    max_depth = num_elem + skip;
    if (sysctl_perf_event_max_stack < max_depth)
        max_depth = sysctl_perf_event_max_stack;

    if (trace_in)
        trace = trace_in;
    else if (kernel && task)
        trace = get_callchain_entry_for_task(task, max_depth);
    else
        trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
                       false, false);
    if (unlikely(!trace))
        goto err_fault;

    if (trace->nr < skip)
        goto err_fault;

    trace_nr = trace->nr - skip;
    trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
    copy_len = trace_nr * elem_size;

    ips = trace->ip + skip;
    if (user && user_build_id)
        stack_map_get_build_id_offset(buf, ips, trace_nr, user);
    else
        memcpy(buf, ips, copy_len);

    if (size > copy_len)
        memset(buf + copy_len, 0, size - copy_len);
    return copy_len;

err_fault:
    err = -EFAULT;
clear:
    memset(buf, 0, size);
    return err;
}

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
       u64, flags)
{
    return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
}

const struct bpf_func_proto bpf_get_stack_proto = {
    .func       = bpf_get_stack,
    .gpl_only   = true,
    .ret_type   = RET_INTEGER,
    .arg1_type  = ARG_PTR_TO_CTX,
    .arg2_type  = ARG_PTR_TO_UNINIT_MEM,
    .arg3_type  = ARG_CONST_SIZE_OR_ZERO,
    .arg4_type  = ARG_ANYTHING,
};

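/*
 * Like bpf_get_stack(), but for an arbitrary task: pin the task's stack,
 * derive pt_regs from it and delegate to __bpf_get_stack().
 */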
BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
       u32, size, u64, flags)
{
    struct pt_regs *regs;
    long res = -EINVAL;

    if (!try_get_task_stack(task))
        return -EFAULT;

    regs = task_pt_regs(task);
    if (regs)
        res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
    put_task_stack(task);

    return res;
}

const struct bpf_func_proto bpf_get_task_stack_proto = {
    .func       = bpf_get_task_stack,
    .gpl_only   = false,
    .ret_type   = RET_INTEGER,
    .arg1_type  = ARG_PTR_TO_BTF_ID,
    .arg1_btf_id    = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
    .arg2_type  = ARG_PTR_TO_UNINIT_MEM,
    .arg3_type  = ARG_CONST_SIZE_OR_ZERO,
    .arg4_type  = ARG_ANYTHING,
};

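/*
 * bpf_get_stack() flavour for perf_event programs: prefer the callchain
 * already captured in the sample, falling back to a fresh unwind when the
 * event was not configured with __PERF_SAMPLE_CALLCHAIN_EARLY.
 */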
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
       void *, buf, u32, size, u64, flags)
{
    struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
    struct perf_event *event = ctx->event;
    struct perf_callchain_entry *trace;
    bool kernel, user;
    int err = -EINVAL;
    __u64 nr_kernel;

    if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
        return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);

    if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                   BPF_F_USER_BUILD_ID)))
        goto clear;

    user = flags & BPF_F_USER_STACK;
    kernel = !user;

    err = -EFAULT;
    trace = ctx->data->callchain;
    if (unlikely(!trace))
        goto clear;

    nr_kernel = count_kernel_ip(trace);

    if (kernel) {
        __u64 nr = trace->nr;

        trace->nr = nr_kernel;
        err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);

        /* restore nr */
        trace->nr = nr;
    } else { /* user */
        u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

        skip += nr_kernel;
        if (skip > BPF_F_SKIP_FIELD_MASK)
            goto clear;

        flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
        err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
    }
    return err;

clear:
    memset(buf, 0, size);
    return err;

}

const struct bpf_func_proto bpf_get_stack_proto_pe = {
    .func       = bpf_get_stack_pe,
    .gpl_only   = true,
    .ret_type   = RET_INTEGER,
    .arg1_type  = ARG_PTR_TO_CTX,
    .arg2_type  = ARG_PTR_TO_UNINIT_MEM,
    .arg3_type  = ARG_CONST_SIZE_OR_ZERO,
    .arg4_type  = ARG_ANYTHING,
};

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
    return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
    struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
    struct stack_map_bucket *bucket, *old_bucket;
    u32 id = *(u32 *)key, trace_len;

    if (unlikely(id >= smap->n_buckets))
        return -ENOENT;

    bucket = xchg(&smap->buckets[id], NULL);
    if (!bucket)
        return -ENOENT;

    trace_len = bucket->nr * stack_map_data_size(map);
    memcpy(value, bucket->data, trace_len);
    memset(value + trace_len, 0, map->value_size - trace_len);

    old_bucket = xchg(&smap->buckets[id], bucket);
    if (old_bucket)
        pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
    return 0;
}

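/* Called from syscall: return the next occupied bucket id after *key. */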
static int stack_map_get_next_key(struct bpf_map *map, void *key,
                  void *next_key)
{
    struct bpf_stack_map *smap = container_of(map,
                          struct bpf_stack_map, map);
    u32 id;

    WARN_ON_ONCE(!rcu_read_lock_held());

    if (!key) {
        id = 0;
    } else {
        id = *(u32 *)key;
        if (id >= smap->n_buckets || !smap->buckets[id])
            id = 0;
        else
            id++;
    }

    while (id < smap->n_buckets && !smap->buckets[id])
        id++;

    if (id >= smap->n_buckets)
        return -ENOENT;

    *(u32 *)next_key = id;
    return 0;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
                 u64 map_flags)
{
    return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
    struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
    struct stack_map_bucket *old_bucket;
    u32 id = *(u32 *)key;

    if (unlikely(id >= smap->n_buckets))
        return -E2BIG;

    old_bucket = xchg(&smap->buckets[id], NULL);
    if (old_bucket) {
        pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return 0;
    } else {
        return -ENOENT;
    }
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
    struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

    bpf_map_area_free(smap->elems);
    pcpu_freelist_destroy(&smap->freelist);
    bpf_map_area_free(smap);
    put_callchain_buffers();
}

BTF_ID_LIST_SINGLE(stack_trace_map_btf_ids, struct, bpf_stack_map)
const struct bpf_map_ops stack_trace_map_ops = {
    .map_meta_equal = bpf_map_meta_equal,
    .map_alloc = stack_map_alloc,
    .map_free = stack_map_free,
    .map_get_next_key = stack_map_get_next_key,
    .map_lookup_elem = stack_map_lookup_elem,
    .map_update_elem = stack_map_update_elem,
    .map_delete_elem = stack_map_delete_elem,
    .map_check_btf = map_check_no_btf,
    .map_btf_id = &stack_trace_map_btf_ids[0],
};