// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

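/*
 * Both map types share one ring buffer of map.value_size-sized slots.
 * 'size' is max_entries + 1: one slot is always left unused, so that
 * head == tail means empty and (head + 1) % size == tail means full.
 * Elements are pushed at 'head'; a queue pops from 'tail' (FIFO) and a
 * stack pops from just below 'head' (LIFO). 'lock' serializes all
 * reads and updates of head/tail and of the element data.
 */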
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

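/* Called from syscall */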
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able
		 * to access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

	/* one extra slot is allocated to distinguish full from empty */
	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

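/* Pop/peek for the queue map: read the oldest element at 'tail' and,
 * when 'delete' is set, consume it by advancing 'tail' (FIFO order).
 */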
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

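/* Pop/peek for the stack map: read the newest element just below 'head'
 * and, when 'delete' is set, consume it by moving 'head' back (LIFO order).
 */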
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

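/* Peek/pop wrappers, called from syscall or from eBPF program: peek copies
 * the element out without consuming it, pop removes it as well.
 */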
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

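/* Push is shared by queue and stack maps: both insert new elements at
 * 'head'. Called from syscall or from eBPF program.
 */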
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in
	 * case the map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}

		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

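/* Queue and stack maps have no meaningful per-key operations, so the
 * generic lookup/update/delete/get_next_key callbacks below only return
 * an error (lookup returns NULL). Called from syscall or from eBPF program.
 */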
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

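/* Both map types share the same BTF id and most callbacks; they differ
 * only in the pop/peek implementations wired up below.
 */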
BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};

const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};