// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

#ifdef CONFIG_CGROUP_BPF

#include "../cgroup/cgroup-internal.h"

#define LOCAL_STORAGE_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

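/*
 * A cgroup storage map keeps one storage item per (cgroup, attach_type)
 * pair, or per cgroup for the shared key format. Items sit in an rbtree
 * for keyed lookup and on a list for iteration; both are protected by
 * 'lock'.
 */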
struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;
	struct rb_root root;
	struct list_head list;
};

static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}

static bool attach_type_isolated(const struct bpf_map *map)
{
	return map->key_size == sizeof(struct bpf_cgroup_storage_key);
}

static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map,
				      const void *_key1, const void *_key2)
{
	if (attach_type_isolated(&map->map)) {
		const struct bpf_cgroup_storage_key *key1 = _key1;
		const struct bpf_cgroup_storage_key *key2 = _key2;

		if (key1->cgroup_inode_id < key2->cgroup_inode_id)
			return -1;
		else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
			return 1;
		else if (key1->attach_type < key2->attach_type)
			return -1;
		else if (key1->attach_type > key2->attach_type)
			return 1;
	} else {
		const __u64 *cgroup_inode_id1 = _key1;
		const __u64 *cgroup_inode_id2 = _key2;

		if (*cgroup_inode_id1 < *cgroup_inode_id2)
			return -1;
		else if (*cgroup_inode_id1 > *cgroup_inode_id2)
			return 1;
	}
	return 0;
}

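/*
 * Look up the storage item matching @key. Pass locked=true when the
 * caller already holds map->lock; otherwise the lock is taken around
 * the rbtree walk.
 */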
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}

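/*
 * Insert @storage into the map's rbtree; the caller must hold map->lock.
 * Returns -EEXIST if an item with the same key already exists.
 */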
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}

static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}

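/*
 * Update an existing item from syscall context. With BPF_F_LOCK the
 * value is copied under the value's bpf_spin_lock; otherwise a freshly
 * allocated buffer is swapped in and the old one is freed after an RCU
 * grace period.
 */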
static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
				      void *value, u64 flags)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST)))
		return -EINVAL;

	if (unlikely((flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	if (flags & BPF_F_LOCK) {
		copy_map_value_locked(map, storage->buf->data, value, false);
		return 0;
	}

	new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),
				   __GFP_ZERO | GFP_NOWAIT | __GFP_NOWARN,
				   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);
	check_and_init_map_value(map, new->data);

	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}

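/*
 * Copy the per-cpu values of an item into a user-supplied buffer laid
 * out as one round_up(value_size, 8) sized slot per possible cpu.
 */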
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

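/*
 * Overwrite the per-cpu values of an item from a user-supplied buffer
 * with the same layout that bpf_percpu_cgroup_storage_copy() produces.
 */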
int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

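/*
 * Walk the map's keys in list order: with no previous key, report the
 * first item's key; otherwise report the key of the item following the
 * one that matches @key.
 */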
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		storage = list_next_entry(storage, list_map);
		if (!storage)
			goto enoent;
	} else {
		storage = list_first_entry(&map->list,
					   struct bpf_cgroup_storage, list_map);
	}

	spin_unlock_bh(&map->lock);

	if (attach_type_isolated(&map->map)) {
		struct bpf_cgroup_storage_key *next = _next_key;
		*next = storage->key;
	} else {
		__u64 *next = _next_key;
		*next = storage->key.cgroup_inode_id;
	}
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}

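/*
 * Validate map attributes and allocate an empty cgroup storage map.
 * Storage items are created when programs are attached, so max_entries
 * must be zero.
 */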
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	__u32 max_value_size = BPF_LOCAL_STORAGE_MAX_VALUE_SIZE;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;

	/* percpu is bound by PCPU_MIN_UNIT_SIZE, non-percpu
	 * is the same as other local storages.
	 */
	if (attr->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		max_value_size = min_t(__u32, max_value_size,
				       PCPU_MIN_UNIT_SIZE);

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
	    attr->key_size != sizeof(__u64))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > max_value_size)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
			   __GFP_ZERO | GFP_USER | __GFP_ACCOUNT, numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}

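/*
 * Free the map: unlink and free every remaining storage item under
 * cgroup_mutex, then release the map structure itself.
 */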
static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct list_head *storages = &map->list;
	struct bpf_cgroup_storage *storage, *stmp;

	mutex_lock(&cgroup_mutex);

	list_for_each_entry_safe(storage, stmp, storages, list_map) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	kfree(map);
}

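/*
 * Deleting elements from syscall context is not supported: storage
 * items are created and destroyed with the program attachment instead.
 */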
static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

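/*
 * Check the BTF description of the map's key against the two supported
 * key formats when the map is created.
 */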
static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	if (attach_type_isolated(map)) {
		struct btf_member *m;
		u32 offset, size;

		/* Key is expected to be of struct bpf_cgroup_storage_key type,
		 * which is:
		 * struct bpf_cgroup_storage_key {
		 *	__u64	cgroup_inode_id;
		 *	__u32	attach_type;
		 * };
		 */

		/*
		 * Key_type must be a structure with two fields.
		 */
		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
		    BTF_INFO_VLEN(key_type->info) != 2)
			return -EINVAL;

		/*
		 * The first field must be a 64 bit integer at 0 offset.
		 */
		m = (struct btf_member *)(key_type + 1);
		size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);
		if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
			return -EINVAL;

		/*
		 * The second field must be a 32 bit integer at 64 bit offset.
		 */
		m++;
		offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
		size = sizeof_field(struct bpf_cgroup_storage_key, attach_type);
		if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
			return -EINVAL;
	} else {
		u32 int_data;

		/*
		 * Key is expected to be u64, which stores the cgroup_inode_id
		 */
		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
			return -EINVAL;

		int_data = *(u32 *)(key_type + 1);
		if (BTF_INT_BITS(int_data) != 64 || BTF_INT_OFFSET(int_data))
			return -EINVAL;
	}

	return 0;
}

static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}

BTF_ID_LIST_SINGLE(cgroup_storage_map_btf_ids, struct, bpf_cgroup_storage_map)
const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = cgroup_storage_get_next_key,
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = cgroup_storage_check_btf,
	.map_seq_show_elem = cgroup_storage_seq_show_elem,
	.map_btf_id = &cgroup_storage_map_btf_ids[0],
};

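/*
 * Record that @_map is the cgroup storage map of its type used by the
 * program; a program may use at most one map per storage type.
 */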
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);

	if (aux->cgroup_storage[stype] &&
	    aux->cgroup_storage[stype] != _map)
		return -EBUSY;

	aux->cgroup_storage[stype] = _map;
	return 0;
}

static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}

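/*
 * Allocate a zeroed storage item for @prog's map of type @stype: one
 * shared buffer, or a per-cpu area for per-cpu storage. Returns NULL if
 * the program uses no such map, or ERR_PTR(-ENOMEM) on failure.
 */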
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
						    enum bpf_cgroup_storage_type stype)
{
	const gfp_t gfp = __GFP_ZERO | GFP_USER;
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
				       gfp, map->numa_node);
	if (!storage)
		goto enomem;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = bpf_map_kmalloc_node(map, size, gfp,
						    map->numa_node);
		if (!storage->buf)
			goto enomem;
		check_and_init_map_value(map, storage->buf->data);
	} else {
		storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	kfree(storage);
	return ERR_PTR(-ENOMEM);
}

static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}

void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;

	if (!storage)
		return;

	map = &storage->map->map;
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}

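/*
 * Bind a storage item to a (cgroup, attach_type) pair and publish it in
 * the map's rbtree and on the map's and the cgroup's storage lists.
 */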
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup_id(cgroup);

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list_map, &map->list);
	list_add(&storage->list_cg, &cgroup->bpf.storages);
	spin_unlock_bh(&map->lock);
}

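/*
 * Undo bpf_cgroup_storage_link(): remove the item from the map's rbtree
 * and from both the map's and the cgroup's lists.
 */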
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list_map);
	list_del(&storage->list_cg);
	spin_unlock_bh(&map->lock);
}

#endif