Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright (c) 2021 Facebook */
0003 
0004 #include <linux/bitmap.h>
0005 #include <linux/bpf.h>
0006 #include <linux/btf.h>
0007 #include <linux/err.h>
0008 #include <linux/jhash.h>
0009 #include <linux/random.h>
0010 #include <linux/btf_ids.h>
0011 
/* map_flags bits userspace is allowed to set when creating a bloom filter
 * map; anything outside this mask is rejected in bloom_map_alloc().
 */
#define BLOOM_CREATE_FLAG_MASK \
    (BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)
0014 
/* A bloom filter map: a probabilistic set that supports adding elements
 * (push) and membership tests (peek). Peek may report false positives
 * but never false negatives; elements cannot be removed.
 */
struct bpf_bloom_filter {
    struct bpf_map map;
    u32 bitset_mask;       /* bit-array size minus 1; size is a power of two
                            * so (hash & bitset_mask) indexes the array */
    u32 hash_seed;         /* random unless BPF_F_ZERO_SEED was requested */
    /* If the size of the values in the bloom filter is u32 aligned,
     * then it is more performant to use jhash2 as the underlying hash
     * function, else we use jhash. This tracks the number of u32s
     * in an u32-aligned value size. If the value size is not u32 aligned,
     * this will be 0.
     */
    u32 aligned_u32_count;
    u32 nr_hash_funcs;     /* number of hash functions, 1..15 (default 5) */
    unsigned long bitset[];/* flexible bit array, sized at map creation */
};
0029 
0030 static u32 hash(struct bpf_bloom_filter *bloom, void *value,
0031         u32 value_size, u32 index)
0032 {
0033     u32 h;
0034 
0035     if (bloom->aligned_u32_count)
0036         h = jhash2(value, bloom->aligned_u32_count,
0037                bloom->hash_seed + index);
0038     else
0039         h = jhash(value, value_size, bloom->hash_seed + index);
0040 
0041     return h & bloom->bitset_mask;
0042 }
0043 
0044 static int bloom_map_peek_elem(struct bpf_map *map, void *value)
0045 {
0046     struct bpf_bloom_filter *bloom =
0047         container_of(map, struct bpf_bloom_filter, map);
0048     u32 i, h;
0049 
0050     for (i = 0; i < bloom->nr_hash_funcs; i++) {
0051         h = hash(bloom, value, map->value_size, i);
0052         if (!test_bit(h, bloom->bitset))
0053             return -ENOENT;
0054     }
0055 
0056     return 0;
0057 }
0058 
0059 static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
0060 {
0061     struct bpf_bloom_filter *bloom =
0062         container_of(map, struct bpf_bloom_filter, map);
0063     u32 i, h;
0064 
0065     if (flags != BPF_ANY)
0066         return -EINVAL;
0067 
0068     for (i = 0; i < bloom->nr_hash_funcs; i++) {
0069         h = hash(bloom, value, map->value_size, i);
0070         set_bit(h, bloom->bitset);
0071     }
0072 
0073     return 0;
0074 }
0075 
/* A bloom filter cannot enumerate or remove members, so pop is unsupported. */
static int bloom_map_pop_elem(struct bpf_map *map, void *value)
{
    return -EOPNOTSUPP;
}
0080 
/* Bits can never be cleared once set, so deletion is unsupported. */
static int bloom_map_delete_elem(struct bpf_map *map, void *value)
{
    return -EOPNOTSUPP;
}
0085 
/* The map is keyless (key_size == 0), so key iteration is unsupported. */
static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
    return -EOPNOTSUPP;
}
0090 
/* Allocate and initialize a bloom filter map from user-supplied attrs.
 *
 * Requirements: key_size == 0 (keyless map), value_size != 0,
 * max_entries != 0, only BLOOM_CREATE_FLAG_MASK bits in map_flags, and
 * the low 4 bits of map_extra select the number of hash functions
 * (0 means "use the default of 5").
 *
 * Returns the embedded &bloom->map on success, or an ERR_PTR on failure
 * (-EPERM without bpf capability, -EINVAL for bad attrs, -ENOMEM).
 */
static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
{
    u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
    int numa_node = bpf_map_attr_numa_node(attr);
    struct bpf_bloom_filter *bloom;

    if (!bpf_capable())
        return ERR_PTR(-EPERM);

    if (attr->key_size != 0 || attr->value_size == 0 ||
        attr->max_entries == 0 ||
        attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
        !bpf_map_flags_access_ok(attr->map_flags) ||
        /* The lower 4 bits of map_extra (0xF) specify the number
         * of hash functions
         */
        (attr->map_extra & ~0xF))
        return ERR_PTR(-EINVAL);

    nr_hash_funcs = attr->map_extra;
    if (nr_hash_funcs == 0)
        /* Default to using 5 hash functions if unspecified */
        nr_hash_funcs = 5;

    /* For the bloom filter, the optimal bit array size that minimizes the
     * false positive probability is n * k / ln(2) where n is the number of
     * expected entries in the bloom filter and k is the number of hash
     * functions. We use 7 / 5 to approximate 1 / ln(2).
     *
     * We round this up to the nearest power of two to enable more efficient
     * hashing using bitmasks. The bitmask will be the bit array size - 1.
     *
     * If this overflows a u32, the bit array size will have 2^32 (4
     * GB) bits.
     */
    if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) ||
        check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) ||
        nr_bits > (1UL << 31)) {
        /* The bit array size is 2^32 bits but to avoid overflowing the
         * u32, we use U32_MAX, which will round up to the equivalent
         * number of bytes
         */
        bitset_bytes = BITS_TO_BYTES(U32_MAX);
        bitset_mask = U32_MAX;
    } else {
        /* Clamp to at least one unsigned long's worth of bits before
         * rounding up, so the flexible array is never zero-length.
         */
        if (nr_bits <= BITS_PER_LONG)
            nr_bits = BITS_PER_LONG;
        else
            nr_bits = roundup_pow_of_two(nr_bits);
        bitset_bytes = BITS_TO_BYTES(nr_bits);
        bitset_mask = nr_bits - 1;
    }

    /* Whole-long granularity so the bitmap helpers never read past the end */
    bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long));
    bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node);

    if (!bloom)
        return ERR_PTR(-ENOMEM);

    bpf_map_init_from_attr(&bloom->map, attr);

    bloom->nr_hash_funcs = nr_hash_funcs;
    bloom->bitset_mask = bitset_mask;

    /* Check whether the value size is u32-aligned */
    if ((attr->value_size & (sizeof(u32) - 1)) == 0)
        bloom->aligned_u32_count =
            attr->value_size / sizeof(u32);

    /* With BPF_F_ZERO_SEED the seed stays 0 (already zeroed by the
     * allocator), giving reproducible hashing e.g. for tests.
     */
    if (!(attr->map_flags & BPF_F_ZERO_SEED))
        bloom->hash_seed = get_random_int();

    return &bloom->map;
}
0165 
0166 static void bloom_map_free(struct bpf_map *map)
0167 {
0168     struct bpf_bloom_filter *bloom =
0169         container_of(map, struct bpf_bloom_filter, map);
0170 
0171     bpf_map_area_free(bloom);
0172 }
0173 
/* Direct lookup is not meaningful for a keyless probabilistic set;
 * the eBPF program should use map_peek_elem instead.
 */
static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
{
    /* The eBPF program should use map_peek_elem instead */
    return ERR_PTR(-EINVAL);
}
0179 
/* Keyed updates do not apply to a keyless map; the eBPF program should
 * use map_push_elem instead.
 */
static int bloom_map_update_elem(struct bpf_map *map, void *key,
                 void *value, u64 flags)
{
    /* The eBPF program should use map_push_elem instead */
    return -EINVAL;
}
0186 
0187 static int bloom_map_check_btf(const struct bpf_map *map,
0188                    const struct btf *btf,
0189                    const struct btf_type *key_type,
0190                    const struct btf_type *value_type)
0191 {
0192     /* Bloom filter maps are keyless */
0193     return btf_type_is_void(key_type) ? 0 : -EINVAL;
0194 }
0195 
/* Operations table for BPF_MAP_TYPE_BLOOM_FILTER. Push adds an element,
 * peek tests membership; the remaining keyed/iteration ops are stubs
 * that report the operation as unsupported or invalid.
 */
BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
const struct bpf_map_ops bloom_filter_map_ops = {
    .map_meta_equal = bpf_map_meta_equal,
    .map_alloc = bloom_map_alloc,
    .map_free = bloom_map_free,
    .map_get_next_key = bloom_map_get_next_key,
    .map_push_elem = bloom_map_push_elem,
    .map_peek_elem = bloom_map_peek_elem,
    .map_pop_elem = bloom_map_pop_elem,
    .map_lookup_elem = bloom_map_lookup_elem,
    .map_update_elem = bloom_map_update_elem,
    .map_delete_elem = bloom_map_delete_elem,
    .map_check_btf = bloom_map_check_btf,
    .map_btf_id = &bpf_bloom_map_btf_ids[0],
};