// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/btf_ids.h>

#include "xsk.h"

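/* Each socket in the map is tracked by an xsk_map_node, which links the
 * socket back to its map slot so that the slot can be cleared when the
 * socket is released. Every node pins the map with a reference.
 */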
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
                                               struct xdp_sock __rcu **map_entry)
{
        struct xsk_map_node *node;

        node = bpf_map_kzalloc(&map->map, sizeof(*node),
                               GFP_ATOMIC | __GFP_NOWARN);
        if (!node)
                return ERR_PTR(-ENOMEM);

        bpf_map_inc(&map->map);

        node->map = map;
        node->map_entry = map_entry;
        return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
        bpf_map_put(&node->map->map);
        kfree(node);
}

static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
        spin_lock_bh(&xs->map_list_lock);
        list_add_tail(&node->node, &xs->map_list);
        spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
                                struct xdp_sock __rcu **map_entry)
{
        struct xsk_map_node *n, *tmp;

        spin_lock_bh(&xs->map_list_lock);
        list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
                if (map_entry == n->map_entry) {
                        list_del(&n->node);
                        xsk_map_node_free(n);
                }
        }
        spin_unlock_bh(&xs->map_list_lock);
}

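/* An XSKMAP may only be created by CAP_NET_ADMIN. Keys are u32 array
 * indices; values are u32 AF_XDP socket file descriptors on update.
 */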
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
        struct xsk_map *m;
        int numa_node;
        u64 size;

        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);

        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 ||
            attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
                return ERR_PTR(-EINVAL);

        numa_node = bpf_map_attr_numa_node(attr);
        size = struct_size(m, xsk_map, attr->max_entries);

        m = bpf_map_area_alloc(size, numa_node);
        if (!m)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&m->map, attr);
        spin_lock_init(&m->lock);

        return &m->map;
}

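/* At this point no BPF program or socket holds a reference to the map,
 * so no new entries can appear. synchronize_net() waits for any
 * outstanding RCU readers on the data path before the entry array is
 * freed.
 */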
static void xsk_map_free(struct bpf_map *map)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);

        synchronize_net();
        bpf_map_area_free(m);
}

static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = next_key;

        if (index >= m->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == m->map.max_entries - 1)
                return -ENOENT;
        *next = index + 1;
        return 0;
}

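/* Inline the lookup into the calling BPF program: bounds-check the
 * index, scale it by the size of an entry pointer, add the offset of
 * the entry array within struct xsk_map, and load the socket pointer.
 * Out-of-bounds indices yield NULL.
 */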
static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
        struct bpf_insn *insn = insn_buf;

        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
        *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xdp_sock *)));
        *insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
        *insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xdp_sock *), ret, ret, 0);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}
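/* Called on the XDP data path inside an RCU read-side critical section;
 * NAPI runs in softirq context, which rcu_read_lock_bh_held() covers.
 */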
static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);

        if (key >= map->max_entries)
                return NULL;

        return rcu_dereference_check(m->xsk_map[key], rcu_read_lock_bh_held());
}

static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
        return __xsk_map_lookup_elem(map, *(u32 *)key);
}

static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
        return ERR_PTR(-EOPNOTSUPP);
}

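/* Insert an AF_XDP socket, identified by file descriptor, at the given
 * index. The node is allocated up front so that the slot update itself
 * cannot fail; m->lock serializes concurrent updates and deletes.
 */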
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
                               u64 map_flags)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        struct xdp_sock __rcu **map_entry;
        struct xdp_sock *xs, *old_xs;
        u32 i = *(u32 *)key, fd = *(u32 *)value;
        struct xsk_map_node *node;
        struct socket *sock;
        int err;

        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
        if (unlikely(i >= m->map.max_entries))
                return -E2BIG;

        sock = sockfd_lookup(fd, &err);
        if (!sock)
                return err;

        if (sock->sk->sk_family != PF_XDP) {
                sockfd_put(sock);
                return -EOPNOTSUPP;
        }

        xs = (struct xdp_sock *)sock->sk;

        map_entry = &m->xsk_map[i];
        node = xsk_map_node_alloc(m, map_entry);
        if (IS_ERR(node)) {
                sockfd_put(sock);
                return PTR_ERR(node);
        }

        spin_lock_bh(&m->lock);
        old_xs = rcu_dereference_protected(*map_entry, lockdep_is_held(&m->lock));
        if (old_xs == xs) {
                err = 0;
                goto out;
        } else if (old_xs && map_flags == BPF_NOEXIST) {
                err = -EEXIST;
                goto out;
        } else if (!old_xs && map_flags == BPF_EXIST) {
                err = -ENOENT;
                goto out;
        }
        xsk_map_sock_add(xs, node);
        rcu_assign_pointer(*map_entry, xs);
        if (old_xs)
                xsk_map_sock_delete(old_xs, map_entry);
        spin_unlock_bh(&m->lock);
        sockfd_put(sock);
        return 0;

out:
        spin_unlock_bh(&m->lock);
        sockfd_put(sock);
        xsk_map_node_free(node);
        return err;
}

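/* Clear a slot and unlink the old socket, if any. The xchg() publishes
 * the NULL entry atomically, so data-path readers see either the old
 * socket or an empty slot.
 */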
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        struct xdp_sock __rcu **map_entry;
        struct xdp_sock *old_xs;
        u32 k = *(u32 *)key;

        if (k >= map->max_entries)
                return -EINVAL;

        spin_lock_bh(&m->lock);
        map_entry = &m->xsk_map[k];
        old_xs = unrcu_pointer(xchg(map_entry, NULL));
        if (old_xs)
                xsk_map_sock_delete(old_xs, map_entry);
        spin_unlock_bh(&m->lock);

        return 0;
}

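/* Back end of bpf_redirect_map() for XSKMAPs: record the target socket
 * in the per-CPU redirect state for xdp_do_redirect() to consume.
 */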
static int xsk_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
        return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
                                      __xsk_map_lookup_elem);
}

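/* Called from the socket release path: clear the entry only if it still
 * points at the socket being torn down, as it may have been replaced in
 * the meantime.
 */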
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
                             struct xdp_sock __rcu **map_entry)
{
        spin_lock_bh(&map->lock);
        if (rcu_access_pointer(*map_entry) == xs) {
                rcu_assign_pointer(*map_entry, NULL);
                xsk_map_sock_delete(xs, map_entry);
        }
        spin_unlock_bh(&map->lock);
}

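/* xsk_map_gen_lookup() bakes max_entries into the generated program, so
 * inner maps in a map-in-map must match on max_entries in addition to
 * the generic metadata.
 */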
static bool xsk_map_meta_equal(const struct bpf_map *meta0,
                               const struct bpf_map *meta1)
{
        return meta0->max_entries == meta1->max_entries &&
                bpf_map_meta_equal(meta0, meta1);
}

BTF_ID_LIST_SINGLE(xsk_map_btf_ids, struct, xsk_map)
const struct bpf_map_ops xsk_map_ops = {
        .map_meta_equal = xsk_map_meta_equal,
        .map_alloc = xsk_map_alloc,
        .map_free = xsk_map_free,
        .map_get_next_key = xsk_map_get_next_key,
        .map_lookup_elem = xsk_map_lookup_elem,
        .map_gen_lookup = xsk_map_gen_lookup,
        .map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
        .map_update_elem = xsk_map_update_elem,
        .map_delete_elem = xsk_map_delete_elem,
        .map_check_btf = map_check_no_btf,
        .map_btf_id = &xsk_map_btf_ids[0],
        .map_redirect = xsk_map_redirect,
};