#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/timekeeping.h>

#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

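/* Allocate a control message skb for the BPF app and set its length to
 * @size payload bytes. Returns NULL if the underlying allocation fails.
 */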
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_put(skb, size);

	return skb;
}

static unsigned int
nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
}

static unsigned int
nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_reply_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}

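/* Translate a firmware return code from a map reply into a negative errno.
 * Unknown codes are logged and mapped to -EIO.
 */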
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}

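/* Ask the firmware to allocate a map table. Returns the firmware table id
 * (tid) on success or a negative errno on failure - hence the long long
 * return type, which is wide enough for both.
 */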
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
				  sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

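/* Ask the firmware to free a previously allocated map table. Errors cannot
 * be propagated to the caller, so on failure the table is simply leaked on
 * the device and a warning is logged.
 */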
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
				  sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}

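/* Map operation messages carry their entries back to back:
 *
 *	data: | key 0 | value 0 | key 1 | value 1 | ...
 *
 * where every key slot is bpf->cmsg_key_sz bytes wide and every value slot
 * bpf->cmsg_val_sz bytes. The helpers below compute the address of the
 * n-th key or value in a request or reply.
 */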
static void *
nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

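/* Classify map ops for the purposes of the reply cache: updates and deletes
 * invalidate cached state, lookups and getnexts may be answered from the
 * cache, and getfirst/getnext replies may be used to fill it.
 */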
static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op)
{
	return op == NFP_CCM_TYPE_BPF_MAP_UPDATE ||
	       op == NFP_CCM_TYPE_BPF_MAP_DELETE;
}

static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op)
{
	return op == NFP_CCM_TYPE_BPF_MAP_LOOKUP ||
	       op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
}

static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op)
{
	return op == NFP_CCM_TYPE_BPF_MAP_GETFIRST ||
	       op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
}

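/* Check whether @op can be served from the map reply cache. Returns the
 * number of entries to request from the FW (more than one to refill the
 * cache), or 0 if the result was copied straight out of the cache.
 * Invalidating ops drop the cached reply and register as a blocker until
 * they complete; *cache_gen records the current generation so that
 * nfp_bpf_ctrl_op_cache_put() can detect racing modifications.
 */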
static unsigned int
nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
			  const u8 *key, u8 *out_key, u8 *out_value,
			  u32 *cache_gen)
{
	struct bpf_map *map = &nfp_map->offmap->map;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	unsigned int i, count, n_entries;
	struct cmsg_reply_map_op *reply;

	n_entries = nfp_bpf_ctrl_op_cache_fill(op) ? bpf->cmsg_cache_cnt : 1;

	spin_lock(&nfp_map->cache_lock);
	*cache_gen = nfp_map->cache_gen;
	/* Don't prefetch entries while modifying ops are in flight */
	if (nfp_map->cache_blockers)
		n_entries = 1;

	if (nfp_bpf_ctrl_op_cache_invalidate(op))
		goto exit_block;
	if (!nfp_bpf_ctrl_op_cache_capable(op))
		goto exit_unlock;

	if (!nfp_map->cache)
		goto exit_unlock;
	/* Cached reply is only valid until cache_to */
	if (nfp_map->cache_to < ktime_get_ns())
		goto exit_invalidate;

	reply = (void *)nfp_map->cache->data;
	count = be32_to_cpu(reply->count);

	for (i = 0; i < count; i++) {
		void *cached_key;

		cached_key = nfp_bpf_ctrl_reply_key(bpf, reply, i);
		if (memcmp(cached_key, key, map->key_size))
			continue;

		if (op == NFP_CCM_TYPE_BPF_MAP_LOOKUP)
			memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, i),
			       map->value_size);
		if (op == NFP_CCM_TYPE_BPF_MAP_GETNEXT) {
			if (i + 1 == count)
				break;

			memcpy(out_key,
			       nfp_bpf_ctrl_reply_key(bpf, reply, i + 1),
			       map->key_size);
		}

		n_entries = 0;
		goto exit_unlock;
	}
	goto exit_unlock;

exit_block:
	nfp_map->cache_blockers++;
exit_invalidate:
	dev_consume_skb_any(nfp_map->cache);
	nfp_map->cache = NULL;
exit_unlock:
	spin_unlock(&nfp_map->cache_lock);
	return n_entries;
}

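/* Complete an operation started by nfp_bpf_ctrl_op_cache_get(). Blockers
 * drop their reference and bump the generation counter; fill ops install
 * @skb as the new cached reply, but only if no modifying op raced with them
 * (no blockers, generation unchanged). @skb, or the previous cache it was
 * swapped with, is consumed; @skb may be NULL on error paths.
 */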
static void
nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
			  struct sk_buff *skb, u32 cache_gen)
{
	bool blocker, filler;

	blocker = nfp_bpf_ctrl_op_cache_invalidate(op);
	filler = nfp_bpf_ctrl_op_cache_fill(op);
	if (blocker || filler) {
		u64 to = 0;

		if (filler)
			to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS;

		spin_lock(&nfp_map->cache_lock);
		if (blocker) {
			nfp_map->cache_blockers--;
			nfp_map->cache_gen++;
		}
		if (filler && !nfp_map->cache_blockers &&
		    nfp_map->cache_gen == cache_gen) {
			nfp_map->cache_to = to;
			swap(nfp_map->cache, skb);
		}
		spin_unlock(&nfp_map->cache_lock);
	}

	dev_consume_skb_any(skb);
}

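/* Common handler for all map entry operations. Consults the reply cache,
 * builds and exchanges a request with the FW over the CCM channel when
 * needed, validates the reply size against the reported entry count, and
 * copies key/value results back to the caller.
 */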
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	unsigned int n_entries, reply_entries, count;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	u32 cache_gen;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	/* Handle op cache */
	n_entries = nfp_bpf_ctrl_op_cache_get(nfp_map, op, key, out_key,
					      out_value, &cache_gen);
	if (!n_entries)
		return 0;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb) {
		err = -ENOMEM;
		goto err_cache_put;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(n_entries);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
	if (value)
		memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
		       map->value_size);

	skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		goto err_cache_put;
	}

	if (skb->len < sizeof(*reply)) {
		cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d!\n",
			  op, skb->len);
		err = -EIO;
		goto err_free;
	}

	reply = (void *)skb->data;
	count = be32_to_cpu(reply->count);
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	/* FW responds with message sized to hold the good entries,
	 * plus one extra entry if there was an error.
	 */
	reply_entries = count + !!err;
	if (n_entries > 1 && count)
		err = 0;
	if (err)
		goto err_free;

	if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) {
		cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d for %d entries!\n",
			  op, skb->len, reply_entries);
		err = -EIO;
		goto err_free;
	}

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
		       map->key_size);
	if (out_value)
		memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
		       map->value_size);

	nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
err_cache_put:
	nfp_bpf_ctrl_op_cache_put(nfp_map, op, NULL, cache_gen);
	return err;
}

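/* Exported helpers, one per map operation, all funneling into
 * nfp_bpf_ctrl_entry_op().
 */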
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}

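/* Minimum control channel MTU required to fit a single-entry map request
 * or reply.
 */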
unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf)
{
	return max(nfp_bpf_cmsg_map_req_size(bpf, 1),
		   nfp_bpf_cmsg_map_reply_size(bpf, 1));
}

/* Preferred control channel MTU - large enough for a full cache fill */
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
{
	return max3(NFP_NET_DEFAULT_MTU,
		    nfp_bpf_cmsg_map_req_size(bpf, NFP_BPF_MAP_CACHE_CNT),
		    nfp_bpf_cmsg_map_reply_size(bpf, NFP_BPF_MAP_CACHE_CNT));
}

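/* Number of entries to request per cache fill - as many as fit in both the
 * request and the reply at the current control channel MTU, capped at
 * NFP_BPF_MAP_CACHE_CNT.
 */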
unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf)
{
	unsigned int mtu, req_max, reply_max, entry_sz;

	mtu = bpf->app->ctrl->dp.mtu;
	entry_sz = bpf->cmsg_key_sz + bpf->cmsg_val_sz;
	req_max = (mtu - sizeof(struct cmsg_req_map_op)) / entry_sz;
	reply_max = (mtu - sizeof(struct cmsg_reply_map_op)) / entry_sz;

	return min3(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT);
}

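/* RX handler for BPF control messages. Asynchronous BPF_EVENT notifications
 * are dispatched directly to nfp_bpf_event_output(); all other messages are
 * replies, which are passed to the CCM layer to be matched with a pending
 * request.
 */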
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
		dev_kfree_skb_any(skb);
		return;
	}

	if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
		if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
			dev_consume_skb_any(skb);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	nfp_ccm_rx(&bpf->ccm, skb);
}

void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
	const struct nfp_ccm_hdr *hdr = data;
	struct nfp_app_bpf *bpf = app->priv;

	if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
		return;
	}

	if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
		nfp_bpf_event_output(bpf, data, len);
	else
		cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
			  hdr->type);
}