// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#define pr_fmt(fmt) "NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

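/* Record a reference to a "neutral" map (one shared with the host rather
 * than offloaded to the device) used by an offloaded program.  If another
 * offloaded program already tracks the map, reuse its record and bump the
 * refcount; otherwise take a map reference, allocate a new record and
 * insert it into the maps_neutral rhashtable.
 */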
static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
           struct bpf_map *map)
{
    struct nfp_bpf_neutral_map *record;
    int err;

    /* Reuse path - other offloaded program is already tracking this map. */
    record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
                    nfp_bpf_maps_neutral_params);
    if (record) {
        nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
        record->count++;
        return 0;
    }

    /* Grab a single ref to the map for our record.  The prog destroy ndo
     * happens after free_used_maps().
     */
    bpf_map_inc(map);

    record = kmalloc(sizeof(*record), GFP_KERNEL);
    if (!record) {
        err = -ENOMEM;
        goto err_map_put;
    }

    record->ptr = map;
    record->map_id = map->id;
    record->count = 1;

    err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
                     nfp_bpf_maps_neutral_params);
    if (err)
        goto err_free_rec;

    nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

    return 0;

err_free_rec:
    kfree(record);
err_map_put:
    bpf_map_put(map);
    return err;
}

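/* Drop this program's references to neutral map records.  Records whose
 * refcount hits zero are removed from the rhashtable and, after an RCU
 * grace period, their map reference is released and the record freed.
 * The program's map_records array is freed in all cases.
 */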
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
    bool freed = false;
    int i;

    for (i = 0; i < nfp_prog->map_records_cnt; i++) {
        if (--nfp_prog->map_records[i]->count) {
            nfp_prog->map_records[i] = NULL;
            continue;
        }

        WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
                           &nfp_prog->map_records[i]->l,
                           nfp_bpf_maps_neutral_params));
        freed = true;
    }

    if (freed) {
        synchronize_rcu();

        for (i = 0; i < nfp_prog->map_records_cnt; i++)
            if (nfp_prog->map_records[i]) {
                bpf_map_put(nfp_prog->map_records[i]->ptr);
                kfree(nfp_prog->map_records[i]);
            }
    }

    kfree(nfp_prog->map_records);
    nfp_prog->map_records = NULL;
    nfp_prog->map_records_cnt = 0;
}

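/* Walk the program's used maps under used_maps_mutex and record every
 * neutral map it references.  On failure all records taken so far are
 * dropped again via nfp_map_ptrs_forget().
 */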
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
            struct bpf_prog *prog)
{
    int i, cnt, err = 0;

    mutex_lock(&prog->aux->used_maps_mutex);

    /* Quickly count the maps we will have to remember */
    cnt = 0;
    for (i = 0; i < prog->aux->used_map_cnt; i++)
        if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
            cnt++;
    if (!cnt)
        goto out;

    nfp_prog->map_records = kmalloc_array(cnt,
                          sizeof(nfp_prog->map_records[0]),
                          GFP_KERNEL);
    if (!nfp_prog->map_records) {
        err = -ENOMEM;
        goto out;
    }

    for (i = 0; i < prog->aux->used_map_cnt; i++)
        if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
            err = nfp_map_ptr_record(bpf, nfp_prog,
                         prog->aux->used_maps[i]);
            if (err) {
                nfp_map_ptrs_forget(bpf, nfp_prog);
                goto out;
            }
        }
    WARN_ON(cnt != nfp_prog->map_records_cnt);

out:
    mutex_unlock(&prog->aux->used_maps_mutex);
    return err;
}

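/* Build the per-instruction metadata list (struct nfp_insn_meta) for the
 * BPF program and run the first JIT preparation pass.  For ALU instructions
 * the unsigned minimums of source and destination start out at U64_MAX,
 * i.e. unknown, to be refined later from verifier information.
 */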
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
         unsigned int cnt)
{
    struct nfp_insn_meta *meta;
    unsigned int i;

    for (i = 0; i < cnt; i++) {
        meta = kzalloc(sizeof(*meta), GFP_KERNEL);
        if (!meta)
            return -ENOMEM;

        meta->insn = prog[i];
        meta->n = i;
        if (is_mbpf_alu(meta)) {
            meta->umin_src = U64_MAX;
            meta->umin_dst = U64_MAX;
        }

        list_add_tail(&meta->l, &nfp_prog->insns);
    }
    nfp_prog->n_insns = cnt;

    nfp_bpf_jit_prepare(nfp_prog);

    return 0;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
    struct nfp_insn_meta *meta, *tmp;

    kfree(nfp_prog->subprog);

    list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
        list_del(&meta->l);
        kfree(meta);
    }
    kfree(nfp_prog);
}

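/* .prepare callback of nfp_bpf_dev_ops - allocate the nfp_prog state,
 * attach it to the offload context and prepare instruction metadata
 * before the verifier starts calling the per-instruction hook.
 */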
static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
    struct nfp_prog *nfp_prog;
    int ret;

    nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
    if (!nfp_prog)
        return -ENOMEM;
    prog->aux->offload->dev_priv = nfp_prog;

    INIT_LIST_HEAD(&nfp_prog->insns);
    nfp_prog->type = prog->type;
    nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);

    ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
    if (ret)
        goto err_free;

    nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);

    return 0;

err_free:
    nfp_prog_free(nfp_prog);

    return ret;
}

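/* .translate callback - JIT the verified BPF program into NFP instructions.
 * The image buffer is sized from the FW-advertised maximum program length,
 * and references to neutral maps are recorded once translation succeeds.
 */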
static int nfp_bpf_translate(struct bpf_prog *prog)
{
    struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
    struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
    unsigned int max_instr;
    int err;

    /* We depend on dead code elimination succeeding */
    if (prog->aux->offload->opt_failed)
        return -EINVAL;

    max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
    nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

    nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
    if (!nfp_prog->prog)
        return -ENOMEM;

    err = nfp_bpf_jit(nfp_prog);
    if (err)
        return err;

    prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
    prog->aux->offload->jited_image = nfp_prog->prog;

    return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

static void nfp_bpf_destroy(struct bpf_prog *prog)
{
    struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

    kvfree(nfp_prog->prog);
    nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
    nfp_prog_free(nfp_prog);
}

/* Atomic engine requires values to be in big endian, we need to byte swap
 * the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
    u32 *word = value;
    unsigned int i;

    for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
        if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
            word[i] = (__force u32)cpu_to_be32(word[i]);
}

/* Mark value as unsafely initialized in case it becomes atomic later
 * and we didn't byte swap something non-byte swap neutral.
 */
static void
nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
{
    u32 *word = value;
    unsigned int i;

    for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
        if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
            word[i] != (__force u32)cpu_to_be32(word[i]))
            nfp_map->use_map[i].non_zero_update = 1;
}

static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
             void *key, void *value)
{
    int err;

    err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
    if (err)
        return err;

    nfp_map_bpf_byte_swap(offmap->dev_priv, value);
    return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
             void *key, void *value, u64 flags)
{
    nfp_map_bpf_byte_swap(offmap->dev_priv, value);
    nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
    return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
             void *key, void *next_key)
{
    if (!key)
        return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
    return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
    if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
        return -EINVAL;
    return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
    .map_get_next_key   = nfp_bpf_map_get_next_key,
    .map_lookup_elem    = nfp_bpf_map_lookup_entry,
    .map_update_elem    = nfp_bpf_map_update_entry,
    .map_delete_elem    = nfp_bpf_map_delete_elem,
};

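/* Allocate an offloaded map on the device.  The request is validated
 * against the FW-advertised capabilities (supported map types, map and
 * element counts, key/value sizes) before a map is created via the
 * control message channel and added to the app's map list.
 */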
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
    struct nfp_bpf_map *nfp_map;
    unsigned int use_map_size;
    long long int res;

    if (!bpf->maps.types)
        return -EOPNOTSUPP;

    if (offmap->map.map_flags ||
        offmap->map.numa_node != NUMA_NO_NODE) {
        pr_info("map flags are not supported\n");
        return -EINVAL;
    }

    if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
        pr_info("map type not supported\n");
        return -EOPNOTSUPP;
    }
    if (bpf->maps.max_maps == bpf->maps_in_use) {
        pr_info("too many maps for a device\n");
        return -ENOMEM;
    }
    if (bpf->maps.max_elems - bpf->map_elems_in_use <
        offmap->map.max_entries) {
        pr_info("map with too many elements: %u, left: %u\n",
            offmap->map.max_entries,
            bpf->maps.max_elems - bpf->map_elems_in_use);
        return -ENOMEM;
    }

    if (round_up(offmap->map.key_size, 8) +
        round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
        pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
            round_up(offmap->map.key_size, 8) +
            round_up(offmap->map.value_size, 8),
            bpf->maps.max_elem_sz);
        return -ENOMEM;
    }
    if (offmap->map.key_size > bpf->maps.max_key_sz) {
        pr_info("map key size %u, FW max is %u\n",
            offmap->map.key_size, bpf->maps.max_key_sz);
        return -ENOMEM;
    }
    if (offmap->map.value_size > bpf->maps.max_val_sz) {
        pr_info("map value size %u, FW max is %u\n",
            offmap->map.value_size, bpf->maps.max_val_sz);
        return -ENOMEM;
    }

    use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
               sizeof_field(struct nfp_bpf_map, use_map[0]);

    nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
    if (!nfp_map)
        return -ENOMEM;

    offmap->dev_priv = nfp_map;
    nfp_map->offmap = offmap;
    nfp_map->bpf = bpf;
    spin_lock_init(&nfp_map->cache_lock);

    res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
    if (res < 0) {
        kfree(nfp_map);
        return res;
    }

    nfp_map->tid = res;
    offmap->dev_ops = &nfp_bpf_map_ops;
    bpf->maps_in_use++;
    bpf->map_elems_in_use += offmap->map.max_entries;
    list_add_tail(&nfp_map->l, &bpf->map_list);

    return 0;
}

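/* Free an offloaded map: tell the FW to release it, drop the cached
 * control message reply skb (if any) and update the driver's map and
 * element usage accounting.
 */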
static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
    struct nfp_bpf_map *nfp_map = offmap->dev_priv;

    nfp_bpf_ctrl_free_map(bpf, nfp_map);
    dev_consume_skb_any(nfp_map->cache);
    WARN_ON_ONCE(nfp_map->cache_blockers);
    list_del_init(&nfp_map->l);
    bpf->map_elems_in_use -= offmap->map.max_entries;
    bpf->maps_in_use--;
    kfree(nfp_map);

    return 0;
}

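/* Entry point for the ndo_bpf map offload commands issued by the core
 * BPF offload infrastructure.
 */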
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
    switch (bpf->command) {
    case BPF_OFFLOAD_MAP_ALLOC:
        return nfp_bpf_map_alloc(app->priv, bpf->offmap);
    case BPF_OFFLOAD_MAP_FREE:
        return nfp_bpf_map_free(app->priv, bpf->offmap);
    default:
        return -EINVAL;
    }
}

static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
            unsigned long off, unsigned long len)
{
    memcpy(dst, src + off, len);
    return 0;
}

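/* Handle a perf event control message from the FW: validate the message
 * length and ABI version, look up the map by its ID in the neutral maps
 * table and forward packet and metadata via bpf_event_output().
 */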
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
             unsigned int len)
{
    struct cmsg_bpf_event *cbe = (void *)data;
    struct nfp_bpf_neutral_map *record;
    u32 pkt_size, data_size, map_id;
    u64 map_id_full;

    if (len < sizeof(struct cmsg_bpf_event))
        return -EINVAL;

    pkt_size = be32_to_cpu(cbe->pkt_size);
    data_size = be32_to_cpu(cbe->data_size);
    map_id_full = be64_to_cpu(cbe->map_ptr);
    map_id = map_id_full;

    if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
        return -EINVAL;
    if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
        return -EINVAL;

    rcu_read_lock();
    record = rhashtable_lookup(&bpf->maps_neutral, &map_id,
                   nfp_bpf_maps_neutral_params);
    if (!record || map_id_full > U32_MAX) {
        rcu_read_unlock();
        cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
              map_id_full, map_id_full);
        return -EINVAL;
    }

    bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
             &cbe->data[round_up(pkt_size, 4)], data_size,
             cbe->data, pkt_size, nfp_bpf_perf_event_copy);
    rcu_read_unlock();

    return 0;
}

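/* Check whether the program's maximal packet offset may reach beyond the
 * FW packet split boundary (NFP_NET_CFG_BPF_INL_MTU, advertised in 64 byte
 * units); callers reject offload when this returns true.
 */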
bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
                   unsigned int mtu)
{
    unsigned int fw_mtu, pkt_off;

    fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
    pkt_off = min(prog->aux->max_pkt_offset, mtu);

    return fw_mtu < pkt_off;
}

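/* DMA-map the relocated program image and ask the FW to load it.  The
 * program must fit within the FW limits for stack size and instruction
 * count, and must not access packets beyond the HW packet split boundary.
 */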
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
         struct netlink_ext_ack *extack)
{
    struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
    unsigned int max_stack, max_prog_len;
    dma_addr_t dma_addr;
    void *img;
    int err;

    if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
        NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
        return -EOPNOTSUPP;
    }

    max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
    if (nfp_prog->stack_size > max_stack) {
        NL_SET_ERR_MSG_MOD(extack, "stack too large");
        return -EOPNOTSUPP;
    }

    max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
    if (nfp_prog->prog_len > max_prog_len) {
        NL_SET_ERR_MSG_MOD(extack, "program too long");
        return -EOPNOTSUPP;
    }

    img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
    if (IS_ERR(img))
        return PTR_ERR(img);

    dma_addr = dma_map_single(nn->dp.dev, img,
                  nfp_prog->prog_len * sizeof(u64),
                  DMA_TO_DEVICE);
    if (dma_mapping_error(nn->dp.dev, dma_addr)) {
        kfree(img);
        return -ENOMEM;
    }

    nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
    nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

    /* Load up the JITed code */
    err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
    if (err)
        NL_SET_ERR_MSG_MOD(extack,
                   "FW command error while loading BPF");

    dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
             DMA_TO_DEVICE);
    kfree(img);

    return err;
}

static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
    int err;

    /* Enable passing packets through BPF function */
    nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
    nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
    err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
    if (err)
        NL_SET_ERR_MSG_MOD(extack,
                   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
    if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
        return 0;

    nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
    nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

    return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

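/* Load, replace or unload the offloaded BPF program on a vNIC.  Replacing
 * a program that is already running requires FW support for live reload
 * (NFP_NET_BPF_CAP_RELO).
 */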
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
            bool old_prog, struct netlink_ext_ack *extack)
{
    int err;

    if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
        return -EINVAL;

    if (prog && old_prog) {
        u8 cap;

        cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
        if (!(cap & NFP_NET_BPF_CAP_RELO)) {
            NL_SET_ERR_MSG_MOD(extack,
                       "FW does not support live reload");
            return -EBUSY;
        }
    }

    /* Something else is loaded, different program type? */
    if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
        return -EBUSY;

    if (old_prog && !prog)
        return nfp_net_bpf_stop(nn);

    err = nfp_net_bpf_load(nn, prog, extack);
    if (err)
        return err;

    if (!old_prog)
        nfp_net_bpf_start(nn, extack);

    return 0;
}

const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
    .insn_hook  = nfp_verify_insn,
    .finalize   = nfp_bpf_finalize,
    .replace_insn   = nfp_bpf_opt_replace_insn,
    .remove_insns   = nfp_bpf_opt_remove_insns,
    .prepare    = nfp_bpf_verifier_prep,
    .translate  = nfp_bpf_translate,
    .destroy    = nfp_bpf_destroy,
};