#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "conntrack.h"
#include "main.h"
#include "../nfp_app.h"

struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};

struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
};

struct nfp_fl_stats_ctx_to_flow {
	struct rhash_head ht_node;
	u32 stats_cxt;
	struct nfp_fl_payload *flow;
};

static const struct rhashtable_params stats_ctx_table_params = {
	.key_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
	.head_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
	.key_len = sizeof(u32),
};

static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
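	/* Check if the free-list ring buffer is full. */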
	if (!CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size))
		return -ENOBUFS;

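	/* Store the released stats context ID at the head of the ring. */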
	memcpy(&ring->buf[ring->head * NFP_FL_STATS_ELEM_RS],
	       &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + 1) & (priv->stats_ring_size - 1);

	return 0;
}

static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
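	/* Hand out initially unallocated IDs first, cycling through the
	 * available memory units.
	 */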
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id =
			FIELD_PREP(NFP_FL_STAT_ID_STAT,
				   priv->stats_ids.init_unalloc - 1) |
			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
				   priv->active_mem_unit);

		if (++priv->active_mem_unit == priv->total_mem_units) {
			priv->stats_ids.init_unalloc--;
			priv->active_mem_unit = 0;
		}

		return 0;
	}

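	/* Check if the free-list ring buffer is empty. */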
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

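	/* Reuse a previously released stats context ID from the tail of the
	 * ring and mark that slot as free.
	 */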
	memcpy(&temp_stats_id, &ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS],
	       NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS], &freed_stats_id,
	       NFP_FL_STATS_ELEM_RS);

	ring->tail = (ring->tail + 1) & (priv->stats_ring_size - 1);

	return 0;
}

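/* Look up an offloaded flow by its TC cookie and ingress netdev. */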
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}

void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}

static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
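	/* Check if the mask-ID free-list ring buffer is full. */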
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

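	/* Each ring slot holds NFP_FLOWER_MASK_ELEMENT_RS bytes; store the
	 * released mask ID at the head of the ring.
	 */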
	memcpy(&ring->buf[ring->head * NFP_FLOWER_MASK_ELEMENT_RS], &mask_id,
	       NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}

static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
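	/* Check for initially unallocated mask IDs first. */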
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

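	/* Check if the free-list ring buffer is empty. */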
	if (ring->head == ring->tail)
		goto err_not_found;

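	/* Peek at the oldest released mask ID; it may only be reused once its
	 * reuse timeout has expired.
	 */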
	memcpy(&temp_id, &ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS],
	       NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS], &freed_id,
	       NFP_FLOWER_MASK_ELEMENT_RS);

	ring->tail = (ring->tail + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}

static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}

static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}

static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

	return mask_entry->mask_id;
}

static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}

static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}

int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev,
			      struct netlink_ext_ack *extack)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;
	int err;

	err = nfp_get_stats_entry(app, &stats_cxt);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
		return err;
	}

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(cookie);
	nfp_flow->ingress_dev = netdev;

	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
	if (!ctx_entry) {
		err = -ENOMEM;
		goto err_release_stats;
	}

	ctx_entry->stats_cxt = stats_cxt;
	ctx_entry->flow = nfp_flow;

	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
				   stats_ctx_table_params)) {
		err = -ENOMEM;
		goto err_free_ctx_entry;
	}

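	/* Do not allocate a mask ID for pre_tun rules; these flows only
	 * configure the pre_tun table and are never sent to the firmware as
	 * regular add-flow messages, so allocating an ID here would let the
	 * firmware's mask-ID accounting drift out of sync.
	 */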
	new_mask_id = 0;
	if (!nfp_flow->pre_tun_rule.dev &&
	    !nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
		err = -ENOENT;
		goto err_remove_rhash;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

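	/* Update the flow payload with the mask ID and reset its stats. */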
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
	if (check_entry) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
		err = -EEXIST;
		goto err_remove_mask;
	}

	return 0;

err_remove_mask:
	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len,
				      NULL, &new_mask_id);
err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
err_free_ctx_entry:
	kfree(ctx_entry);
err_release_stats:
	nfp_release_stats_entry(app, stats_cxt);

	return err;
}

void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
				struct nfp_fl_payload *nfp_flow)
{
	nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;
}

int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	__nfp_modify_flow_metadata(priv, nfp_flow);

	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len,
				      &nfp_flow->meta.flags, &new_mask_id);

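	/* Update the flow payload with the mask ID. */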
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

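	/* Release the stats context ID and remove its ctx-to-flow mapping. */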
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return -ENOENT;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
	kfree(ctx_entry);

	return nfp_release_stats_entry(app, temp_ctx_id);
}

struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return NULL;

	return ctx_entry->flow;
}

static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if (flow_entry->ingress_dev == cmp_arg->netdev)
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}

static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

const struct rhashtable_params nfp_flower_table_params = {
	.head_offset = offsetof(struct nfp_fl_payload, fl_node),
	.hashfn = nfp_fl_key_hashfn,
	.obj_cmpfn = nfp_fl_obj_cmpfn,
	.obj_hashfn = nfp_fl_obj_hashfn,
	.automatic_shrinking = true,
};

const struct rhashtable_params merge_table_params = {
	.key_offset = offsetof(struct nfp_merge_info, parent_ctx),
	.head_offset = offsetof(struct nfp_merge_info, ht_node),
	.key_len = sizeof(u64),
};

const struct rhashtable_params nfp_zone_table_params = {
	.head_offset = offsetof(struct nfp_fl_ct_zone_entry, hash_node),
	.key_len = sizeof(u16),
	.key_offset = offsetof(struct nfp_fl_ct_zone_entry, zone),
	.automatic_shrinking = false,
};

const struct rhashtable_params nfp_ct_map_params = {
	.head_offset = offsetof(struct nfp_fl_ct_map_entry, hash_node),
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct nfp_fl_ct_map_entry, cookie),
	.automatic_shrinking = true,
};

const struct rhashtable_params neigh_table_params = {
	.key_offset = offsetof(struct nfp_neigh_entry, neigh_cookie),
	.head_offset = offsetof(struct nfp_neigh_entry, ht_node),
	.key_len = sizeof(unsigned long),
};

int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
	if (err)
		goto err_free_flow_table;

	err = rhashtable_init(&priv->merge_table, &merge_table_params);
	if (err)
		goto err_free_stats_ctx_table;

	err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
	if (err)
		goto err_free_merge_table;

	err = rhashtable_init(&priv->ct_map_table, &nfp_ct_map_params);
	if (err)
		goto err_free_ct_zone_table;

	err = rhashtable_init(&priv->neigh_table, &neigh_table_params);
	if (err)
		goto err_free_ct_map_table;

	INIT_LIST_HEAD(&priv->predt_list);

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

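	/* Init the mask-ID free-list ring buffer and unallocated mask IDs. */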
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_neigh_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

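	/* Init the timestamps used to delay mask-ID reuse. */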
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

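	/* Init the stats-ID free-list ring buffer and unallocated stats IDs. */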
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);
	spin_lock_init(&priv->predt_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_neigh_table:
	rhashtable_destroy(&priv->neigh_table);
err_free_ct_map_table:
	rhashtable_destroy(&priv->ct_map_table);
err_free_ct_zone_table:
	rhashtable_destroy(&priv->ct_zone_table);
err_free_merge_table:
	rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
	rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}

static void nfp_zone_table_entry_destroy(struct nfp_fl_ct_zone_entry *zt)
{
	if (!zt)
		return;

	if (!list_empty(&zt->pre_ct_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "pre_ct_list not empty as expected, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->pre_ct_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	if (!list_empty(&zt->post_ct_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "post_ct_list not empty as expected, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->post_ct_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	if (zt->nft) {
		nf_flow_table_offload_del_cb(zt->nft,
					     nfp_fl_ct_handle_nft_flow,
					     zt);
		zt->nft = NULL;
	}

	if (!list_empty(&zt->nft_flows_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "nft_flows_list not empty as expected, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->nft_flows_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	rhashtable_free_and_destroy(&zt->tc_merge_tb,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&zt->nft_merge_tb,
				    nfp_check_rhashtable_empty, NULL);

	kfree(zt);
}

static void nfp_free_zone_table_entry(void *ptr, void *arg)
{
	struct nfp_fl_ct_zone_entry *zt = ptr;

	nfp_zone_table_entry_destroy(zt);
}

static void nfp_free_map_table_entry(void *ptr, void *arg)
{
	struct nfp_fl_ct_map_entry *map = ptr;

	if (!map)
		return;

	kfree(map);
}

void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->stats_ctx_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->merge_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->ct_zone_table,
				    nfp_free_zone_table_entry, NULL);
	nfp_zone_table_entry_destroy(priv->ct_zone_wc);

	rhashtable_free_and_destroy(&priv->ct_map_table,
				    nfp_free_map_table_entry, NULL);
	rhashtable_free_and_destroy(&priv->neigh_table,
				    nfp_check_rhashtable_empty, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}