// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2021 Corigine, Inc. */

#include "conntrack.h"
#include "../nfp_port.h"

const struct rhashtable_params nfp_tc_ct_merge_params = {
	.head_offset		= offsetof(struct nfp_fl_ct_tc_merge,
					   hash_node),
	.key_len		= sizeof(unsigned long) * 2,
	.key_offset		= offsetof(struct nfp_fl_ct_tc_merge, cookie),
	.automatic_shrinking	= true,
};

const struct rhashtable_params nfp_nft_ct_merge_params = {
	.head_offset		= offsetof(struct nfp_fl_nft_tc_merge,
					   hash_node),
	.key_len		= sizeof(unsigned long) * 3,
	.key_offset		= offsetof(struct nfp_fl_nft_tc_merge, cookie),
	.automatic_shrinking	= true,
};

static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
					      enum flow_action_id act_id);
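
/**
 * get_hashentry() - Wrapper around hashtable lookup.
 * @ht:		hashtable where entry could be found
 * @key:	key to lookup
 * @params:	hashtable params
 * @size:	size of entry to allocate if not in table
 *
 * Returns an entry from a hashtable. If entry does not exist
 * yet allocate the memory for it and return the new entry.
 */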
static void *get_hashentry(struct rhashtable *ht, void *key,
			   const struct rhashtable_params params, size_t size)
{
	void *result;

	result = rhashtable_lookup_fast(ht, key, params);

	if (result)
		return result;

	result = kzalloc(size, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ENOMEM);

	return result;
}

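/* A pre_ct flow is the half of a conntrack offload that runs before the
 * CT state is known: it sends the untracked packet to conntrack. It is
 * identified by an unmodified FLOW_ACTION_CT action (act->ct.action == 0,
 * i.e. a plain "ct" action, not commit/clear). Illustrative tc rule only,
 * device and chain numbers are assumptions:
 *   tc filter add dev eth0 ingress chain 0 proto ip flower ct_state -trk \
 *     action ct zone 1 pipe action goto chain 1
 */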
bool is_pre_ct_flow(struct flow_cls_offload *flow)
{
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, &flow->rule->action) {
		if (act->id == FLOW_ACTION_CT && !act->ct.action)
			return true;
	}
	return false;
}

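/* A post_ct flow is the other half of the offload: it matches on the
 * conntrack result, i.e. it carries a CT dissector key with the
 * "established" state bit set. The illustrative counterpart to the rule
 * above would match ct_state +trk+est in chain 1.
 */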
bool is_post_ct_flow(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_ct ct;

	if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
			return true;
	}
	return false;
}

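/**
 * get_mangled_key() - Mangle the key if mangle act exists
 * @rule:	rule that carries the actions
 * @buf:	pointer to key to be mangled
 * @offset:	used to adjust mangled offset in L2/L3/L4 header
 * @key_sz:	key size
 * @htype:	mangling type
 *
 * Returns buf where the mangled key stores.
 */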
static void *get_mangled_key(struct flow_rule *rule, void *buf,
			     u32 offset, size_t key_sz,
			     enum flow_action_mangle_base htype)
{
	struct flow_action_entry *act;
	u32 *val = (u32 *)buf;
	u32 off, msk, key;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_MANGLE &&
		    act->mangle.htype == htype) {
			off = act->mangle.offset - offset;
			msk = act->mangle.mask;
			key = act->mangle.val;

			/* Mangling is supposed to be u32 aligned */
			if (off % 4 || off >= key_sz)
				continue;

			val[off >> 2] &= msk;
			val[off >> 2] |= key;
		}
	}

	return buf;
}
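
/* Only tos and ttl are involved in flow_match_ip structure, which
 * doesn't conform to the layout of ip/ipv6 header definition. So
 * they need particular process here: fill them into the ip/ipv6
 * header, so that mangling actions can work directly.
 */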
#define NFP_IPV4_TOS_MASK	GENMASK(23, 16)
#define NFP_IPV4_TTL_MASK	GENMASK(31, 24)
#define NFP_IPV6_TCLASS_MASK	GENMASK(27, 20)
#define NFP_IPV6_HLIMIT_MASK	GENMASK(7, 0)
static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf,
				 bool is_v6)
{
	struct flow_match_ip match;
	/* IPv4's ttl field is in the third dword. */
	__be32 ip_hdr[3];
	u32 tmp, hdr_len;

	flow_rule_match_ip(rule, &match);

	if (is_v6) {
		tmp = FIELD_PREP(NFP_IPV6_TCLASS_MASK, match.key->tos);
		ip_hdr[0] = cpu_to_be32(tmp);
		tmp = FIELD_PREP(NFP_IPV6_HLIMIT_MASK, match.key->ttl);
		ip_hdr[1] = cpu_to_be32(tmp);
		hdr_len = 2 * sizeof(__be32);
	} else {
		tmp = FIELD_PREP(NFP_IPV4_TOS_MASK, match.key->tos);
		ip_hdr[0] = cpu_to_be32(tmp);
		tmp = FIELD_PREP(NFP_IPV4_TTL_MASK, match.key->ttl);
		ip_hdr[2] = cpu_to_be32(tmp);
		hdr_len = 3 * sizeof(__be32);
	}

	get_mangled_key(rule, ip_hdr, 0, hdr_len,
			is_v6 ? FLOW_ACT_MANGLE_HDR_TYPE_IP6 :
				FLOW_ACT_MANGLE_HDR_TYPE_IP4);

	match.key = buf;

	if (is_v6) {
		tmp = be32_to_cpu(ip_hdr[0]);
		match.key->tos = FIELD_GET(NFP_IPV6_TCLASS_MASK, tmp);
		tmp = be32_to_cpu(ip_hdr[1]);
		match.key->ttl = FIELD_GET(NFP_IPV6_HLIMIT_MASK, tmp);
	} else {
		tmp = be32_to_cpu(ip_hdr[0]);
		match.key->tos = FIELD_GET(NFP_IPV4_TOS_MASK, tmp);
		tmp = be32_to_cpu(ip_hdr[2]);
		match.key->ttl = FIELD_GET(NFP_IPV4_TTL_MASK, tmp);
	}

	return buf;
}

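/* Check that two flow entries can be merged: every dissector key the two
 * rules have in common must agree on the bits both of them match on,
 * after applying any mangle actions of entry1 to its key. Returns 0 on
 * success or -EINVAL if the flows conflict.
 */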
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
			      struct nfp_fl_ct_flow_entry *entry2)
{
	unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
				 entry2->rule->match.dissector->used_keys;
	bool out, is_v6 = false;
	u8 ip_proto = 0;
	/* Temporary buffer for mangling keys, 64 is enough to cover max
	 * struct size of key in various fields that may be mangled.
	 * Supported fields to mangle:
	 * mac_src/mac_dst(struct flow_match_eth_addrs, 12B)
	 * nw_tos/nw_ttl(struct flow_match_ip, 2B)
	 * nw_src/nw_dst(struct flow_match_ipv4_addrs, 8B)
	 * ipv6_src/ipv6_dst(struct flow_match_ipv6_addrs, 32B)
	 * tp_src/tp_dst(struct flow_match_ports, 4B)
	 */
	char buf[64];

	if (entry1->netdev && entry2->netdev &&
	    entry1->netdev != entry2->netdev)
		return -EINVAL;

	/* Check the overlapped fields one by one, the unmasked part
	 * should not conflict with each other.
	 */
	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match1, match2;

		flow_rule_match_control(entry1->rule, &match1);
		flow_rule_match_control(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match1, match2;

		flow_rule_match_basic(entry1->rule, &match1);
		flow_rule_match_basic(entry2->rule, &match2);

		/* n_proto field is a must in ct-related flows,
		 * it should be either ipv4 or ipv6.
		 */
		is_v6 = match1.key->n_proto == htons(ETH_P_IPV6);
		/* ip_proto field is a must when port field is cared */
		ip_proto = match1.key->ip_proto;

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match1, match2;

		flow_rule_match_ipv4_addrs(entry1->rule, &match1);
		flow_rule_match_ipv4_addrs(entry2->rule, &match2);

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf,
					     offsetof(struct iphdr, saddr),
					     sizeof(*match1.key),
					     FLOW_ACT_MANGLE_HDR_TYPE_IP4);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match1, match2;

		flow_rule_match_ipv6_addrs(entry1->rule, &match1);
		flow_rule_match_ipv6_addrs(entry2->rule, &match2);

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf,
					     offsetof(struct ipv6hdr, saddr),
					     sizeof(*match1.key),
					     FLOW_ACT_MANGLE_HDR_TYPE_IP6);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
		enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
		struct flow_match_ports match1, match2;

		flow_rule_match_ports(entry1->rule, &match1);
		flow_rule_match_ports(entry2->rule, &match2);

		if (ip_proto == IPPROTO_UDP)
			htype = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
		else if (ip_proto == IPPROTO_TCP)
			htype = FLOW_ACT_MANGLE_HDR_TYPE_TCP;

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf, 0,
					     sizeof(*match1.key), htype);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match1, match2;

		flow_rule_match_eth_addrs(entry1->rule, &match1);
		flow_rule_match_eth_addrs(entry2->rule, &match2);

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf, 0,
					     sizeof(*match1.key),
					     FLOW_ACT_MANGLE_HDR_TYPE_ETH);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match1, match2;

		flow_rule_match_vlan(entry1->rule, &match1);
		flow_rule_match_vlan(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match1, match2;

		flow_rule_match_mpls(entry1->rule, &match1);
		flow_rule_match_mpls(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match1, match2;

		flow_rule_match_tcp(entry1->rule, &match1);
		flow_rule_match_tcp(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match1, match2;

		flow_rule_match_ip(entry1->rule, &match1);
		flow_rule_match_ip(entry2->rule, &match2);

		match1.key = get_mangled_tos_ttl(entry1->rule, buf, is_v6);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match1, match2;

		flow_rule_match_enc_keyid(entry1->rule, &match1);
		flow_rule_match_enc_keyid(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match1, match2;

		flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
		flow_rule_match_enc_ipv4_addrs(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match1, match2;

		flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
		flow_rule_match_enc_ipv6_addrs(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_control match1, match2;

		flow_rule_match_enc_control(entry1->rule, &match1);
		flow_rule_match_enc_control(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match1, match2;

		flow_rule_match_enc_ip(entry1->rule, &match1);
		flow_rule_match_enc_ip(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match1, match2;

		flow_rule_match_enc_opts(entry1->rule, &match1);
		flow_rule_match_enc_opts(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	return 0;

check_failed:
	return -EINVAL;
}

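/* Reject merges where the pre_ct or nft rule carries VLAN or MPLS
 * push/pop/mangle actions; these would rewrite headers between the
 * sub-flow matches, which the single merged hardware flow cannot
 * reproduce, so they are not supported here.
 */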
static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
				  struct nfp_fl_ct_flow_entry *post_ct_entry,
				  struct nfp_fl_ct_flow_entry *nft_entry)
{
	struct flow_action_entry *act;
	int i;

	/* Check for pre_ct->action conflicts */
	flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
		switch (act->id) {
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_MANGLE:
		case FLOW_ACTION_MPLS_PUSH:
		case FLOW_ACTION_MPLS_POP:
		case FLOW_ACTION_MPLS_MANGLE:
			return -EOPNOTSUPP;
		default:
			break;
		}
	}

	/* Check for nft->action conflicts */
	flow_action_for_each(i, act, &nft_entry->rule->action) {
		switch (act->id) {
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_MANGLE:
		case FLOW_ACTION_MPLS_PUSH:
		case FLOW_ACTION_MPLS_POP:
		case FLOW_ACTION_MPLS_MANGLE:
			return -EOPNOTSUPP;
		default:
			break;
		}
	}
	return 0;
}

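/* Ensure the ct mark/labels that the post_ct flow matches on agree with
 * the conntrack metadata that the nft flow will set, within the bits the
 * post_ct rule actually masks on; otherwise the merged flow could never
 * be hit.
 */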
static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
			     struct nfp_fl_ct_flow_entry *nft_entry)
{
	struct flow_dissector *dissector = post_ct_entry->rule->match.dissector;
	struct flow_action_entry *ct_met;
	struct flow_match_ct ct;
	int i;

	ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
	if (ct_met && (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))) {
		u32 *act_lbl;

		act_lbl = ct_met->ct_metadata.labels;
		flow_rule_match_ct(post_ct_entry->rule, &ct);
		for (i = 0; i < 4; i++) {
			if ((ct.key->ct_labels[i] & ct.mask->ct_labels[i]) ^
			    (act_lbl[i] & ct.mask->ct_labels[i]))
				return -EINVAL;
		}

		if ((ct.key->ct_mark & ct.mask->ct_mark) ^
		    (ct_met->ct_metadata.mark & ct.mask->ct_mark))
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}

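/* Walk the key layer bitmap and compute both the total key size and, in
 * @map, the byte offset of each present layer within the packed key. For
 * example (illustrative): a flow with only META_TCI, PORT and IPV4 layers
 * gets offsets 0, sizeof(meta_tci), and sizeof(meta_tci) +
 * sizeof(in_port) respectively.
 */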
static int
nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
{
	int key_size;

	/* This field must always be present */
	key_size = sizeof(struct nfp_flower_meta_tci);
	map[FLOW_PAY_META_TCI] = 0;

	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) {
		map[FLOW_PAY_EXT_META] = key_size;
		key_size += sizeof(struct nfp_flower_ext_meta);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) {
		map[FLOW_PAY_INPORT] = key_size;
		key_size += sizeof(struct nfp_flower_in_port);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) {
		map[FLOW_PAY_MAC_MPLS] = key_size;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) {
		map[FLOW_PAY_L4] = key_size;
		key_size += sizeof(struct nfp_flower_tp_ports);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) {
		map[FLOW_PAY_IPV4] = key_size;
		key_size += sizeof(struct nfp_flower_ipv4);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) {
		map[FLOW_PAY_IPV6] = key_size;
		key_size += sizeof(struct nfp_flower_ipv6);
	}

	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
		map[FLOW_PAY_QINQ] = key_size;
		key_size += sizeof(struct nfp_flower_vlan);
	}

	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		map[FLOW_PAY_GRE] = key_size;
		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
			key_size += sizeof(struct nfp_flower_ipv6_gre_tun);
		else
			key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
	}

	if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
	    (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
		map[FLOW_PAY_UDP_TUN] = key_size;
		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
			key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		else
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
	}

	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
		map[FLOW_PAY_GENEVE_OPT] = key_size;
		key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return key_size;
}

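/* Build a single action list out of the pre_ct, nft and post_ct rules and
 * compile it for the firmware. CT, GOTO and CT_METADATA actions are
 * dropped, since they only exist to stitch the software datapath together
 * and have no hardware equivalent in the merged flow.
 */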
static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
					struct nfp_flower_priv *priv,
					struct net_device *netdev,
					struct nfp_fl_payload *flow_pay)
{
	struct flow_action_entry *a_in;
	int i, j, num_actions, id;
	struct flow_rule *a_rule;
	int err = 0, offset = 0;

	num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries +
		      rules[CT_TYPE_NFT]->action.num_entries +
		      rules[CT_TYPE_POST_CT]->action.num_entries;

	a_rule = flow_rule_alloc(num_actions);
	if (!a_rule)
		return -ENOMEM;

	/* Actions need a BASIC dissector. */
	a_rule->match = rules[CT_TYPE_PRE_CT]->match;

	/* Copy actions */
	for (j = 0; j < _CT_TYPE_MAX; j++) {
		if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
			struct flow_match_basic match;

			/* ip_proto is the only field that is needed in later compile_action,
			 * needed to set the correct checksum flags. It doesn't really matter
			 * which input rule's ip_proto field we take as the earlier merge checks
			 * would have made sure that they don't conflict. We do not know which
			 * of the subflows would have the ip_proto filled in, so we need to iterate
			 * over all the subflows and assign the proper subflow to a_rule->match
			 */
			flow_rule_match_basic(rules[j], &match);
			if (match.mask->ip_proto)
				a_rule->match = rules[j]->match;
		}

		for (i = 0; i < rules[j]->action.num_entries; i++) {
			a_in = &rules[j]->action.entries[i];
			id = a_in->id;

			/* Ignore CT related actions as these would already have
			 * been taken care of by previous checks, and we do not
			 * send them to the fw.
			 */
			switch (id) {
			case FLOW_ACTION_CT:
			case FLOW_ACTION_GOTO:
			case FLOW_ACTION_CT_METADATA:
				continue;
			default:
				memcpy(&a_rule->action.entries[offset++],
				       a_in, sizeof(struct flow_action_entry));
				break;
			}
		}
	}

	/* Some actions would have been ignored, so update the num_entries field */
	a_rule->action.num_entries = offset;
	err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL);
	kfree(a_rule);

	return err;
}

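/* Compile the merged (pre_ct + nft + post_ct) rule into an nfp_fl_payload,
 * add it to the flow and metadata tables and push it to the firmware. The
 * match key is the union of the three sub-rules' keys; each compile
 * helper below is called once per sub-rule so that all of them contribute
 * their masked fields to the same key/mask buffers.
 */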
static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
	struct nfp_fl_key_ls key_layer, tmp_layer;
	struct nfp_flower_priv *priv = zt->priv;
	u16 key_map[_FLOW_PAY_LAYERS_MAX];
	struct nfp_fl_payload *flow_pay;

	struct flow_rule *rules[_CT_TYPE_MAX];
	u8 *key, *msk, *kdata, *mdata;
	struct nfp_port *port = NULL;
	struct net_device *netdev;
	bool qinq_sup;
	u32 port_id;
	u16 offset;
	int i, err;

	netdev = m_entry->netdev;
	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);

	rules[CT_TYPE_PRE_CT] = m_entry->tc_m_parent->pre_ct_parent->rule;
	rules[CT_TYPE_NFT] = m_entry->nft_parent->rule;
	rules[CT_TYPE_POST_CT] = m_entry->tc_m_parent->post_ct_parent->rule;

	memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
	memset(&key_map, 0, sizeof(key_map));

	/* Calculate the resultant key layer and size for offload */
	for (i = 0; i < _CT_TYPE_MAX; i++) {
		err = nfp_flower_calculate_key_layers(priv->app,
						      m_entry->netdev,
						      &tmp_layer, rules[i],
						      &tun_type, NULL);
		if (err)
			return err;

		key_layer.key_layer |= tmp_layer.key_layer;
		key_layer.key_layer_two |= tmp_layer.key_layer_two;
	}
	key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map);

	flow_pay = nfp_flower_allocate_new(&key_layer);
	if (!flow_pay)
		return -ENOMEM;

	memset(flow_pay->unmasked_data, 0, key_layer.key_size);
	memset(flow_pay->mask_data, 0, key_layer.key_size);

	kdata = flow_pay->unmasked_data;
	mdata = flow_pay->mask_data;

	offset = key_map[FLOW_PAY_META_TCI];
	key = kdata + offset;
	msk = mdata + offset;
	nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key,
				(struct nfp_flower_meta_tci *)msk,
				key_layer.key_layer);

	if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_EXT_META];
		key = kdata + offset;
		msk = mdata + offset;
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key,
					    key_layer.key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_layer.key_layer_two);
	}

	/* Using in_port from the -trk rule. The tc merge checks should already
	 * be checking that the ingress netdevs are the same
	 */
	port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev);
	offset = key_map[FLOW_PAY_INPORT];
	key = kdata + offset;
	msk = mdata + offset;
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)key,
				      port_id, false, tun_type, NULL);
	if (err)
		goto ct_offload_err;
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, NULL);
	if (err)
		goto ct_offload_err;

	/* This following part works on the assumption that previous checks has
	 * already filtered out flows that has different values for the below
	 * fields.
	 */
	if (!qinq_sup) {
		for (i = 0; i < _CT_TYPE_MAX; i++) {
			offset = key_map[FLOW_PAY_META_TCI];
			key = kdata + offset;
			msk = mdata + offset;
			nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key,
					       (struct nfp_flower_meta_tci *)msk,
					       rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_MAC_MPLS];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
			nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
					       (struct nfp_flower_mac_mpls *)msk,
					       rules[i]);
			err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key,
						      (struct nfp_flower_mac_mpls *)msk,
						      rules[i], NULL);
			if (err)
				goto ct_offload_err;
		}
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_IPV4];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
			nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
						(struct nfp_flower_ipv4 *)msk,
						rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_IPV6];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
			nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
						(struct nfp_flower_ipv6 *)msk,
						rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_L4];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
			nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
						 (struct nfp_flower_tp_ports *)msk,
						 rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
		offset = key_map[FLOW_PAY_QINQ];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
			nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
						(struct nfp_flower_vlan *)msk,
						rules[i]);
		}
	}

	if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		offset = key_map[FLOW_PAY_GRE];
		key = kdata + offset;
		msk = mdata + offset;
		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_gre_tun *gre_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			for (i = 0; i < _CT_TYPE_MAX; i++) {
				nfp_flower_compile_ipv6_gre_tun((void *)key,
								(void *)msk, rules[i]);
			}
			gre_match = (struct nfp_flower_ipv6_gre_tun *)key;
			dst = &gre_match->ipv6.dst;

			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
			if (!entry) {
				err = -ENOMEM;
				goto ct_offload_err;
			}

			flow_pay->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			for (i = 0; i < _CT_TYPE_MAX; i++) {
				nfp_flower_compile_ipv4_gre_tun((void *)key,
								(void *)msk, rules[i]);
			}
			dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst;

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			flow_pay->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(priv->app, dst);
		}
	}

	if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		offset = key_map[FLOW_PAY_UDP_TUN];
		key = kdata + offset;
		msk = mdata + offset;
		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_udp_tun *udp_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			for (i = 0; i < _CT_TYPE_MAX; i++) {
				nfp_flower_compile_ipv6_udp_tun((void *)key,
								(void *)msk, rules[i]);
			}
			udp_match = (struct nfp_flower_ipv6_udp_tun *)key;
			dst = &udp_match->ipv6.dst;

			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
			if (!entry) {
				err = -ENOMEM;
				goto ct_offload_err;
			}

			flow_pay->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			for (i = 0; i < _CT_TYPE_MAX; i++) {
				nfp_flower_compile_ipv4_udp_tun((void *)key,
								(void *)msk, rules[i]);
			}
			dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst;

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			flow_pay->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(priv->app, dst);
		}

		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			offset = key_map[FLOW_PAY_GENEVE_OPT];
			key = kdata + offset;
			msk = mdata + offset;
			for (i = 0; i < _CT_TYPE_MAX; i++)
				nfp_flower_compile_geneve_opt(key, msk, rules[i]);
		}
	}

	/* Merge actions into flow_pay */
	err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay);
	if (err)
		goto ct_offload_err;

	/* Use the pointer address as the cookie, but set the last bit to 1.
	 * This is to avoid the 'is_merge_flow' check from detecting this as
	 * an already merged flow. This works since address returned by
	 * kmalloc is aligned to at least 2 bytes.
	 */
	flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1;
	err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie,
					flow_pay, netdev, NULL);
	if (err)
		goto ct_offload_err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto ct_release_offload_meta_err;

	err = nfp_flower_xmit_flow(priv->app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto ct_remove_rhash_err;

	m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie;
	m_entry->flow_pay = flow_pay;

	if (port)
		port->tc_offload_cnt++;

	return err;

ct_remove_rhash_err:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
ct_release_offload_meta_err:
	nfp_modify_flow_metadata(priv->app, flow_pay);
ct_offload_err:
	if (flow_pay->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr);
	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6);
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
	return err;
}

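/* Tear down a previously offloaded merged flow: release its metadata and
 * tunnel references, send a FLOW_DEL message if it made it to hardware,
 * and free the payload.
 */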
static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
				 struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_port *port = NULL;
	int err = 0;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	flow_pay = nfp_flower_search_fl_table(app, cookie, netdev);
	if (!flow_pay)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, flow_pay);
	if (err)
		goto err_free_merge_flow;

	if (flow_pay->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr);

	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);

	if (!flow_pay->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, flow_pay);
	if (port)
		port->tc_offload_cnt--;
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(flow_pay, rcu);
	return err;
}

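/* Try to merge an nft flow with a pre_ct/post_ct pair (a tc_merge entry).
 * On success this creates an nfp_fl_nft_tc_merge entry, keyed by the
 * three parent cookies {pre_ct, post_ct, nft}, and offloads it.
 */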
static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
			       struct nfp_fl_ct_flow_entry *nft_entry,
			       struct nfp_fl_ct_tc_merge *tc_m_entry)
{
	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
	struct nfp_fl_nft_tc_merge *nft_m_entry;
	unsigned long new_cookie[3];
	int err;

	pre_ct_entry = tc_m_entry->pre_ct_parent;
	post_ct_entry = tc_m_entry->post_ct_parent;

	err = nfp_ct_merge_act_check(pre_ct_entry, post_ct_entry, nft_entry);
	if (err)
		return err;

	/* Check that the two tc flows are also compatible with
	 * the nft entry. No need to check the pre_ct and post_ct
	 * entries as that was already done during pre_merge.
	 * The nft entry does not have a chain populated, so
	 * skip this check.
	 */
	err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
	if (err)
		return err;
	err = nfp_ct_merge_check(nft_entry, post_ct_entry);
	if (err)
		return err;
	err = nfp_ct_check_meta(post_ct_entry, nft_entry);
	if (err)
		return err;

	/* Combine tc_merge and nft cookies for this cookie. */
	new_cookie[0] = tc_m_entry->cookie[0];
	new_cookie[1] = tc_m_entry->cookie[1];
	new_cookie[2] = nft_entry->cookie;
	nft_m_entry = get_hashentry(&zt->nft_merge_tb,
				    &new_cookie,
				    nfp_nft_ct_merge_params,
				    sizeof(*nft_m_entry));

	if (IS_ERR(nft_m_entry))
		return PTR_ERR(nft_m_entry);

	/* nft_m_entry already present, not merging again */
	if (!memcmp(&new_cookie, nft_m_entry->cookie, sizeof(new_cookie)))
		return 0;

	memcpy(&nft_m_entry->cookie, &new_cookie, sizeof(new_cookie));
	nft_m_entry->zt = zt;
	nft_m_entry->tc_m_parent = tc_m_entry;
	nft_m_entry->nft_parent = nft_entry;
	nft_m_entry->tc_flower_cookie = 0;
	/* Copy the netdev from the pre_ct entry. When the tc_m_entry was
	 * created it only combined entries with the same netdev, so any of
	 * them can be used.
	 */
	nft_m_entry->netdev = pre_ct_entry->netdev;

	/* Add this entry to the tc_m_list and nft_flow lists */
	list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
	list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);

	/* Generate offload structure and send to nfp */
	err = nfp_fl_ct_add_offload(nft_m_entry);
	if (err)
		goto err_nft_ct_offload;

	err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
				     nfp_nft_ct_merge_params);
	if (err)
		goto err_nft_ct_merge_insert;

	zt->nft_merge_count++;

	return err;

err_nft_ct_merge_insert:
	nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
			      nft_m_entry->netdev);
err_nft_ct_offload:
	list_del(&nft_m_entry->tc_merge_list);
	list_del(&nft_m_entry->nft_flow_list);
	kfree(nft_m_entry);
	return err;
}

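/* Pair up a pre_ct and a post_ct entry into a tc_merge entry (keyed by
 * both cookies), then attempt nft merges against every nft flow already
 * seen in this zone.
 */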
static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
			      struct nfp_fl_ct_flow_entry *ct_entry1,
			      struct nfp_fl_ct_flow_entry *ct_entry2)
{
	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
	struct nfp_fl_ct_flow_entry *nft_entry, *nft_tmp;
	struct nfp_fl_ct_tc_merge *m_entry;
	unsigned long new_cookie[2];
	int err;

	if (ct_entry1->type == CT_TYPE_PRE_CT) {
		pre_ct_entry = ct_entry1;
		post_ct_entry = ct_entry2;
	} else {
		post_ct_entry = ct_entry1;
		pre_ct_entry = ct_entry2;
	}

	/* Checks that the chain_index of the filter matches the
	 * chain_index of the GOTO action.
	 */
	if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
		return -EINVAL;

	err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry);
	if (err)
		return err;

	new_cookie[0] = pre_ct_entry->cookie;
	new_cookie[1] = post_ct_entry->cookie;
	m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
				nfp_tc_ct_merge_params, sizeof(*m_entry));
	if (IS_ERR(m_entry))
		return PTR_ERR(m_entry);

	/* m_entry already present, not merging again */
	if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
		return 0;

	memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
	m_entry->zt = zt;
	m_entry->post_ct_parent = post_ct_entry;
	m_entry->pre_ct_parent = pre_ct_entry;

	/* Add this entry to the pre_ct and post_ct lists */
	list_add(&m_entry->post_ct_list, &post_ct_entry->children);
	list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
	INIT_LIST_HEAD(&m_entry->children);

	err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		goto err_ct_tc_merge_insert;
	zt->tc_merge_count++;

	/* Merge with existing nft flows */
	list_for_each_entry_safe(nft_entry, nft_tmp, &zt->nft_flows_list,
				 list_node) {
		nfp_ct_do_nft_merge(zt, nft_entry, m_entry);
	}

	return 0;

err_ct_tc_merge_insert:
	list_del(&m_entry->post_ct_list);
	list_del(&m_entry->pre_ct_list);
	kfree(m_entry);
	return err;
}

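/* Look up (or allocate) the zone table entry for a ct zone. The zone that
 * is fully wildcarded in the post_ct match gets a single special entry
 * kept in priv->ct_zone_wc instead of the zone hashtable.
 */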
static struct
nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
					 u16 zone, bool wildcarded)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	if (wildcarded && priv->ct_zone_wc)
		return priv->ct_zone_wc;

	if (!wildcarded) {
		zt = get_hashentry(&priv->ct_zone_table, &zone,
				   nfp_zone_table_params, sizeof(*zt));

		/* If priv is set this is an existing entry, just return it */
		if (IS_ERR(zt) || zt->priv)
			return zt;
	} else {
		zt = kzalloc(sizeof(*zt), GFP_KERNEL);
		if (!zt)
			return ERR_PTR(-ENOMEM);
	}

	zt->zone = zone;
	zt->priv = priv;
	zt->nft = NULL;

	/* init the various hash tables and lists */
	INIT_LIST_HEAD(&zt->pre_ct_list);
	INIT_LIST_HEAD(&zt->post_ct_list);
	INIT_LIST_HEAD(&zt->nft_flows_list);

	err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
	if (err)
		goto err_tc_merge_tb_init;

	err = rhashtable_init(&zt->nft_merge_tb, &nfp_nft_ct_merge_params);
	if (err)
		goto err_nft_merge_tb_init;

	if (wildcarded) {
		priv->ct_zone_wc = zt;
	} else {
		err = rhashtable_insert_fast(&priv->ct_zone_table,
					     &zt->hash_node,
					     nfp_zone_table_params);
		if (err)
			goto err_zone_insert;
	}

	return zt;

err_zone_insert:
	rhashtable_destroy(&zt->nft_merge_tb);
err_nft_merge_tb_init:
	rhashtable_destroy(&zt->tc_merge_tb);
err_tc_merge_tb_init:
	kfree(zt);
	return ERR_PTR(err);
}

static struct net_device *get_netdev_from_rule(struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
		if (match.key->ingress_ifindex & match.mask->ingress_ifindex)
			return __dev_get_by_index(&init_net,
						  match.key->ingress_ifindex);
	}

	return NULL;
}

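/* Allocate a ct flow entry for a pre_ct, post_ct or nft rule and insert a
 * cookie->entry mapping into the global ct_map_table. nft rules are deep
 * copied, because the flow objects they reference are destroyed once the
 * offload callback returns.
 */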
static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
					 struct net_device *netdev,
					 struct flow_cls_offload *flow,
					 bool is_nft, struct netlink_ext_ack *extack)
{
	struct nf_flow_match *nft_match = NULL;
	struct nfp_fl_ct_flow_entry *entry;
	struct nfp_fl_ct_map_entry *map;
	struct flow_action_entry *act;
	int err, i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
	if (!entry->rule) {
		err = -ENOMEM;
		goto err_pre_ct_rule;
	}

	/* nft flows gets destroyed after callback return, so need
	 * to do a full copy instead of just a reference.
	 */
	if (is_nft) {
		nft_match = kzalloc(sizeof(*nft_match), GFP_KERNEL);
		if (!nft_match) {
			err = -ENOMEM;
			goto err_pre_ct_act;
		}
		memcpy(&nft_match->dissector, flow->rule->match.dissector,
		       sizeof(nft_match->dissector));
		memcpy(&nft_match->mask, flow->rule->match.mask,
		       sizeof(nft_match->mask));
		memcpy(&nft_match->key, flow->rule->match.key,
		       sizeof(nft_match->key));
		entry->rule->match.dissector = &nft_match->dissector;
		entry->rule->match.mask = &nft_match->mask;
		entry->rule->match.key = &nft_match->key;

		if (!netdev)
			netdev = get_netdev_from_rule(entry->rule);
	} else {
		entry->rule->match.dissector = flow->rule->match.dissector;
		entry->rule->match.mask = flow->rule->match.mask;
		entry->rule->match.key = flow->rule->match.key;
	}

	entry->zt = zt;
	entry->netdev = netdev;
	entry->cookie = flow->cookie;
	entry->chain_index = flow->common.chain_index;
	entry->tun_offset = NFP_FL_CT_NO_TUN;

	/* Copy over action data. Unfortunately we do not get a handle to the
	 * original tcf_action data, and the flow objects gets destroyed, so we
	 * cannot just use a copy of the original rule data. However we need to
	 * keep the buffers these point to around, so we do a deep copy instead.
	 */
	entry->rule->action.num_entries = flow->rule->action.num_entries;
	flow_action_for_each(i, act, &flow->rule->action) {
		struct flow_action_entry *new_act;

		new_act = &entry->rule->action.entries[i];
		memcpy(new_act, act, sizeof(struct flow_action_entry));
		/* TUNNEL_ENCAP is a special case: it carries a tunnel info
		 * buffer that also needs a deep copy.
		 */
		if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
			struct ip_tunnel_info *tun = act->tunnel;
			size_t tun_size = sizeof(*tun) + tun->options_len;

			new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
			if (!new_act->tunnel) {
				err = -ENOMEM;
				goto err_pre_ct_tun_cp;
			}
			entry->tun_offset = i;
		}
	}

	INIT_LIST_HEAD(&entry->children);

	/* Now add a ct map entry to flower-priv */
	map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
			    nfp_ct_map_params, sizeof(*map));
	if (IS_ERR(map)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: ct map entry creation failed");
		err = -ENOMEM;
		goto err_ct_flow_insert;
	}
	map->cookie = flow->cookie;
	map->ct_entry = entry;
	err = rhashtable_insert_fast(&zt->priv->ct_map_table,
				     &map->hash_node,
				     nfp_ct_map_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: ct map entry table add failed");
		goto err_map_insert;
	}

	return entry;

err_map_insert:
	kfree(map);
err_ct_flow_insert:
	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
err_pre_ct_tun_cp:
	kfree(nft_match);
err_pre_ct_act:
	kfree(entry->rule);
err_pre_ct_rule:
	kfree(entry);
	return ERR_PTR(err);
}

static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	zt = m_entry->zt;

	/* Flow is in HW, need to delete */
	if (m_entry->tc_flower_cookie) {
		err = nfp_fl_ct_del_offload(zt->priv->app, m_entry->tc_flower_cookie,
					    m_entry->netdev);
		if (err)
			return;
	}

	WARN_ON_ONCE(rhashtable_remove_fast(&zt->nft_merge_tb,
					    &m_entry->hash_node,
					    nfp_nft_ct_merge_params));
	zt->nft_merge_count--;
	list_del(&m_entry->tc_merge_list);
	list_del(&m_entry->nft_flow_list);

	kfree(m_entry);
}

static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
{
	struct nfp_fl_nft_tc_merge *m_entry, *tmp;

	/* These nft merge entries are parts of two lists, one is a list of
	 * nft_entries and the other a list of tc_merge structures. Iterate
	 * through the relevant list and cleanup the entries.
	 */

	if (is_nft_flow) {
		/* Need to iterate through list of nft_flow entries */
		struct nfp_fl_ct_flow_entry *ct_entry = entry;

		list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
					 nft_flow_list) {
			cleanup_nft_merge_entry(m_entry);
		}
	} else {
		/* Need to iterate through list of tc_merge entries */
		struct nfp_fl_ct_tc_merge *ct_entry = entry;

		list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
					 tc_merge_list) {
			cleanup_nft_merge_entry(m_entry);
		}
	}
}

static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	zt = m_ent->zt;
	err = rhashtable_remove_fast(&zt->tc_merge_tb,
				     &m_ent->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		pr_warn("WARNING: could not remove merge_entry from hashtable\n");
	zt->tc_merge_count--;
	list_del(&m_ent->post_ct_list);
	list_del(&m_ent->pre_ct_list);

	if (!list_empty(&m_ent->children))
		nfp_free_nft_merge_children(m_ent, false);
	kfree(m_ent);
}

static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
{
	struct nfp_fl_ct_tc_merge *m_ent, *tmp;

	switch (entry->type) {
	case CT_TYPE_PRE_CT:
		list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
			nfp_del_tc_merge_entry(m_ent);
		}
		break;
	case CT_TYPE_POST_CT:
		list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
			nfp_del_tc_merge_entry(m_ent);
		}
		break;
	default:
		break;
	}
}

void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
{
	list_del(&entry->list_node);

	if (!list_empty(&entry->children)) {
		if (entry->type == CT_TYPE_NFT)
			nfp_free_nft_merge_children(entry, true);
		else
			nfp_free_tc_merge_children(entry);
	}

	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);

	if (entry->type == CT_TYPE_NFT) {
		struct nf_flow_match *nft_match;

		nft_match = container_of(entry->rule->match.dissector,
					 struct nf_flow_match, dissector);
		kfree(nft_match);
	}

	kfree(entry->rule);
	kfree(entry);
}

static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
					      enum flow_action_id act_id)
{
	struct flow_action_entry *act = NULL;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == act_id)
			return act;
	}
	return NULL;
}

static void
nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
			struct nfp_fl_ct_zone_entry *zt_src,
			struct nfp_fl_ct_zone_entry *zt_dst)
{
	struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
	struct list_head *ct_list;

	if (ct_entry1->type == CT_TYPE_PRE_CT)
		ct_list = &zt_src->post_ct_list;
	else if (ct_entry1->type == CT_TYPE_POST_CT)
		ct_list = &zt_src->pre_ct_list;
	else
		return;

	list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
				 list_node) {
		nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
	}
}

static void
nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
			 struct nfp_fl_ct_zone_entry *zt)
{
	struct nfp_fl_ct_tc_merge *tc_merge_entry;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
	rhashtable_walk_start(&iter);
	while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(tc_merge_entry))
			continue;
		rhashtable_walk_stop(&iter);
		nfp_ct_do_nft_merge(zt, nft_entry, tc_merge_entry);
		rhashtable_walk_start(&iter);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

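/* Entry point for a pre_ct tc flow (one with a plain "ct" action followed
 * by a goto). Record it in its zone table, register the nft flowtable
 * callback on first use, then try to merge it with existing post_ct
 * entries in this zone and in the wildcarded zone.
 */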
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
			    struct net_device *netdev,
			    struct flow_cls_offload *flow,
			    struct netlink_ext_ack *extack)
{
	struct flow_action_entry *ct_act, *ct_goto;
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	ct_act = get_flow_act(flow->rule, FLOW_ACTION_CT);
	if (!ct_act) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack action empty in conntrack offload");
		return -EOPNOTSUPP;
	}

	ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
	if (!ct_goto) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack requires ACTION_GOTO");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

	if (!zt->nft) {
		zt->nft = ct_act->ct.flow_table;
		err = nf_flow_table_offload_add_cb(zt->nft, nfp_fl_ct_handle_nft_flow, zt);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "offload error: Could not register nft_callback");
			return err;
		}
	}

	/* Add entry to pre_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);
	ct_entry->type = CT_TYPE_PRE_CT;
	ct_entry->chain_index = ct_goto->chain_index;
	list_add(&ct_entry->list_node, &zt->pre_ct_list);
	zt->pre_ct_count++;

	nfp_ct_merge_tc_entries(ct_entry, zt, zt);

	/* Need to check and merge with tables in the wc_zone as well */
	if (priv->ct_zone_wc)
		nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);

	return 0;
}

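/* Entry point for a post_ct tc flow (one matching on an established ct
 * state). A fully wildcarded ct_zone match goes into the special wildcard
 * zone and is merged against every zone table; partially masked zones are
 * rejected.
 */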
int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
			     struct net_device *netdev,
			     struct flow_cls_offload *flow,
			     struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	bool wildcarded = false;
	struct flow_match_ct ct;

	flow_rule_match_ct(rule, &ct);
	if (!ct.mask->ct_zone) {
		wildcarded = true;
	} else if (ct.mask->ct_zone != U16_MAX) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: partially wildcarded ct_zone is not supported");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

	/* Add entry to post_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);

	ct_entry->type = CT_TYPE_POST_CT;
	ct_entry->chain_index = flow->common.chain_index;
	list_add(&ct_entry->list_node, &zt->post_ct_list);
	zt->post_ct_count++;

	if (wildcarded) {
		/* Iterate through all zone tables if not empty, look for merges with
		 * the wildcarded entry
		 */
		struct rhashtable_iter iter;
		struct nfp_fl_ct_zone_entry *zone_table;

		rhashtable_walk_enter(&priv->ct_zone_table, &iter);
		rhashtable_walk_start(&iter);
		while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(zone_table))
				continue;
			rhashtable_walk_stop(&iter);
			nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
			rhashtable_walk_start(&iter);
		}
		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	} else {
		nfp_ct_merge_tc_entries(ct_entry, zt, zt);
	}

	return 0;
}

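/* Accumulate the hardware stats of one merged flow into the totals for a
 * stats request, mirror them into the parent entries that did not
 * initiate the request, then clear the per-context counters.
 */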
static void
nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
		    enum ct_entry_type type, u64 *m_pkts,
		    u64 *m_bytes, u64 *m_used)
{
	struct nfp_flower_priv *priv = nft_merge->zt->priv;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	nfp_flow = nft_merge->flow_pay;
	if (!nfp_flow)
		return;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
	*m_pkts += priv->stats[ctx_id].pkts;
	*m_bytes += priv->stats[ctx_id].bytes;
	*m_used = max_t(u64, *m_used, priv->stats[ctx_id].used);

	/* If this flow is linked to merged flows, pull in their
	 * stats first.
	 */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(priv->app, nfp_flow);

	if (type != CT_TYPE_NFT) {
		/* Update nft cached stats */
		flow_stats_update(&nft_merge->nft_parent->stats,
				  priv->stats[ctx_id].bytes,
				  priv->stats[ctx_id].pkts,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);
	} else {
		/* Update pre_ct cached stats */
		flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats,
				  priv->stats[ctx_id].bytes,
				  priv->stats[ctx_id].pkts,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);

		/* Update post_ct cached stats */
		flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats,
				  priv->stats[ctx_id].bytes,
				  priv->stats[ctx_id].pkts,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);
	}

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
}

int nfp_fl_ct_stats(struct flow_cls_offload *flow,
		    struct nfp_fl_ct_map_entry *ct_map_ent)
{
	struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry;
	struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp;
	struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp;

	u64 pkts = 0, bytes = 0, used = 0;
	u64 m_pkts, m_bytes, m_used;

	spin_lock_bh(&ct_entry->zt->priv->stats_lock);

	if (ct_entry->type == CT_TYPE_PRE_CT) {
		/* Iterate tc_merge entries associated with this flow */
		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
					 pre_ct_list) {
			m_pkts = 0;
			m_bytes = 0;
			m_used = 0;

			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
						 tc_merge_list) {
				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT,
						    &m_pkts, &m_bytes, &m_used);
			}
			pkts += m_pkts;
			bytes += m_bytes;
			used = max_t(u64, used, m_used);

			flow_stats_update(&tc_merge->post_ct_parent->stats,
					  m_bytes, m_pkts, 0, m_used,
					  FLOW_ACTION_HW_STATS_DELAYED);
		}
	} else if (ct_entry->type == CT_TYPE_POST_CT) {
		/* Iterate tc_merge entries associated with this flow */
		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
					 post_ct_list) {
			m_pkts = 0;
			m_bytes = 0;
			m_used = 0;

			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
						 tc_merge_list) {
				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT,
						    &m_pkts, &m_bytes, &m_used);
			}
			pkts += m_pkts;
			bytes += m_bytes;
			used = max_t(u64, used, m_used);

			flow_stats_update(&tc_merge->pre_ct_parent->stats,
					  m_bytes, m_pkts, 0, m_used,
					  FLOW_ACTION_HW_STATS_DELAYED);
		}
	} else {
		/* Iterate nft_merge entries associated with this nft flow */
		list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children,
					 nft_flow_list) {
			nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT,
					    &pkts, &bytes, &used);
		}
	}

	/* Add stats from this request to stats potentially cached by
	 * previous requests.
	 */
	flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used,
			  FLOW_ACTION_HW_STATS_DELAYED);

	flow_stats_update(&flow->stats, ct_entry->stats.bytes,
			  ct_entry->stats.pkts, 0,
			  ct_entry->stats.lastused,
			  FLOW_ACTION_HW_STATS_DELAYED);

	/* Stats have been synced to the flow, zero out the cache */
	ct_entry->stats.pkts = 0;
	ct_entry->stats.bytes = 0;
	spin_unlock_bh(&ct_entry->zt->priv->stats_lock);

	return 0;
}

static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct netlink_ext_ack *extack = NULL;

	ASSERT_RTNL();

	extack = flow->common.extack;
	switch (flow->command) {
	case FLOW_CLS_REPLACE:
		/* Netfilter can request offload multiple times for the same
		 * flow - protect against adding duplicates.
		 */
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
						    nfp_ct_map_params);
		if (!ct_map_ent) {
			ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
			if (IS_ERR(ct_entry))
				return PTR_ERR(ct_entry);
			ct_entry->type = CT_TYPE_NFT;
			list_add(&ct_entry->list_node, &zt->nft_flows_list);
			zt->nft_flows_count++;
			nfp_ct_merge_nft_with_tc(ct_entry, zt);
		}
		return 0;
	case FLOW_CLS_DESTROY:
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
						    nfp_ct_map_params);
		return nfp_fl_ct_del_flow(ct_map_ent);
	case FLOW_CLS_STATS:
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
						    nfp_ct_map_params);
		if (ct_map_ent)
			return nfp_fl_ct_stats(flow, ct_map_ent);
		break;
	default:
		break;
	}
	return -EINVAL;
}

int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *flow = type_data;
	struct nfp_fl_ct_zone_entry *zt = cb_priv;
	int err = -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		rtnl_lock();
		err = nfp_fl_ct_offload_nft_flow(zt, flow);
		rtnl_unlock();
		break;
	default:
		return -EOPNOTSUPP;
	}
	return err;
}

static void
nfp_fl_ct_clean_nft_entries(struct nfp_fl_ct_zone_entry *zt)
{
	struct nfp_fl_ct_flow_entry *nft_entry, *ct_tmp;
	struct nfp_fl_ct_map_entry *ct_map_ent;

	list_for_each_entry_safe(nft_entry, ct_tmp, &zt->nft_flows_list,
				 list_node) {
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table,
						    &nft_entry->cookie,
						    nfp_ct_map_params);
		nfp_fl_ct_del_flow(ct_map_ent);
	}
}

int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
{
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	struct rhashtable *m_table;

	if (!ct_map_ent)
		return -ENOENT;

	zt = ct_map_ent->ct_entry->zt;
	ct_entry = ct_map_ent->ct_entry;
	m_table = &zt->priv->ct_map_table;

	switch (ct_entry->type) {
	case CT_TYPE_PRE_CT:
		zt->pre_ct_count--;
		rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
				       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_entry);
		kfree(ct_map_ent);

		/* When the last pre_ct flow is removed, drop the nft
		 * flowtable reference and clean up any remaining nft flow
		 * entries for this zone.
		 */
		if (!zt->pre_ct_count) {
			zt->nft = NULL;
			nfp_fl_ct_clean_nft_entries(zt);
		}
		break;
	case CT_TYPE_POST_CT:
		zt->post_ct_count--;
		rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
				       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_entry);
		kfree(ct_map_ent);
		break;
	case CT_TYPE_NFT:
		zt->nft_flows_count--;
		rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
				       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
		kfree(ct_map_ent);
		break;
	default:
		break;
	}

	return 0;
}