// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
    (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
     TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
    (FLOW_DIS_IS_FRAGMENT | \
     FLOW_DIS_FIRST_FRAG)

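/* Flow dissector keys the driver knows how to offload; rules using any
 * other key are rejected up front in nfp_flower_calculate_key_layers().
 */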
#define NFP_FLOWER_WHITELIST_DISSECTOR \
    (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
     BIT(FLOW_DISSECTOR_KEY_BASIC) | \
     BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
     BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
     BIT(FLOW_DISSECTOR_KEY_TCP) | \
     BIT(FLOW_DISSECTOR_KEY_PORTS) | \
     BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
     BIT(FLOW_DISSECTOR_KEY_VLAN) | \
     BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
     BIT(FLOW_DISSECTOR_KEY_MPLS) | \
     BIT(FLOW_DISSECTOR_KEY_CT) | \
     BIT(FLOW_DISSECTOR_KEY_META) | \
     BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
    (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
    (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
    (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
     BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
    (NFP_FLOWER_LAYER_PORT | \
     NFP_FLOWER_LAYER_MAC | \
     NFP_FLOWER_LAYER_TP | \
     NFP_FLOWER_LAYER_IPV4 | \
     NFP_FLOWER_LAYER_IPV6)

#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
    (NFP_FLOWER_LAYER_EXT_META | \
     NFP_FLOWER_LAYER_PORT | \
     NFP_FLOWER_LAYER_MAC | \
     NFP_FLOWER_LAYER_IPV4 | \
     NFP_FLOWER_LAYER_IPV6)

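/* Accumulates the match masks (and masks implied by set actions) of a
 * sub-flow. The anonymous union lets nfp_flower_can_merge() compare two
 * of these records as one flat bitmap via vals[].
 */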
struct nfp_flower_merge_check {
    union {
        struct {
            __be16 tci;
            struct nfp_flower_mac_mpls l2;
            struct nfp_flower_tp_ports l4;
            union {
                struct nfp_flower_ipv4 ipv4;
                struct nfp_flower_ipv6 ipv6;
            };
        };
        unsigned long vals[8];
    };
};

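/* Build and send a flow add/mod/del control message to the firmware.
 * @mtype is one of the NFP_FLOWER_CMSG_TYPE_FLOW_* values. Metadata
 * lengths are converted to long words around the copy, as the firmware
 * expects, then restored to bytes for the host.
 */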
int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
             u8 mtype)
{
    u32 meta_len, key_len, mask_len, act_len, tot_len;
    struct sk_buff *skb;
    unsigned char *msg;

    meta_len = sizeof(struct nfp_fl_rule_metadata);
    key_len = nfp_flow->meta.key_len;
    mask_len = nfp_flow->meta.mask_len;
    act_len = nfp_flow->meta.act_len;

    tot_len = meta_len + key_len + mask_len + act_len;

    /* Convert to long words as firmware expects
     * lengths in units of NFP_FL_LW_SIZ.
     */
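    /* NFP_FL_LW_SIZ is a shift count rather than a byte size: assuming
     * its cmsg.h value of 2, the shift divides byte lengths by 4, so a
     * 48 byte key is reported to the firmware as 12 long words.
     */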
    nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
    nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
    nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

    skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
    if (!skb)
        return -ENOMEM;

    msg = nfp_flower_cmsg_get_data(skb);
    memcpy(msg, &nfp_flow->meta, meta_len);
    memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
    memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
    memcpy(&msg[meta_len + key_len + mask_len],
           nfp_flow->action_data, act_len);

    /* Convert back to bytes as software expects
     * lengths in units of bytes.
     */
    nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
    nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
    nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

    nfp_ctrl_tx(app->ctrl, skb);

    return 0;
}

static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
{
    return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
           flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
           flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
           flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
{
    return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
           flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
              u32 *key_layer_two, int *key_size, bool ipv6,
              struct netlink_ext_ack *extack)
{
    if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
        (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
        return -EOPNOTSUPP;
    }

    if (enc_opts->len > 0) {
        *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
        *key_size += sizeof(struct nfp_flower_geneve_options);
    }

    return 0;
}

static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
                  struct flow_dissector_key_enc_opts *enc_op,
                  u32 *key_layer_two, u8 *key_layer, int *key_size,
                  struct nfp_flower_priv *priv,
                  enum nfp_flower_tun_type *tun_type, bool ipv6,
                  struct netlink_ext_ack *extack)
{
    int err;

    switch (enc_ports->dst) {
    case htons(IANA_VXLAN_UDP_PORT):
        *tun_type = NFP_FL_TUNNEL_VXLAN;
        *key_layer |= NFP_FLOWER_LAYER_VXLAN;

        if (ipv6) {
            *key_layer |= NFP_FLOWER_LAYER_EXT_META;
            *key_size += sizeof(struct nfp_flower_ext_meta);
            *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
            *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
        } else {
            *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
        }

        if (enc_op) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
            return -EOPNOTSUPP;
        }
        break;
    case htons(GENEVE_UDP_PORT):
        if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
            return -EOPNOTSUPP;
        }
        *tun_type = NFP_FL_TUNNEL_GENEVE;
        *key_layer |= NFP_FLOWER_LAYER_EXT_META;
        *key_size += sizeof(struct nfp_flower_ext_meta);
        *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

        if (ipv6) {
            *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
            *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
        } else {
            *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
        }

        if (!enc_op)
            break;
        if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
            return -EOPNOTSUPP;
        }
        err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
                        ipv6, extack);
        if (err)
            return err;
        break;
    default:
        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
        return -EOPNOTSUPP;
    }

    return 0;
}

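/* Walk the dissector keys used by a rule and compute which NFP key
 * layers (and the resulting key size) are needed to express it,
 * rejecting anything the loaded firmware cannot match on.
 */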
int
nfp_flower_calculate_key_layers(struct nfp_app *app,
                struct net_device *netdev,
                struct nfp_fl_key_ls *ret_key_ls,
                struct flow_rule *rule,
                enum nfp_flower_tun_type *tun_type,
                struct netlink_ext_ack *extack)
{
    struct flow_dissector *dissector = rule->match.dissector;
    struct flow_match_basic basic = { NULL, NULL };
    struct nfp_flower_priv *priv = app->priv;
    u32 key_layer_two;
    u8 key_layer;
    int key_size;
    int err;

    if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
        return -EOPNOTSUPP;
    }

    /* If any tunnel dissector key is used, the rule must also contain at
     * least the required set: ENC_CONTROL plus ENC_IPV4_ADDRS or
     * ENC_IPV6_ADDRS.
     */
    if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
        (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
        != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
        (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
        != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
        return -EOPNOTSUPP;
    }

    key_layer_two = 0;
    key_layer = NFP_FLOWER_LAYER_PORT;
    key_size = sizeof(struct nfp_flower_meta_tci) +
           sizeof(struct nfp_flower_in_port);

    if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
        flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
        key_layer |= NFP_FLOWER_LAYER_MAC;
        key_size += sizeof(struct nfp_flower_mac_mpls);
    }

    if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
        struct flow_match_vlan vlan;

        flow_rule_match_vlan(rule, &vlan);
        if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
            vlan.key->vlan_priority) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
            return -EOPNOTSUPP;
        }
        if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
            !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
            key_layer |= NFP_FLOWER_LAYER_EXT_META;
            key_size += sizeof(struct nfp_flower_ext_meta);
            key_size += sizeof(struct nfp_flower_vlan);
            key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
        }
    }

    if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
        struct flow_match_vlan cvlan;

        if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
            return -EOPNOTSUPP;
        }

        flow_rule_match_vlan(rule, &cvlan);
        if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
            key_layer |= NFP_FLOWER_LAYER_EXT_META;
            key_size += sizeof(struct nfp_flower_ext_meta);
            key_size += sizeof(struct nfp_flower_vlan);
            key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
        }
    }

    if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
        struct flow_match_enc_opts enc_op = { NULL, NULL };
        struct flow_match_ipv4_addrs ipv4_addrs;
        struct flow_match_ipv6_addrs ipv6_addrs;
        struct flow_match_control enc_ctl;
        struct flow_match_ports enc_ports;
        bool ipv6_tun = false;

        flow_rule_match_enc_control(rule, &enc_ctl);

        if (enc_ctl.mask->addr_type != 0xffff) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
            return -EOPNOTSUPP;
        }

        ipv6_tun = enc_ctl.key->addr_type ==
                FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        if (ipv6_tun &&
            !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
            return -EOPNOTSUPP;
        }

        if (!ipv6_tun &&
            enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
            return -EOPNOTSUPP;
        }

        if (ipv6_tun) {
            flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
            if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
                       sizeof(ipv6_addrs.mask->dst))) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
                return -EOPNOTSUPP;
            }
        } else {
            flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
            if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
                return -EOPNOTSUPP;
            }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
            flow_rule_match_enc_opts(rule, &enc_op);

        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
            /* Check if GRE, which has no enc_ports */
            if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
                return -EOPNOTSUPP;
            }

            *tun_type = NFP_FL_TUNNEL_GRE;
            key_layer |= NFP_FLOWER_LAYER_EXT_META;
            key_size += sizeof(struct nfp_flower_ext_meta);
            key_layer_two |= NFP_FLOWER_LAYER2_GRE;

            if (ipv6_tun) {
                key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                key_size +=
                    sizeof(struct nfp_flower_ipv6_udp_tun);
            } else {
                key_size +=
                    sizeof(struct nfp_flower_ipv4_udp_tun);
            }

            if (enc_op.key) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
                return -EOPNOTSUPP;
            }
        } else {
            flow_rule_match_enc_ports(rule, &enc_ports);
            if (enc_ports.mask->dst != cpu_to_be16(~0)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
                return -EOPNOTSUPP;
            }

            err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
                                enc_op.key,
                                &key_layer_two,
                                &key_layer,
                                &key_size, priv,
                                tun_type, ipv6_tun,
                                extack);
            if (err)
                return err;

            /* Ensure the ingress netdev matches the expected
             * tun type.
             */
            if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
                return -EOPNOTSUPP;
            }
        }
    }

    if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
        flow_rule_match_basic(rule, &basic);

    if (basic.mask && basic.mask->n_proto) {
        /* Ethernet type is present in the key. */
        switch (basic.key->n_proto) {
        case cpu_to_be16(ETH_P_IP):
            key_layer |= NFP_FLOWER_LAYER_IPV4;
            key_size += sizeof(struct nfp_flower_ipv4);
            break;

        case cpu_to_be16(ETH_P_IPV6):
            key_layer |= NFP_FLOWER_LAYER_IPV6;
            key_size += sizeof(struct nfp_flower_ipv6);
            break;

        /* Currently we do not offload ARP
         * because we rely on it to get to the host.
         */
        case cpu_to_be16(ETH_P_ARP):
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
            return -EOPNOTSUPP;

        case cpu_to_be16(ETH_P_MPLS_UC):
        case cpu_to_be16(ETH_P_MPLS_MC):
            if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                key_layer |= NFP_FLOWER_LAYER_MAC;
                key_size += sizeof(struct nfp_flower_mac_mpls);
            }
            break;

        /* Will be included in layer 2. */
        case cpu_to_be16(ETH_P_8021Q):
            break;

        default:
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
            return -EOPNOTSUPP;
        }
    } else if (nfp_flower_check_higher_than_mac(rule)) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
        return -EOPNOTSUPP;
    }

    if (basic.mask && basic.mask->ip_proto) {
        switch (basic.key->ip_proto) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
        case IPPROTO_SCTP:
        case IPPROTO_ICMP:
        case IPPROTO_ICMPV6:
            key_layer |= NFP_FLOWER_LAYER_TP;
            key_size += sizeof(struct nfp_flower_tp_ports);
            break;
        }
    }

    if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
        nfp_flower_check_higher_than_l3(rule)) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
        return -EOPNOTSUPP;
    }

    if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
        struct flow_match_tcp tcp;
        u32 tcp_flags;

        flow_rule_match_tcp(rule, &tcp);
        tcp_flags = be16_to_cpu(tcp.key->flags);

        if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
            return -EOPNOTSUPP;
        }

        /* We only support PSH and URG flags when either
         * FIN, SYN or RST is present as well.
         */
        if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
            !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
            return -EOPNOTSUPP;
        }

        /* TCP flags are stored in either the IPv4 or IPv6 key space,
         * so ensure an IPv4/IPv6 key layer is included if we have not
         * added one already.
         */
        if (!basic.key) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
            return -EOPNOTSUPP;
        }

        if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
            !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
            switch (basic.key->n_proto) {
            case cpu_to_be16(ETH_P_IP):
                key_layer |= NFP_FLOWER_LAYER_IPV4;
                key_size += sizeof(struct nfp_flower_ipv4);
                break;

            case cpu_to_be16(ETH_P_IPV6):
                key_layer |= NFP_FLOWER_LAYER_IPV6;
                key_size += sizeof(struct nfp_flower_ipv6);
                break;

            default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
                return -EOPNOTSUPP;
            }
        }
    }

    if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
        struct flow_match_control ctl;

        flow_rule_match_control(rule, &ctl);
        if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
            return -EOPNOTSUPP;
        }
    }

    ret_key_ls->key_layer = key_layer;
    ret_key_ls->key_layer_two = key_layer_two;
    ret_key_ls->key_size = key_size;

    return 0;
}

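/* Allocate a flow payload along with key, mask and action buffers sized
 * from the computed key layout; action space is always the maximum the
 * firmware accepts.
 */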
struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
    struct nfp_fl_payload *flow_pay;

    flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
    if (!flow_pay)
        return NULL;

    flow_pay->meta.key_len = key_layer->key_size;
    flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
    if (!flow_pay->unmasked_data)
        goto err_free_flow;

    flow_pay->meta.mask_len = key_layer->key_size;
    flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
    if (!flow_pay->mask_data)
        goto err_free_unmasked;

    flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
    if (!flow_pay->action_data)
        goto err_free_mask;

    flow_pay->nfp_tun_ipv4_addr = 0;
    flow_pay->nfp_tun_ipv6 = NULL;
    flow_pay->meta.flags = 0;
    INIT_LIST_HEAD(&flow_pay->linked_flows);
    flow_pay->in_hw = false;
    flow_pay->pre_tun_rule.dev = NULL;

    return flow_pay;

err_free_mask:
    kfree(flow_pay->mask_data);
err_free_unmasked:
    kfree(flow_pay->unmasked_data);
err_free_flow:
    kfree(flow_pay);
    return NULL;
}

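/* Walk a sub-flow's action list and OR each action's set-mask into the
 * merge record: a field rewritten by an action is as good as matched for
 * merge purposes. Also reports the last action id and counts output
 * actions for the caller's sanity checks.
 */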
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
                     struct nfp_flower_merge_check *merge,
                     u8 *last_act_id, int *act_out)
{
    struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
    struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
    struct nfp_fl_set_ip4_addrs *ipv4_add;
    struct nfp_fl_set_ipv6_addr *ipv6_add;
    struct nfp_fl_push_vlan *push_vlan;
    struct nfp_fl_pre_tunnel *pre_tun;
    struct nfp_fl_set_tport *tport;
    struct nfp_fl_set_eth *eth;
    struct nfp_fl_act_head *a;
    unsigned int act_off = 0;
    bool ipv6_tun = false;
    u8 act_id = 0;
    u8 *ports;
    int i;

    while (act_off < flow->meta.act_len) {
        a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
        act_id = a->jump_id;

        switch (act_id) {
        case NFP_FL_ACTION_OPCODE_OUTPUT:
            if (act_out)
                (*act_out)++;
            break;
        case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
            push_vlan = (struct nfp_fl_push_vlan *)a;
            if (push_vlan->vlan_tci)
                merge->tci = cpu_to_be16(0xffff);
            break;
        case NFP_FL_ACTION_OPCODE_POP_VLAN:
            merge->tci = cpu_to_be16(0);
            break;
        case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
            /* New tunnel header means l2 to l4 can be matched. */
            eth_broadcast_addr(&merge->l2.mac_dst[0]);
            eth_broadcast_addr(&merge->l2.mac_src[0]);
            memset(&merge->l4, 0xff,
                   sizeof(struct nfp_flower_tp_ports));
            if (ipv6_tun)
                memset(&merge->ipv6, 0xff,
                       sizeof(struct nfp_flower_ipv6));
            else
                memset(&merge->ipv4, 0xff,
                       sizeof(struct nfp_flower_ipv4));
            break;
        case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
            eth = (struct nfp_fl_set_eth *)a;
            for (i = 0; i < ETH_ALEN; i++)
                merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
            for (i = 0; i < ETH_ALEN; i++)
                merge->l2.mac_src[i] |=
                    eth->eth_addr_mask[ETH_ALEN + i];
            break;
        case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
            ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
            merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
            merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
            break;
        case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
            ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
            merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
            merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
            break;
        case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
            ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
            for (i = 0; i < 4; i++)
                merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
                    ipv6_add->ipv6[i].mask;
            break;
        case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
            ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
            for (i = 0; i < 4; i++)
                merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
                    ipv6_add->ipv6[i].mask;
            break;
        case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
            ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
            merge->ipv6.ip_ext.ttl |=
                ipv6_tc_hl_fl->ipv6_hop_limit_mask;
            merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
            merge->ipv6.ipv6_flow_label_exthdr |=
                ipv6_tc_hl_fl->ipv6_label_mask;
            break;
        case NFP_FL_ACTION_OPCODE_SET_UDP:
        case NFP_FL_ACTION_OPCODE_SET_TCP:
            tport = (struct nfp_fl_set_tport *)a;
            ports = (u8 *)&merge->l4.port_src;
            for (i = 0; i < 4; i++)
                ports[i] |= tport->tp_port_mask[i];
            break;
        case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
            pre_tun = (struct nfp_fl_pre_tunnel *)a;
            ipv6_tun = be16_to_cpu(pre_tun->flags) &
                    NFP_FL_PRE_TUN_IPV6;
            break;
        case NFP_FL_ACTION_OPCODE_PRE_LAG:
        case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
            break;
        default:
            return -EOPNOTSUPP;
        }

        act_off += a->len_lw << NFP_FL_LW_SIZ;
    }

    if (last_act_id)
        *last_act_id = act_id;

    return 0;
}

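/* Copy the mask portions of a flow's key layers into the merge record so
 * they can later be compared as a flat bitmap. When extra_fields is
 * false, key layers outside NFP_FLOWER_MERGE_FIELDS are rejected.
 */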
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
                struct nfp_flower_merge_check *merge,
                bool extra_fields)
{
    struct nfp_flower_meta_tci *meta_tci;
    u8 *mask = flow->mask_data;
    u8 key_layer, match_size;

    memset(merge, 0, sizeof(struct nfp_flower_merge_check));

    meta_tci = (struct nfp_flower_meta_tci *)mask;
    key_layer = meta_tci->nfp_flow_key_layer;

    if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
        return -EOPNOTSUPP;

    merge->tci = meta_tci->tci;
    mask += sizeof(struct nfp_flower_meta_tci);

    if (key_layer & NFP_FLOWER_LAYER_EXT_META)
        mask += sizeof(struct nfp_flower_ext_meta);

    mask += sizeof(struct nfp_flower_in_port);

    if (key_layer & NFP_FLOWER_LAYER_MAC) {
        match_size = sizeof(struct nfp_flower_mac_mpls);
        memcpy(&merge->l2, mask, match_size);
        mask += match_size;
    }

    if (key_layer & NFP_FLOWER_LAYER_TP) {
        match_size = sizeof(struct nfp_flower_tp_ports);
        memcpy(&merge->l4, mask, match_size);
        mask += match_size;
    }

    if (key_layer & NFP_FLOWER_LAYER_IPV4) {
        match_size = sizeof(struct nfp_flower_ipv4);
        memcpy(&merge->ipv4, mask, match_size);
    }

    if (key_layer & NFP_FLOWER_LAYER_IPV6) {
        match_size = sizeof(struct nfp_flower_ipv6);
        memcpy(&merge->ipv6, mask, match_size);
    }

    return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
             struct nfp_fl_payload *sub_flow2)
{
    /* Two flows can be merged if sub_flow2 only matches on bits that are
     * either matched by sub_flow1 or set by a sub_flow1 action. This
     * ensures that every packet that hits sub_flow1 and recirculates is
     * guaranteed to hit sub_flow2.
     */
    struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
    int err, act_out = 0;
    u8 last_act_id = 0;

    err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
                          true);
    if (err)
        return err;

    err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
                          false);
    if (err)
        return err;

    err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
                           &last_act_id, &act_out);
    if (err)
        return err;

    /* Must only be 1 output action and it must be the last in sequence. */
    if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
        return -EOPNOTSUPP;

    /* Reject merge if sub_flow2 matches on something that is not matched
     * on or set in an action by sub_flow1.
     */
    err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
                sub_flow1_merge.vals,
                sizeof(struct nfp_flower_merge_check) * 8);
    if (err)
        return -EINVAL;

    return 0;
}

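/* Pre-actions (PRE_TUNNEL, PRE_LAG) must sit at the head of an action
 * list. Copy any found to act_dst and return the number of bytes
 * consumed, flagging whether a tunnel pre-action was seen.
 */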
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
                bool *tunnel_act)
{
    unsigned int act_off = 0, act_len;
    struct nfp_fl_act_head *a;
    u8 act_id = 0;

    while (act_off < len) {
        a = (struct nfp_fl_act_head *)&act_src[act_off];
        act_len = a->len_lw << NFP_FL_LW_SIZ;
        act_id = a->jump_id;

        switch (act_id) {
        case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
            if (tunnel_act)
                *tunnel_act = true;
            fallthrough;
        case NFP_FL_ACTION_OPCODE_PRE_LAG:
            memcpy(act_dst + act_off, act_src + act_off, act_len);
            break;
        default:
            return act_off;
        }

        act_off += act_len;
    }

    return act_off;
}

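/* After a tunnel push the firmware can only apply an optional leading
 * VLAN push followed by output actions; verify that here and report any
 * VLAN push found.
 */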
static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
    struct nfp_fl_act_head *a;
    unsigned int act_off = 0;

    while (act_off < len) {
        a = (struct nfp_fl_act_head *)&acts[act_off];

        if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
            *vlan = (struct nfp_fl_push_vlan *)a;
        else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
            return -EOPNOTSUPP;

        act_off += a->len_lw << NFP_FL_LW_SIZ;
    }

    /* Ensure any VLAN push also has an egress action. */
    if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
        return -EOPNOTSUPP;

    return 0;
}

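/* Fold a post-tunnel VLAN push into the tunnel set action itself so the
 * firmware tags the outer header when it performs the tunnel push.
 */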
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
    struct nfp_fl_set_tun *tun;
    struct nfp_fl_act_head *a;
    unsigned int act_off = 0;

    while (act_off < len) {
        a = (struct nfp_fl_act_head *)&acts[act_off];

        if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
            tun = (struct nfp_fl_set_tun *)a;
            tun->outer_vlan_tpid = vlan->vlan_tpid;
            tun->outer_vlan_tci = vlan->vlan_tci;

            return 0;
        }

        act_off += a->len_lw << NFP_FL_LW_SIZ;
    }

    /* Return error if no tunnel action is found. */
    return -EOPNOTSUPP;
}

static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
            struct nfp_fl_payload *sub_flow2,
            struct nfp_fl_payload *merge_flow)
{
    unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
    struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
    bool tunnel_act = false;
    char *merge_act;
    int err;

    /* The last action of sub_flow1 must be output - do not merge this. */
    sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
    sub2_act_len = sub_flow2->meta.act_len;

    if (!sub2_act_len)
        return -EINVAL;

    if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
        return -EINVAL;

    /* A shortcut can only be applied if there is a single action. */
    if (sub1_act_len)
        merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
    else
        merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

    merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
    merge_act = merge_flow->action_data;

    /* Copy any pre-actions to the start of merge flow action list. */
    pre_off1 = nfp_flower_copy_pre_actions(merge_act,
                           sub_flow1->action_data,
                           sub1_act_len, &tunnel_act);
    merge_act += pre_off1;
    sub1_act_len -= pre_off1;
    pre_off2 = nfp_flower_copy_pre_actions(merge_act,
                           sub_flow2->action_data,
                           sub2_act_len, NULL);
    merge_act += pre_off2;
    sub2_act_len -= pre_off2;

    /* The firmware performs the tunnel push on egress, so if sub_flow 1
     * pushes a tunnel, only a restricted set of sub_flow 2 actions can
     * produce a valid merge.
     */
    if (tunnel_act) {
        char *post_tun_acts = &sub_flow2->action_data[pre_off2];

        err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
                          &post_tun_push_vlan);
        if (err)
            return err;

        if (post_tun_push_vlan) {
            pre_off2 += sizeof(*post_tun_push_vlan);
            sub2_act_len -= sizeof(*post_tun_push_vlan);
        }
    }

    /* Copy remaining actions from sub_flows 1 and 2. */
    memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

    if (post_tun_push_vlan) {
        /* Update tunnel action in merge to include VLAN push. */
        err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
                         post_tun_push_vlan);
        if (err)
            return err;

        merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
    }

    merge_act += sub1_act_len;
    memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

    return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
    list_del(&link->merge_flow.list);
    list_del(&link->sub_flow.list);
    kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
                    struct nfp_fl_payload *sub_flow)
{
    struct nfp_fl_payload_link *link;

    list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
        if (link->sub_flow.flow == sub_flow) {
            nfp_flower_unlink_flow(link);
            return;
        }
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
                 struct nfp_fl_payload *sub_flow)
{
    struct nfp_fl_payload_link *link;

    link = kmalloc(sizeof(*link), GFP_KERNEL);
    if (!link)
        return -ENOMEM;

    link->merge_flow.flow = merge_flow;
    list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
    link->sub_flow.flow = sub_flow;
    list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

    return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows into a single flow.
 * @app:    Pointer to the APP handle
 * @sub_flow1:  Initial flow matched to produce merge hint
 * @sub_flow2:  Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) into a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
                     struct nfp_fl_payload *sub_flow1,
                     struct nfp_fl_payload *sub_flow2)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_fl_payload *merge_flow;
    struct nfp_fl_key_ls merge_key_ls;
    struct nfp_merge_info *merge_info;
    u64 parent_ctx = 0;
    int err;

    ASSERT_RTNL();

    if (sub_flow1 == sub_flow2 ||
        nfp_flower_is_merge_flow(sub_flow1) ||
        nfp_flower_is_merge_flow(sub_flow2))
        return -EINVAL;

    /* Check if the two flows are already merged; the merge table is
     * keyed by the two parent host context ids packed into a u64.
     */
    parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
    parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
    if (rhashtable_lookup_fast(&priv->merge_table,
                   &parent_ctx, merge_table_params)) {
        nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
        return 0;
    }

    err = nfp_flower_can_merge(sub_flow1, sub_flow2);
    if (err)
        return err;

    merge_key_ls.key_size = sub_flow1->meta.key_len;

    merge_flow = nfp_flower_allocate_new(&merge_key_ls);
    if (!merge_flow)
        return -ENOMEM;

    merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
    merge_flow->ingress_dev = sub_flow1->ingress_dev;

    memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
           sub_flow1->meta.key_len);
    memcpy(merge_flow->mask_data, sub_flow1->mask_data,
           sub_flow1->meta.mask_len);

    err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
    if (err)
        goto err_destroy_merge_flow;

    err = nfp_flower_link_flows(merge_flow, sub_flow1);
    if (err)
        goto err_destroy_merge_flow;

    err = nfp_flower_link_flows(merge_flow, sub_flow2);
    if (err)
        goto err_unlink_sub_flow1;

    err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
                    merge_flow->ingress_dev, NULL);
    if (err)
        goto err_unlink_sub_flow2;

    err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
                     nfp_flower_table_params);
    if (err)
        goto err_release_metadata;

    merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
    if (!merge_info) {
        err = -ENOMEM;
        goto err_remove_rhash;
    }
    merge_info->parent_ctx = parent_ctx;
    err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
                     merge_table_params);
    if (err)
        goto err_destroy_merge_info;

    err = nfp_flower_xmit_flow(app, merge_flow,
                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
    if (err)
        goto err_remove_merge_info;

    merge_flow->in_hw = true;
    sub_flow1->in_hw = false;

    return 0;

err_remove_merge_info:
    WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
                        &merge_info->ht_node,
                        merge_table_params));
err_destroy_merge_info:
    kfree(merge_info);
err_remove_rhash:
    WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                        &merge_flow->fl_node,
                        nfp_flower_table_params));
err_release_metadata:
    nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
    nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
    nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
    kfree(merge_flow->action_data);
    kfree(merge_flow->mask_data);
    kfree(merge_flow->unmasked_data);
    kfree(merge_flow);
    return err;
}

/**
 * nfp_flower_validate_pre_tun_rule() - Verify a flow as a pre-tunnel rule.
 * @app:    Pointer to the APP handle
 * @flow:   Pointer to NFP flow representation of rule
 * @key_ls: Pointer to NFP key layers structure
 * @extack: Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
                 struct nfp_fl_payload *flow,
                 struct nfp_fl_key_ls *key_ls,
                 struct netlink_ext_ack *extack)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_flower_meta_tci *meta_tci;
    struct nfp_flower_mac_mpls *mac;
    u8 *ext = flow->unmasked_data;
    struct nfp_fl_act_head *act;
    u8 *mask = flow->mask_data;
    bool vlan = false;
    int act_offset;
    u8 key_layer;

    meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
    key_layer = key_ls->key_layer;
    if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
        if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
            u16 vlan_tci = be16_to_cpu(meta_tci->tci);

            vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
            flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
            vlan = true;
        } else {
            flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
        }
    }

    if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
        return -EOPNOTSUPP;
    } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
        return -EOPNOTSUPP;
    }

    if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
        return -EOPNOTSUPP;
    }

    if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
        !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
        return -EOPNOTSUPP;
    }

    if (key_layer & NFP_FLOWER_LAYER_IPV6)
        flow->pre_tun_rule.is_ipv6 = true;
    else
        flow->pre_tun_rule.is_ipv6 = false;

    /* Skip fields known to exist. */
    mask += sizeof(struct nfp_flower_meta_tci);
    ext += sizeof(struct nfp_flower_meta_tci);
    if (key_ls->key_layer_two) {
        mask += sizeof(struct nfp_flower_ext_meta);
        ext += sizeof(struct nfp_flower_ext_meta);
    }
    mask += sizeof(struct nfp_flower_in_port);
    ext += sizeof(struct nfp_flower_in_port);

    /* Ensure destination MAC address is fully matched. */
    mac = (struct nfp_flower_mac_mpls *)mask;
    if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
        return -EOPNOTSUPP;
    }

    /* Ensure source MAC address is fully matched. This is only needed
     * for firmware with the DECAP_V2 feature enabled. Don't do this
     * for firmware without this feature to keep old behaviour.
     */
    if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
        mac = (struct nfp_flower_mac_mpls *)mask;
        if (!is_broadcast_ether_addr(&mac->mac_src[0])) {
            NL_SET_ERR_MSG_MOD(extack,
                       "unsupported pre-tunnel rule: source MAC field must not be masked");
            return -EOPNOTSUPP;
        }
    }

    if (mac->mpls_lse) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
        return -EOPNOTSUPP;
    }

    /* Ensure destination MAC address matches pre_tun_dev. */
    mac = (struct nfp_flower_mac_mpls *)ext;
    if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
        NL_SET_ERR_MSG_MOD(extack,
                   "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
        return -EOPNOTSUPP;
    }

    /* Save mac addresses in pre_tun_rule entry for later use */
    memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN);
    memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN);

    mask += sizeof(struct nfp_flower_mac_mpls);
    ext += sizeof(struct nfp_flower_mac_mpls);
    if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
        key_layer & NFP_FLOWER_LAYER_IPV6) {
        /* Flags and proto fields have same offset in IPv4 and IPv6. */
        int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
        int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
        int size;
        int i;

        size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
            sizeof(struct nfp_flower_ipv4) :
            sizeof(struct nfp_flower_ipv6);

        /* Ensure proto and flags are the only IP layer fields. */
        for (i = 0; i < size; i++)
            if (mask[i] && i != ip_flags && i != ip_proto) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
                return -EOPNOTSUPP;
            }
        ext += size;
        mask += size;
    }

    if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
        if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
            struct nfp_flower_vlan *vlan_tags;
            u16 vlan_tpid;
            u16 vlan_tci;

            vlan_tags = (struct nfp_flower_vlan *)ext;

            vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
            vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid);

            vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
            flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
            flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid);
            vlan = true;
        } else {
            flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
            flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff);
        }
    }

    /* Action must be a single egress or pop_vlan and egress. */
    act_offset = 0;
    act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
    if (vlan) {
        if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
            NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
            return -EOPNOTSUPP;
        }

        act_offset += act->len_lw << NFP_FL_LW_SIZ;
        act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
    }

    if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
        return -EOPNOTSUPP;
    }

    act_offset += act->len_lw << NFP_FL_LW_SIZ;

    /* Ensure there are no more actions after egress. */
    if (act_offset != flow->meta.act_len) {
        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
        return -EOPNOTSUPP;
    }

    return 0;
}

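/* Reject what the plain offload path cannot express: conntrack matches
 * are handled by the CT code paths, and only chain 0 rules are offloaded
 * here.
 */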
static bool offload_pre_check(struct flow_cls_offload *flow)
{
    struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
    struct flow_dissector *dissector = rule->match.dissector;

    if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
        return false;

    if (flow->common.chain_index)
        return false;

    return true;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:    Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow:   TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
               struct flow_cls_offload *flow)
{
    struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
    enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
    struct nfp_flower_priv *priv = app->priv;
    struct netlink_ext_ack *extack = NULL;
    struct nfp_fl_payload *flow_pay;
    struct nfp_fl_key_ls *key_layer;
    struct nfp_port *port = NULL;
    int err;

    extack = flow->common.extack;
    if (nfp_netdev_is_nfp_repr(netdev))
        port = nfp_port_from_netdev(netdev);

    if (is_pre_ct_flow(flow))
        return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);

    if (is_post_ct_flow(flow))
        return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);

    if (!offload_pre_check(flow))
        return -EOPNOTSUPP;

    key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
    if (!key_layer)
        return -ENOMEM;

    err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
                          &tun_type, extack);
    if (err)
        goto err_free_key_ls;

    flow_pay = nfp_flower_allocate_new(key_layer);
    if (!flow_pay) {
        err = -ENOMEM;
        goto err_free_key_ls;
    }

    err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
                        flow_pay, tun_type, extack);
    if (err)
        goto err_destroy_flow;

    err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
    if (err)
        goto err_destroy_flow;

    if (flow_pay->pre_tun_rule.dev) {
        err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
        if (err)
            goto err_destroy_flow;
    }

    err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
    if (err)
        goto err_destroy_flow;

    flow_pay->tc_flower_cookie = flow->cookie;
    err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
                     nfp_flower_table_params);
    if (err) {
        NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
        goto err_release_metadata;
    }

    if (flow_pay->pre_tun_rule.dev) {
        if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
            struct nfp_predt_entry *predt;

            predt = kzalloc(sizeof(*predt), GFP_KERNEL);
            if (!predt) {
                err = -ENOMEM;
                goto err_remove_rhash;
            }
            predt->flow_pay = flow_pay;
            INIT_LIST_HEAD(&predt->nn_list);
            spin_lock_bh(&priv->predt_lock);
            list_add(&predt->list_head, &priv->predt_list);
            flow_pay->pre_tun_rule.predt = predt;
            nfp_tun_link_and_update_nn_entries(app, predt);
            spin_unlock_bh(&priv->predt_lock);
        } else {
            err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
        }
    } else {
        err = nfp_flower_xmit_flow(app, flow_pay,
                       NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
    }

    if (err)
        goto err_remove_rhash;

    if (port)
        port->tc_offload_cnt++;

    flow_pay->in_hw = true;

    /* Deallocate flow payload when flower rule has been destroyed. */
    kfree(key_layer);

    return 0;

err_remove_rhash:
    WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                        &flow_pay->fl_node,
                        nfp_flower_table_params));
err_release_metadata:
    nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
    if (flow_pay->nfp_tun_ipv6)
        nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
    kfree(flow_pay->action_data);
    kfree(flow_pay->mask_data);
    kfree(flow_pay->unmasked_data);
    kfree(flow_pay);
err_free_key_ls:
    kfree(key_layer);
    return err;
}
1446 
1447 static void
1448 nfp_flower_remove_merge_flow(struct nfp_app *app,
1449                  struct nfp_fl_payload *del_sub_flow,
1450                  struct nfp_fl_payload *merge_flow)
1451 {
1452     struct nfp_flower_priv *priv = app->priv;
1453     struct nfp_fl_payload_link *link, *temp;
1454     struct nfp_merge_info *merge_info;
1455     struct nfp_fl_payload *origin;
1456     u64 parent_ctx = 0;
1457     bool mod = false;
1458     int err;
1459 
1460     link = list_first_entry(&merge_flow->linked_flows,
1461                 struct nfp_fl_payload_link, merge_flow.list);
1462     origin = link->sub_flow.flow;
1463 
1464     /* Re-add the rule the merge had overwritten, if it has not been deleted. */
1465     if (origin != del_sub_flow)
1466         mod = true;
1467 
1468     err = nfp_modify_flow_metadata(app, merge_flow);
1469     if (err) {
1470         nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
1471         goto err_free_links;
1472     }
1473 
1474     if (!mod) {
1475         err = nfp_flower_xmit_flow(app, merge_flow,
1476                        NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1477         if (err) {
1478             nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
1479             goto err_free_links;
1480         }
1481     } else {
1482         __nfp_modify_flow_metadata(priv, origin);
1483         err = nfp_flower_xmit_flow(app, origin,
1484                        NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
1485         if (err)
1486             nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
1487         origin->in_hw = true;
1488     }
1489 
1490 err_free_links:
1491     /* Clean up any links connected with the merge flow. */
1492     list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
1493                  merge_flow.list) {
1494         u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
1495 
1496         parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
1497         nfp_flower_unlink_flow(link);
1498     }
1499 
1500     merge_info = rhashtable_lookup_fast(&priv->merge_table,
1501                         &parent_ctx,
1502                         merge_table_params);
1503     if (merge_info) {
1504         WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
1505                             &merge_info->ht_node,
1506                             merge_table_params));
1507         kfree(merge_info);
1508     }
1509 
1510     kfree(merge_flow->action_data);
1511     kfree(merge_flow->mask_data);
1512     kfree(merge_flow->unmasked_data);
1513     WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1514                         &merge_flow->fl_node,
1515                         nfp_flower_table_params));
1516     kfree_rcu(merge_flow, rcu);
1517 }
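
/* Editor's sketch (not part of offload.c): how the 64-bit merge-table key is
 * composed in the list walk above. With exactly two linked sub-flows, the
 * accumulated key ends up as (first_ctx << 32) | second_ctx. The values and
 * the main() harness are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t merge_key(uint32_t ctx_a, uint32_t ctx_b)
{
	uint64_t parent_ctx = 0;

	/* Same scheme as above: shift the key left by 32 bits, then OR in
	 * the next sub-flow's host context ID.
	 */
	parent_ctx = (parent_ctx << 32) | (uint64_t)ctx_a;
	parent_ctx = (parent_ctx << 32) | (uint64_t)ctx_b;
	return parent_ctx;
}

int main(void)
{
	/* Two hypothetical host context IDs; prints key=0x1100000022. */
	printf("key=%#llx\n", (unsigned long long)merge_key(0x11, 0x22));
	return 0;
}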
1518 
1519 void
1520 nfp_flower_del_linked_merge_flows(struct nfp_app *app,
1521                   struct nfp_fl_payload *sub_flow)
1522 {
1523     struct nfp_fl_payload_link *link, *temp;
1524 
1525     /* Remove any merge flow formed from the deleted sub_flow. */
1526     list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
1527                  sub_flow.list)
1528         nfp_flower_remove_merge_flow(app, sub_flow,
1529                          link->merge_flow.flow);
1530 }
1531 
1532 /**
1533  * nfp_flower_del_offload() - Removes a flow from hardware.
1534  * @app:    Pointer to the APP handle
1535  * @netdev: Netdev structure.
1536  * @flow:   TC flower classifier offload structure
1537  *
1538  * Removes the flow from the driver's rhashtable and frees the
1539  * associated payload. Any flows merged from this one are also deleted.
1540  *
1541  * Return: negative value on error, 0 if removed successfully.
1542  */
1543 static int
1544 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
1545                struct flow_cls_offload *flow)
1546 {
1547     struct nfp_flower_priv *priv = app->priv;
1548     struct nfp_fl_ct_map_entry *ct_map_ent;
1549     struct netlink_ext_ack *extack = NULL;
1550     struct nfp_fl_payload *nfp_flow;
1551     struct nfp_port *port = NULL;
1552     int err;
1553 
1554     extack = flow->common.extack;
1555     if (nfp_netdev_is_nfp_repr(netdev))
1556         port = nfp_port_from_netdev(netdev);
1557 
1558     /* Check ct_map_table */
1559     ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
1560                         nfp_ct_map_params);
1561     if (ct_map_ent) {
1562         err = nfp_fl_ct_del_flow(ct_map_ent);
1563         return err;
1564     }
1565 
1566     nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1567     if (!nfp_flow) {
1568         NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
1569         return -ENOENT;
1570     }
1571 
1572     err = nfp_modify_flow_metadata(app, nfp_flow);
1573     if (err)
1574         goto err_free_merge_flow;
1575 
1576     if (nfp_flow->nfp_tun_ipv4_addr)
1577         nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
1578 
1579     if (nfp_flow->nfp_tun_ipv6)
1580         nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
1581 
1582     if (!nfp_flow->in_hw) {
1583         err = 0;
1584         goto err_free_merge_flow;
1585     }
1586 
1587     if (nfp_flow->pre_tun_rule.dev) {
1588         if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
1589             struct nfp_predt_entry *predt;
1590 
1591             predt = nfp_flow->pre_tun_rule.predt;
1592             if (predt) {
1593                 spin_lock_bh(&priv->predt_lock);
1594                 nfp_tun_unlink_and_update_nn_entries(app, predt);
1595                 list_del(&predt->list_head);
1596                 spin_unlock_bh(&priv->predt_lock);
1597                 kfree(predt);
1598             }
1599         } else {
1600             err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
1601         }
1602     } else {
1603         err = nfp_flower_xmit_flow(app, nfp_flow,
1604                        NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1605     }
1606     /* Fall through on error. */
1607 
1608 err_free_merge_flow:
1609     nfp_flower_del_linked_merge_flows(app, nfp_flow);
1610     if (port)
1611         port->tc_offload_cnt--;
1612     kfree(nfp_flow->action_data);
1613     kfree(nfp_flow->mask_data);
1614     kfree(nfp_flow->unmasked_data);
1615     WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1616                         &nfp_flow->fl_node,
1617                         nfp_flower_table_params));
1618     kfree_rcu(nfp_flow, rcu);
1619     return err;
1620 }
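
/* Editor's sketch (not part of offload.c): the lookup precedence used by
 * nfp_flower_del_offload() above. A TC cookie is tried against the conntrack
 * map first and only then against the ordinary flow table, so CT-managed
 * entries never reach the plain delete path. Linear arrays stand in for the
 * driver's rhashtables; all names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct entry { uint64_t cookie; };

static struct entry *find(struct entry *tbl, size_t n, uint64_t cookie)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].cookie == cookie)
			return &tbl[i];
	return NULL;
}

/* Returns 1 if handled as a CT flow, 2 as a plain flow, 0 if unknown. */
static int del_flow(struct entry *ct, size_t n_ct,
		    struct entry *plain, size_t n_plain, uint64_t cookie)
{
	if (find(ct, n_ct, cookie))
		return 1;	/* CT path wins, like the ct_map_table hit */
	if (find(plain, n_plain, cookie))
		return 2;
	return 0;		/* the driver returns -ENOENT here */
}

int main(void)
{
	struct entry ct[] = { { 0xa1 } };
	struct entry plain[] = { { 0xb2 } };

	return !(del_flow(ct, 1, plain, 1, 0xa1) == 1 &&
		 del_flow(ct, 1, plain, 1, 0xb2) == 2 &&
		 del_flow(ct, 1, plain, 1, 0xc3) == 0);
}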
1621 
1622 static void
1623 __nfp_flower_update_merge_stats(struct nfp_app *app,
1624                 struct nfp_fl_payload *merge_flow)
1625 {
1626     struct nfp_flower_priv *priv = app->priv;
1627     struct nfp_fl_payload_link *link;
1628     struct nfp_fl_payload *sub_flow;
1629     u64 pkts, bytes, used;
1630     u32 ctx_id;
1631 
1632     ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
1633     pkts = priv->stats[ctx_id].pkts;
1634     /* Do not cycle through subflows if there are no stats to distribute. */
1635     if (!pkts)
1636         return;
1637     bytes = priv->stats[ctx_id].bytes;
1638     used = priv->stats[ctx_id].used;
1639 
1640     /* Reset stats for the merge flow. */
1641     priv->stats[ctx_id].pkts = 0;
1642     priv->stats[ctx_id].bytes = 0;
1643 
1644     /* The merge flow has received stats updates from firmware.
1645      * Distribute these stats to all subflows that form the merge.
1646      * The stats will then be collected by TC via the subflows.
1647      */
1648     list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
1649         sub_flow = link->sub_flow.flow;
1650         ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
1651         priv->stats[ctx_id].pkts += pkts;
1652         priv->stats[ctx_id].bytes += bytes;
1653         priv->stats[ctx_id].used = max_t(u64, used,
1654                          priv->stats[ctx_id].used);
1655     }
1656 }
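
/* Editor's sketch (not part of offload.c): the arithmetic performed by
 * __nfp_flower_update_merge_stats() above. The merge flow's accumulated
 * counters are credited to every sub-flow, the merge counters are zeroed,
 * and 'used' keeps the most recent timestamp. Values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct ctx_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t used;	/* last-used timestamp */
};

static void distribute(struct ctx_stats *merge,
		       struct ctx_stats *subs, int n_subs)
{
	int i;

	if (!merge->pkts)	/* nothing to distribute */
		return;

	for (i = 0; i < n_subs; i++) {
		subs[i].pkts += merge->pkts;
		subs[i].bytes += merge->bytes;
		if (merge->used > subs[i].used)
			subs[i].used = merge->used;
	}
	merge->pkts = 0;
	merge->bytes = 0;
}

int main(void)
{
	struct ctx_stats merge = { 10, 1500, 42 };
	struct ctx_stats subs[2] = { { 1, 64, 40 }, { 0, 0, 0 } };

	distribute(&merge, subs, 2);
	/* subs[0]: 11 pkts / 1564 bytes / used 42
	 * subs[1]: 10 pkts / 1500 bytes / used 42
	 */
	printf("%llu %llu\n",
	       (unsigned long long)subs[0].pkts,
	       (unsigned long long)subs[1].pkts);
	return 0;
}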
1657 
1658 void
1659 nfp_flower_update_merge_stats(struct nfp_app *app,
1660                   struct nfp_fl_payload *sub_flow)
1661 {
1662     struct nfp_fl_payload_link *link;
1663 
1664     /* Walk the merge flows this subflow is part of and distribute their stats. */
1665     list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
1666         __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
1667 }
1668 
1669 /**
1670  * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
1671  * @app:    Pointer to the APP handle
1672  * @netdev: Netdev structure.
1673  * @flow:   TC flower classifier offload structure
1674  *
1675  * Populates a flow statistics structure which corresponds to a
1676  * specific flow.
1677  *
1678  * Return: negative value on error, 0 if stats populated successfully.
1679  */
1680 static int
1681 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
1682              struct flow_cls_offload *flow)
1683 {
1684     struct nfp_flower_priv *priv = app->priv;
1685     struct nfp_fl_ct_map_entry *ct_map_ent;
1686     struct netlink_ext_ack *extack = NULL;
1687     struct nfp_fl_payload *nfp_flow;
1688     u32 ctx_id;
1689 
1690     /* Check the ct_map_table first */
1691     ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
1692                         nfp_ct_map_params);
1693     if (ct_map_ent)
1694         return nfp_fl_ct_stats(flow, ct_map_ent);
1695 
1696     extack = flow->common.extack;
1697     nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
1698     if (!nfp_flow) {
1699         NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
1700         return -EINVAL;
1701     }
1702 
1703     ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
1704 
1705     spin_lock_bh(&priv->stats_lock);
1706     /* If request is for a sub_flow, update stats from merged flows. */
1707     if (!list_empty(&nfp_flow->linked_flows))
1708         nfp_flower_update_merge_stats(app, nfp_flow);
1709 
1710     flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
1711               priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
1712               FLOW_ACTION_HW_STATS_DELAYED);
1713 
1714     priv->stats[ctx_id].pkts = 0;
1715     priv->stats[ctx_id].bytes = 0;
1716     spin_unlock_bh(&priv->stats_lock);
1717 
1718     return 0;
1719 }
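
/* Editor's sketch (not part of offload.c): the read-and-reset pattern used
 * by nfp_flower_get_stats() above. Because stats are reported to TC as
 * FLOW_ACTION_HW_STATS_DELAYED deltas, each dump hands over only what
 * accumulated since the previous dump, then zeroes the counters so nothing
 * is double-counted. In the driver this runs under priv->stats_lock; this
 * single-threaded userspace analogue omits the locking. Names are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct ctx_stats { uint64_t pkts, bytes; };

static void dump_stats(struct ctx_stats *s,
		       uint64_t *pkts_out, uint64_t *bytes_out)
{
	*pkts_out = s->pkts;
	*bytes_out = s->bytes;
	s->pkts = 0;	/* reset so the next dump reports a fresh delta */
	s->bytes = 0;
}

int main(void)
{
	struct ctx_stats s = { 5, 300 };
	uint64_t p, b;

	dump_stats(&s, &p, &b);	/* p=5, b=300 */
	dump_stats(&s, &p, &b);	/* p=0, b=0: nothing new since last dump */
	printf("%llu %llu\n", (unsigned long long)p, (unsigned long long)b);
	return 0;
}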
1720 
1721 static int
1722 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
1723             struct flow_cls_offload *flower)
1724 {
1725     if (!eth_proto_is_802_3(flower->common.protocol))
1726         return -EOPNOTSUPP;
1727 
1728     switch (flower->command) {
1729     case FLOW_CLS_REPLACE:
1730         return nfp_flower_add_offload(app, netdev, flower);
1731     case FLOW_CLS_DESTROY:
1732         return nfp_flower_del_offload(app, netdev, flower);
1733     case FLOW_CLS_STATS:
1734         return nfp_flower_get_stats(app, netdev, flower);
1735     default:
1736         return -EOPNOTSUPP;
1737     }
1738 }
1739 
1740 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
1741                     void *type_data, void *cb_priv)
1742 {
1743     struct flow_cls_common_offload *common = type_data;
1744     struct nfp_repr *repr = cb_priv;
1745 
1746     if (!tc_can_offload_extack(repr->netdev, common->extack))
1747         return -EOPNOTSUPP;
1748 
1749     switch (type) {
1750     case TC_SETUP_CLSFLOWER:
1751         return nfp_flower_repr_offload(repr->app, repr->netdev,
1752                            type_data);
1753     case TC_SETUP_CLSMATCHALL:
1754         return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
1755                             type_data);
1756     default:
1757         return -EOPNOTSUPP;
1758     }
1759 }
1760 
1761 static LIST_HEAD(nfp_block_cb_list);
1762 
1763 static int nfp_flower_setup_tc_block(struct net_device *netdev,
1764                      struct flow_block_offload *f)
1765 {
1766     struct nfp_repr *repr = netdev_priv(netdev);
1767     struct nfp_flower_repr_priv *repr_priv;
1768     struct flow_block_cb *block_cb;
1769 
1770     if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1771         return -EOPNOTSUPP;
1772 
1773     repr_priv = repr->app_priv;
1774     repr_priv->block_shared = f->block_shared;
1775     f->driver_block_list = &nfp_block_cb_list;
1776 
1777     switch (f->command) {
1778     case FLOW_BLOCK_BIND:
1779         if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
1780                       &nfp_block_cb_list))
1781             return -EBUSY;
1782 
1783         block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
1784                            repr, repr, NULL);
1785         if (IS_ERR(block_cb))
1786             return PTR_ERR(block_cb);
1787 
1788         flow_block_cb_add(block_cb, f);
1789         list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1790         return 0;
1791     case FLOW_BLOCK_UNBIND:
1792         block_cb = flow_block_cb_lookup(f->block,
1793                         nfp_flower_setup_tc_block_cb,
1794                         repr);
1795         if (!block_cb)
1796             return -ENOENT;
1797 
1798         flow_block_cb_remove(block_cb, f);
1799         list_del(&block_cb->driver_list);
1800         return 0;
1801     default:
1802         return -EOPNOTSUPP;
1803     }
1804 }
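
/* Editor's sketch (not part of offload.c): why the bind path above calls
 * flow_block_cb_is_busy() before allocating. A driver-wide list records
 * every (callback, ident) pair already bound, and a second bind of the same
 * pair is refused with -EBUSY. Minimal userspace analogue; all names are
 * hypothetical.
 */
#include <errno.h>
#include <stddef.h>

#define MAX_BINDS 8

struct bind { void *cb; void *ident; };

static struct bind binds[MAX_BINDS];
static size_t n_binds;

static int block_bind(void *cb, void *ident)
{
	size_t i;

	for (i = 0; i < n_binds; i++)
		if (binds[i].cb == cb && binds[i].ident == ident)
			return -EBUSY;	/* already bound: refuse */

	if (n_binds == MAX_BINDS)
		return -ENOMEM;

	binds[n_binds].cb = cb;		/* track the new binding */
	binds[n_binds].ident = ident;
	n_binds++;
	return 0;
}

int main(void)
{
	int cb, id;	/* dummy objects, only their addresses matter */

	if (block_bind(&cb, &id))	/* first bind succeeds */
		return 1;
	return block_bind(&cb, &id) == -EBUSY ? 0 : 1; /* duplicate refused */
}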
1805 
1806 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
1807             enum tc_setup_type type, void *type_data)
1808 {
1809     switch (type) {
1810     case TC_SETUP_BLOCK:
1811         return nfp_flower_setup_tc_block(netdev, type_data);
1812     default:
1813         return -EOPNOTSUPP;
1814     }
1815 }
1816 
1817 struct nfp_flower_indr_block_cb_priv {
1818     struct net_device *netdev;
1819     struct nfp_app *app;
1820     struct list_head list;
1821 };
1822 
1823 static struct nfp_flower_indr_block_cb_priv *
1824 nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
1825                      struct net_device *netdev)
1826 {
1827     struct nfp_flower_indr_block_cb_priv *cb_priv;
1828     struct nfp_flower_priv *priv = app->priv;
1829 
1830     list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
1831         if (cb_priv->netdev == netdev)
1832             return cb_priv;
1833 
1834     return NULL;
1835 }
1836 
1837 static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
1838                       void *type_data, void *cb_priv)
1839 {
1840     struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1841 
1842     switch (type) {
1843     case TC_SETUP_CLSFLOWER:
1844         return nfp_flower_repr_offload(priv->app, priv->netdev,
1845                            type_data);
1846     default:
1847         return -EOPNOTSUPP;
1848     }
1849 }
1850 
1851 void nfp_flower_setup_indr_tc_release(void *cb_priv)
1852 {
1853     struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
1854 
1855     list_del(&priv->list);
1856     kfree(priv);
1857 }
1858 
1859 static int
1860 nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
1861                    struct flow_block_offload *f, void *data,
1862                    void (*cleanup)(struct flow_block_cb *block_cb))
1863 {
1864     struct nfp_flower_indr_block_cb_priv *cb_priv;
1865     struct nfp_flower_priv *priv = app->priv;
1866     struct flow_block_cb *block_cb;
1867 
1868     if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1869          !nfp_flower_internal_port_can_offload(app, netdev)) ||
1870         (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
1871          nfp_flower_internal_port_can_offload(app, netdev)))
1872         return -EOPNOTSUPP;
1873 
1874     switch (f->command) {
1875     case FLOW_BLOCK_BIND:
1876         cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1877         if (cb_priv &&
1878             flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
1879                       cb_priv,
1880                       &nfp_block_cb_list))
1881             return -EBUSY;
1882 
1883         cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1884         if (!cb_priv)
1885             return -ENOMEM;
1886 
1887         cb_priv->netdev = netdev;
1888         cb_priv->app = app;
1889         list_add(&cb_priv->list, &priv->indr_block_cb_priv);
1890 
1891         block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
1892                             cb_priv, cb_priv,
1893                             nfp_flower_setup_indr_tc_release,
1894                             f, netdev, sch, data, app, cleanup);
1895         if (IS_ERR(block_cb)) {
1896             list_del(&cb_priv->list);
1897             kfree(cb_priv);
1898             return PTR_ERR(block_cb);
1899         }
1900 
1901         flow_block_cb_add(block_cb, f);
1902         list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
1903         return 0;
1904     case FLOW_BLOCK_UNBIND:
1905         cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1906         if (!cb_priv)
1907             return -ENOENT;
1908 
1909         block_cb = flow_block_cb_lookup(f->block,
1910                         nfp_flower_setup_indr_block_cb,
1911                         cb_priv);
1912         if (!block_cb)
1913             return -ENOENT;
1914 
1915         flow_indr_block_cb_remove(block_cb, f);
1916         list_del(&block_cb->driver_list);
1917         return 0;
1918     default:
1919         return -EOPNOTSUPP;
1920     }
1921     return 0;
1922 }
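
/* Editor's sketch (not part of offload.c): the binder-type gate at the top
 * of nfp_flower_setup_indr_tc_block() above, written as a plain predicate.
 * Ordinary devices may only bind ingress blocks, while internal ports may
 * only bind egress blocks; every other combination is rejected with
 * -EOPNOTSUPP. Enum and names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum binder { INGRESS, EGRESS };

static bool bind_allowed(enum binder type, bool internal_port)
{
	if (internal_port)
		return type == EGRESS;	/* internal ports: egress only */
	return type == INGRESS;		/* everything else: ingress only */
}

int main(void)
{
	printf("repr+ingress:  %d\n", bind_allowed(INGRESS, false)); /* 1 */
	printf("repr+egress:   %d\n", bind_allowed(EGRESS, false));  /* 0 */
	printf("inner+ingress: %d\n", bind_allowed(INGRESS, true));  /* 0 */
	printf("inner+egress:  %d\n", bind_allowed(EGRESS, true));   /* 1 */
	return 0;
}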
1923 
1924 static int
1925 nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
1926 {
1927     if (!data)
1928         return -EOPNOTSUPP;
1929 
1930     switch (type) {
1931     case TC_SETUP_ACT:
1932         return nfp_setup_tc_act_offload(app, data);
1933     default:
1934         return -EOPNOTSUPP;
1935     }
1936 }
1937 
1938 int
1939 nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
1940                 enum tc_setup_type type, void *type_data,
1941                 void *data,
1942                 void (*cleanup)(struct flow_block_cb *block_cb))
1943 {
1944     if (!netdev)
1945         return nfp_setup_tc_no_dev(cb_priv, type, data);
1946 
1947     if (!nfp_fl_is_netdev_to_offload(netdev))
1948         return -EOPNOTSUPP;
1949 
1950     switch (type) {
1951     case TC_SETUP_BLOCK:
1952         return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
1953                               type_data, data, cleanup);
1954     default:
1955         return -EOPNOTSUPP;
1956     }
1957 }