// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

#define NFP_TUN_PRE_TUN_RULE_LIMIT  32
#define NFP_TUN_PRE_TUN_RULE_DEL    BIT(0)
#define NFP_TUN_PRE_TUN_IDX_BIT     BIT(3)
#define NFP_TUN_PRE_TUN_IPV6_BIT    BIT(7)

/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:      options for the rule offload
 * @port_idx:       index of destination MAC address for the rule
 * @vlan_tci:       VLAN info associated with MAC
 * @host_ctx_id:    stats context of rule to update
 */
struct nfp_tun_pre_tun_rule {
    __be32 flags;
    __be16 port_idx;
    __be16 vlan_tci;
    __be32 host_ctx_id;
};

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:        sequence number of the message
 * @count:      number of tunnels reported in the message
 * @flags:      options part of the request
 * @tun_info.ipv4:      dest IPv4 address of active route
 * @tun_info.egress_port:   port the encapsulated packet egressed
 * @tun_info.extra:     reserved for future use
 * @tun_info:       tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
    __be32 seq;
    __be32 count;
    __be32 flags;
    struct route_ip_info {
        __be32 ipv4;
        __be32 egress_port;
        __be32 extra[2];
    } tun_info[];
};

/**
 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
 * @seq:        sequence number of the message
 * @count:      number of tunnels reported in the message
 * @flags:      options part of the request
 * @tun_info.ipv6:      dest IPv6 address of active route
 * @tun_info.egress_port:   port the encapsulated packet egressed
 * @tun_info.extra:     reserved for future use
 * @tun_info:       tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns_v6 {
    __be32 seq;
    __be32 count;
    __be32 flags;
    struct route_ip_info_v6 {
        struct in6_addr ipv6;
        __be32 egress_port;
        __be32 extra[2];
    } tun_info[];
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:   ingress port of packet that signalled request
 * @ipv4_addr:      destination ipv4 address for route
 * @reserved:       reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
    __be32 ingress_port;
    __be32 ipv4_addr;
    __be32 reserved[2];
};

/**
 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
 * @ingress_port:   ingress port of packet that signalled request
 * @ipv6_addr:      destination ipv6 address for route
 */
struct nfp_tun_req_route_ipv6 {
    __be32 ingress_port;
    struct in6_addr ipv6_addr;
};

/**
 * struct nfp_offloaded_route - routes that are offloaded to the NFP
 * @list:   list pointer
 * @ip_add: destination of route - can be IPv4 or IPv6
 */
struct nfp_offloaded_route {
    struct list_head list;
    u8 ip_add[];
};

#define NFP_FL_IPV4_ADDRS_MAX        32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:  number of IPs populated in the array
 * @ipv4_addr:  array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
    __be32 count;
    __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:  IP address
 * @ref_count:  number of rules currently using this IP
 * @list:   list pointer
 */
struct nfp_ipv4_addr_entry {
    __be32 ipv4_addr;
    int ref_count;
    struct list_head list;
};

#define NFP_FL_IPV6_ADDRS_MAX        4

/**
 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
 * @count:  number of IPs populated in the array
 * @ipv6_addr:  array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
 */
struct nfp_tun_ipv6_addr {
    __be32 count;
    struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG    0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:  MAC address offload options
 * @count:  number of MAC addresses in the message (should be 1)
 * @index:  index of MAC address in the lookup table
 * @addr:   interface MAC address
 */
struct nfp_tun_mac_addr_offload {
    __be16 flags;
    __be16 count;
    __be16 index;
    u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
    NFP_TUNNEL_MAC_OFFLOAD_ADD =        0,
    NFP_TUNNEL_MAC_OFFLOAD_DEL =        1,
    NFP_TUNNEL_MAC_OFFLOAD_MOD =        2,
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:        Hashtable entry
 * @addr:       Offloaded MAC address
 * @index:      Offloaded index for given MAC address
 * @ref_count:      Number of devs using this MAC address
 * @repr_list:      List of reprs sharing this MAC address
 * @bridge_count:   Number of bridge/internal devs with MAC
 */
struct nfp_tun_offloaded_mac {
    struct rhash_head ht_node;
    u8 addr[ETH_ALEN];
    u16 index;
    int ref_count;
    struct list_head repr_list;
    int bridge_count;
};

static const struct rhashtable_params offloaded_macs_params = {
    .key_offset = offsetof(struct nfp_tun_offloaded_mac, addr),
    .head_offset    = offsetof(struct nfp_tun_offloaded_mac, ht_node),
    .key_len    = ETH_ALEN,
    .automatic_shrinking    = true,
};

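/* Handle a periodic active-tunnels control message from the firmware.
 * Each reported IPv4 tunnel destination is looked up in the ARP table
 * for its egress netdev and, if found, the neighbour is refreshed,
 * presumably so the kernel does not expire entries whose traffic is
 * only visible to the offloaded datapath.
 */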
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
    struct nfp_tun_active_tuns *payload;
    struct net_device *netdev;
    int count, i, pay_len;
    struct neighbour *n;
    __be32 ipv4_addr;
    u32 port;

    payload = nfp_flower_cmsg_get_data(skb);
    count = be32_to_cpu(payload->count);
    if (count > NFP_FL_MAX_ROUTES) {
        nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
        return;
    }

    pay_len = nfp_flower_cmsg_get_data_len(skb);
    if (pay_len != struct_size(payload, tun_info, count)) {
        nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
        return;
    }

    rcu_read_lock();
    for (i = 0; i < count; i++) {
        ipv4_addr = payload->tun_info[i].ipv4;
        port = be32_to_cpu(payload->tun_info[i].egress_port);
        netdev = nfp_app_dev_get(app, port, NULL);
        if (!netdev)
            continue;

        n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
        if (!n)
            continue;

        /* Update the used timestamp of neighbour */
        neigh_event_send(n, NULL);
        neigh_release(n);
    }
    rcu_read_unlock();
}

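/* IPv6 counterpart of nfp_tunnel_keep_alive(), refreshing entries in
 * the IPv6 neighbour discovery table (nd_tbl) instead of arp_tbl.
 */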
void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
    struct nfp_tun_active_tuns_v6 *payload;
    struct net_device *netdev;
    int count, i, pay_len;
    struct neighbour *n;
    void *ipv6_add;
    u32 port;

    payload = nfp_flower_cmsg_get_data(skb);
    count = be32_to_cpu(payload->count);
    if (count > NFP_FL_IPV6_ADDRS_MAX) {
        nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
        return;
    }

    pay_len = nfp_flower_cmsg_get_data_len(skb);
    if (pay_len != struct_size(payload, tun_info, count)) {
        nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
        return;
    }

    rcu_read_lock();
    for (i = 0; i < count; i++) {
        ipv6_add = &payload->tun_info[i].ipv6;
        port = be32_to_cpu(payload->tun_info[i].egress_port);
        netdev = nfp_app_dev_get(app, port, NULL);
        if (!netdev)
            continue;

        n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
        if (!n)
            continue;

        /* Update the used timestamp of neighbour */
        neigh_event_send(n, NULL);
        neigh_release(n);
    }
    rcu_read_unlock();
#endif
}

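/* Allocate a control message of the given type and forward @pdata to
 * the firmware. If the firmware does not advertise decap v2 support,
 * the extension part of neighbour payloads is trimmed before sending.
 */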
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
             gfp_t flag)
{
    struct nfp_flower_priv *priv = app->priv;
    struct sk_buff *skb;
    unsigned char *msg;

    if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) &&
        (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
         mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
        plen -= sizeof(struct nfp_tun_neigh_ext);

    skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
    if (!skb)
        return -ENOMEM;

    msg = nfp_flower_cmsg_get_data(skb);
    memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

    nfp_ctrl_tx(app->ctrl, skb);
    return 0;
}

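/* Link a pre-tunnel rule and a neighbour entry to each other when they
 * agree on IP version and on the local/remote MAC pair, and fill the
 * neighbour's extension fields (host context and VLAN) from the flow.
 */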
static void
nfp_tun_mutual_link(struct nfp_predt_entry *predt,
            struct nfp_neigh_entry *neigh)
{
    struct nfp_fl_payload *flow_pay = predt->flow_pay;
    struct nfp_tun_neigh_ext *ext;
    struct nfp_tun_neigh *common;

    if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6)
        return;

    /* In the case of bonding it is possible that there might already
     * be a flow linked (as the MAC address gets shared). If a flow
     * is already linked just return.
     */
    if (neigh->flow)
        return;

    common = neigh->is_ipv6 ?
         &((struct nfp_tun_neigh_v6 *)neigh->payload)->common :
         &((struct nfp_tun_neigh_v4 *)neigh->payload)->common;
    ext = neigh->is_ipv6 ?
         &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
         &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;

    if (memcmp(flow_pay->pre_tun_rule.loc_mac,
           common->src_addr, ETH_ALEN) ||
        memcmp(flow_pay->pre_tun_rule.rem_mac,
           common->dst_addr, ETH_ALEN))
        return;

    list_add(&neigh->list_head, &predt->nn_list);
    neigh->flow = predt;
    ext->host_ctx = flow_pay->meta.host_ctx_id;
    ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci;
    ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid;
}

static void
nfp_tun_link_predt_entries(struct nfp_app *app,
               struct nfp_neigh_entry *nn_entry)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_predt_entry *predt, *tmp;

    list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) {
        nfp_tun_mutual_link(predt, nn_entry);
    }
}

void nfp_tun_link_and_update_nn_entries(struct nfp_app *app,
                    struct nfp_predt_entry *predt)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_neigh_entry *nn_entry;
    struct rhashtable_iter iter;
    size_t neigh_size;
    u8 type;

    rhashtable_walk_enter(&priv->neigh_table, &iter);
    rhashtable_walk_start(&iter);
    while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) {
        if (IS_ERR(nn_entry))
            continue;
        nfp_tun_mutual_link(predt, nn_entry);
        neigh_size = nn_entry->is_ipv6 ?
                 sizeof(struct nfp_tun_neigh_v6) :
                 sizeof(struct nfp_tun_neigh_v4);
        type = nn_entry->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
                       NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
        nfp_flower_xmit_tun_conf(app, type, neigh_size,
                     nn_entry->payload,
                     GFP_ATOMIC);
    }
    rhashtable_walk_stop(&iter);
    rhashtable_walk_exit(&iter);
}

static void nfp_tun_cleanup_nn_entries(struct nfp_app *app)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_neigh_entry *neigh;
    struct nfp_tun_neigh_ext *ext;
    struct rhashtable_iter iter;
    size_t neigh_size;
    u8 type;

    rhashtable_walk_enter(&priv->neigh_table, &iter);
    rhashtable_walk_start(&iter);
    while ((neigh = rhashtable_walk_next(&iter)) != NULL) {
        if (IS_ERR(neigh))
            continue;
        ext = neigh->is_ipv6 ?
             &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
             &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
        ext->host_ctx = cpu_to_be32(U32_MAX);
        ext->vlan_tpid = cpu_to_be16(U16_MAX);
        ext->vlan_tci = cpu_to_be16(U16_MAX);

        neigh_size = neigh->is_ipv6 ?
                 sizeof(struct nfp_tun_neigh_v6) :
                 sizeof(struct nfp_tun_neigh_v4);
        type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
                       NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
        nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
                     GFP_ATOMIC);

        rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node,
                       neigh_table_params);
        if (neigh->flow)
            list_del(&neigh->list_head);
        kfree(neigh);
    }
    rhashtable_walk_stop(&iter);
    rhashtable_walk_exit(&iter);
}

void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
                      struct nfp_predt_entry *predt)
{
    struct nfp_neigh_entry *neigh, *tmp;
    struct nfp_tun_neigh_ext *ext;
    size_t neigh_size;
    u8 type;

    list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) {
        ext = neigh->is_ipv6 ?
             &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
             &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
        neigh->flow = NULL;
        ext->host_ctx = cpu_to_be32(U32_MAX);
        ext->vlan_tpid = cpu_to_be16(U16_MAX);
        ext->vlan_tci = cpu_to_be16(U16_MAX);
        list_del(&neigh->list_head);
        neigh_size = neigh->is_ipv6 ?
                 sizeof(struct nfp_tun_neigh_v6) :
                 sizeof(struct nfp_tun_neigh_v4);
        type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
                       NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
        nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
                     GFP_ATOMIC);
    }
}

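/* Create, update or remove the offloaded neighbour entry for @neigh.
 * A new valid neighbour is cached in neigh_table and sent to the
 * firmware; an invalidated one is removed from the table and re-sent
 * with a cleared payload (only the destination address kept); an
 * existing valid entry is only re-sent when @override is set.
 */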
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
            void *flow, struct neighbour *neigh, bool is_ipv6,
            bool override)
{
    bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
    size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
                sizeof(struct nfp_tun_neigh_v4);
    unsigned long cookie = (unsigned long)neigh;
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_neigh_entry *nn_entry;
    u32 port_id;
    u8 mtype;

    port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
    if (!port_id)
        return;

    spin_lock_bh(&priv->predt_lock);
    nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
                      neigh_table_params);
    if (!nn_entry && !neigh_invalid) {
        struct nfp_tun_neigh_ext *ext;
        struct nfp_tun_neigh *common;

        nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
                   GFP_ATOMIC);
        if (!nn_entry)
            goto err;

        nn_entry->payload = (char *)&nn_entry[1];
        nn_entry->neigh_cookie = cookie;
        nn_entry->is_ipv6 = is_ipv6;
        nn_entry->flow = NULL;
        if (is_ipv6) {
            struct flowi6 *flowi6 = (struct flowi6 *)flow;
            struct nfp_tun_neigh_v6 *payload;

            payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
            payload->src_ipv6 = flowi6->saddr;
            payload->dst_ipv6 = flowi6->daddr;
            common = &payload->common;
            ext = &payload->ext;
            mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
        } else {
            struct flowi4 *flowi4 = (struct flowi4 *)flow;
            struct nfp_tun_neigh_v4 *payload;

            payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
            payload->src_ipv4 = flowi4->saddr;
            payload->dst_ipv4 = flowi4->daddr;
            common = &payload->common;
            ext = &payload->ext;
            mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
        }
        ext->host_ctx = cpu_to_be32(U32_MAX);
        ext->vlan_tpid = cpu_to_be16(U16_MAX);
        ext->vlan_tci = cpu_to_be16(U16_MAX);
        ether_addr_copy(common->src_addr, netdev->dev_addr);
        neigh_ha_snapshot(common->dst_addr, neigh, netdev);
        common->port_id = cpu_to_be32(port_id);

        if (rhashtable_insert_fast(&priv->neigh_table,
                       &nn_entry->ht_node,
                       neigh_table_params))
            goto err;

        nfp_tun_link_predt_entries(app, nn_entry);
        nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
                     nn_entry->payload,
                     GFP_ATOMIC);
    } else if (nn_entry && neigh_invalid) {
        if (is_ipv6) {
            struct flowi6 *flowi6 = (struct flowi6 *)flow;
            struct nfp_tun_neigh_v6 *payload;

            payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
            memset(payload, 0, sizeof(struct nfp_tun_neigh_v6));
            payload->dst_ipv6 = flowi6->daddr;
            mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
        } else {
            struct flowi4 *flowi4 = (struct flowi4 *)flow;
            struct nfp_tun_neigh_v4 *payload;

            payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
            memset(payload, 0, sizeof(struct nfp_tun_neigh_v4));
            payload->dst_ipv4 = flowi4->daddr;
            mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
        }
        /* Trigger ARP to verify invalid neighbour state. */
        neigh_event_send(neigh, NULL);
        rhashtable_remove_fast(&priv->neigh_table,
                       &nn_entry->ht_node,
                       neigh_table_params);

        nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
                     nn_entry->payload,
                     GFP_ATOMIC);

        if (nn_entry->flow)
            list_del(&nn_entry->list_head);
        kfree(nn_entry);
    } else if (nn_entry && !neigh_invalid && override) {
        mtype = is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
                NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
        nfp_tun_link_predt_entries(app, nn_entry);
        nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
                     nn_entry->payload,
                     GFP_ATOMIC);
    }

    spin_unlock_bh(&priv->predt_lock);
    return;

err:
    kfree(nn_entry);
    spin_unlock_bh(&priv->predt_lock);
    nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
}

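/* Notifier callback for kernel neighbour updates and redirects. Events
 * on netdevs that the flower app can offload trigger a route lookup (to
 * populate the source address for new entries) and an update of the
 * offloaded neighbour state via nfp_tun_write_neigh().
 */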
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
                void *ptr)
{
    struct nfp_flower_priv *app_priv;
    struct netevent_redirect *redir;
    struct neighbour *n;
    struct nfp_app *app;
    bool neigh_invalid;
    int err;

    switch (event) {
    case NETEVENT_REDIRECT:
        redir = (struct netevent_redirect *)ptr;
        n = redir->neigh;
        break;
    case NETEVENT_NEIGH_UPDATE:
        n = (struct neighbour *)ptr;
        break;
    default:
        return NOTIFY_DONE;
    }

    neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;

    app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
    app = app_priv->app;

    if (!nfp_netdev_is_nfp_repr(n->dev) &&
        !nfp_flower_internal_port_can_offload(app, n->dev))
        return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
    if (n->tbl->family == AF_INET6) {
#if IS_ENABLED(CONFIG_IPV6)
        struct flowi6 flow6 = {};

        flow6.daddr = *(struct in6_addr *)n->primary_key;
        if (!neigh_invalid) {
            struct dst_entry *dst;
            /* Use ipv6_dst_lookup_flow to populate flow6->saddr
             * and other fields. This information is only needed
             * for new entries, lookup can be skipped when an entry
             * gets invalidated - as only the daddr is needed for
             * deleting.
             */
            dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
                          &flow6, NULL);
            if (IS_ERR(dst))
                return NOTIFY_DONE;

            dst_release(dst);
        }
        nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
#else
        return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
    } else {
        struct flowi4 flow4 = {};

        flow4.daddr = *(__be32 *)n->primary_key;
        if (!neigh_invalid) {
            struct rtable *rt;
            /* Use ip_route_output_key to populate flow4->saddr and
             * other fields. This information is only needed for
             * new entries, lookup can be skipped when an entry
             * gets invalidated - as only the daddr is needed for
             * deleting.
             */
            rt = ip_route_output_key(dev_net(n->dev), &flow4);
            err = PTR_ERR_OR_ZERO(rt);
            if (err)
                return NOTIFY_DONE;

            ip_rt_put(rt);
        }
        nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
    }
#else
    return NOTIFY_DONE;
#endif /* CONFIG_INET */

    return NOTIFY_OK;
}

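/* Handle a firmware request to look up an IPv4 route/neighbour for a
 * tunnel destination. The route lookup runs in the namespace of the
 * ingress port and the resulting neighbour is offloaded with @override
 * set, so an already-cached valid entry is still re-sent.
 */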
void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
    struct nfp_tun_req_route_ipv4 *payload;
    struct net_device *netdev;
    struct flowi4 flow = {};
    struct neighbour *n;
    struct rtable *rt;
    int err;

    payload = nfp_flower_cmsg_get_data(skb);

    rcu_read_lock();
    netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
    if (!netdev)
        goto fail_rcu_unlock;

    flow.daddr = payload->ipv4_addr;
    flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
    /* Do a route lookup on same namespace as ingress port. */
    rt = ip_route_output_key(dev_net(netdev), &flow);
    err = PTR_ERR_OR_ZERO(rt);
    if (err)
        goto fail_rcu_unlock;
#else
    goto fail_rcu_unlock;
#endif

    /* Get the neighbour entry for the lookup */
    n = dst_neigh_lookup(&rt->dst, &flow.daddr);
    ip_rt_put(rt);
    if (!n)
        goto fail_rcu_unlock;
    nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
    neigh_release(n);
    rcu_read_unlock();
    return;

fail_rcu_unlock:
    rcu_read_unlock();
    nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
{
    struct nfp_tun_req_route_ipv6 *payload;
    struct net_device *netdev;
    struct flowi6 flow = {};
    struct dst_entry *dst;
    struct neighbour *n;

    payload = nfp_flower_cmsg_get_data(skb);

    rcu_read_lock();
    netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
    if (!netdev)
        goto fail_rcu_unlock;

    flow.daddr = payload->ipv6_addr;
    flow.flowi6_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
    dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
                          NULL);
    if (IS_ERR(dst))
        goto fail_rcu_unlock;
#else
    goto fail_rcu_unlock;
#endif

    n = dst_neigh_lookup(dst, &flow.daddr);
    dst_release(dst);
    if (!n)
        goto fail_rcu_unlock;

    nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
    neigh_release(n);
    rcu_read_unlock();
    return;

fail_rcu_unlock:
    rcu_read_unlock();
    nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
}

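/* Push the full list of offloaded IPv4 tunnel endpoint addresses to
 * the firmware as a single NFP_FLOWER_CMSG_TYPE_TUN_IPS message.
 */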
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_ipv4_addr_entry *entry;
    struct nfp_tun_ipv4_addr payload;
    struct list_head *ptr, *storage;
    int count;

    memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
    mutex_lock(&priv->tun.ipv4_off_lock);
    count = 0;
    list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
        if (count >= NFP_FL_IPV4_ADDRS_MAX) {
            mutex_unlock(&priv->tun.ipv4_off_lock);
            nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
            return;
        }
        entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
        payload.ipv4_addr[count++] = entry->ipv4_addr;
    }
    payload.count = cpu_to_be32(count);
    mutex_unlock(&priv->tun.ipv4_off_lock);

    nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
                 sizeof(struct nfp_tun_ipv4_addr),
                 &payload, GFP_KERNEL);
}

void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_ipv4_addr_entry *entry;
    struct list_head *ptr, *storage;

    mutex_lock(&priv->tun.ipv4_off_lock);
    list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
        entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
        if (entry->ipv4_addr == ipv4) {
            entry->ref_count++;
            mutex_unlock(&priv->tun.ipv4_off_lock);
            return;
        }
    }

    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry) {
        mutex_unlock(&priv->tun.ipv4_off_lock);
        nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
        return;
    }
    entry->ipv4_addr = ipv4;
    entry->ref_count = 1;
    list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
    mutex_unlock(&priv->tun.ipv4_off_lock);

    nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_ipv4_addr_entry *entry;
    struct list_head *ptr, *storage;

    mutex_lock(&priv->tun.ipv4_off_lock);
    list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
        entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
        if (entry->ipv4_addr == ipv4) {
            entry->ref_count--;
            if (!entry->ref_count) {
                list_del(&entry->list);
                kfree(entry);
            }
            break;
        }
    }
    mutex_unlock(&priv->tun.ipv4_off_lock);

    nfp_tun_write_ipv4_list(app);
}

static void nfp_tun_write_ipv6_list(struct nfp_app *app)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_ipv6_addr_entry *entry;
    struct nfp_tun_ipv6_addr payload;
    int count = 0;

    memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
    mutex_lock(&priv->tun.ipv6_off_lock);
    list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
        if (count >= NFP_FL_IPV6_ADDRS_MAX) {
            nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
            break;
        }
        payload.ipv6_addr[count++] = entry->ipv6_addr;
    }
    mutex_unlock(&priv->tun.ipv6_off_lock);
    payload.count = cpu_to_be32(count);

    nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
                 sizeof(struct nfp_tun_ipv6_addr),
                 &payload, GFP_KERNEL);
}

struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_ipv6_addr_entry *entry;

    mutex_lock(&priv->tun.ipv6_off_lock);
    list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
        if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
            entry->ref_count++;
            mutex_unlock(&priv->tun.ipv6_off_lock);
            return entry;
        }

    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry) {
        mutex_unlock(&priv->tun.ipv6_off_lock);
        nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
        return NULL;
    }
    entry->ipv6_addr = *ipv6;
    entry->ref_count = 1;
    list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
    mutex_unlock(&priv->tun.ipv6_off_lock);

    nfp_tun_write_ipv6_list(app);

    return entry;
}

void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
{
    struct nfp_flower_priv *priv = app->priv;
    bool freed = false;

    mutex_lock(&priv->tun.ipv6_off_lock);
    if (!--entry->ref_count) {
        list_del(&entry->list);
        kfree(entry);
        freed = true;
    }
    mutex_unlock(&priv->tun.ipv6_off_lock);

    if (freed)
        nfp_tun_write_ipv6_list(app);
}

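/* Send a single MAC address add/delete to the firmware's tunnel
 * endpoint MAC lookup table at the given index.
 */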
static int
__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
    struct nfp_tun_mac_addr_offload payload;

    memset(&payload, 0, sizeof(payload));

    if (del)
        payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

    /* FW supports multiple MACs per cmsg but restrict to single. */
    payload.count = cpu_to_be16(1);
    payload.index = cpu_to_be16(idx);
    ether_addr_copy(payload.addr, mac);

    return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
                    sizeof(struct nfp_tun_mac_addr_offload),
                    &payload, GFP_KERNEL);
}

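/* Offloaded MAC indexes encode a port type in the low byte and an
 * identifier above it ((id << 8) | type): physical reprs use their
 * port id, while shared or non-repr MACs use an id from an ida.
 */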
static bool nfp_tunnel_port_is_phy_repr(int port)
{
    if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
        NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
        return true;

    return false;
}

static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
    return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
    return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
    return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
    return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
    struct nfp_flower_priv *priv = app->priv;

    return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
                      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
                       struct net_device *netdev, bool mod)
{
    if (nfp_netdev_is_nfp_repr(netdev)) {
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_repr *repr;

        repr = netdev_priv(netdev);
        repr_priv = repr->app_priv;

        /* If modifying MAC, remove repr from old list first. */
        if (mod)
            list_del(&repr_priv->mac_list);

        list_add_tail(&repr_priv->mac_list, &entry->repr_list);
    } else if (nfp_flower_is_supported_bridge(netdev)) {
        entry->bridge_count++;
    }

    entry->ref_count++;
}

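/* Offload the MAC address of @netdev, reusing an existing table entry
 * when the address is already offloaded. A physical repr that owns the
 * address exclusively keeps a port-based index; otherwise a global
 * index is allocated, with the pre-tunnel bit set for bridge devices.
 */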
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
              int port, bool mod)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_tun_offloaded_mac *entry;
    int ida_idx = -1, err;
    u16 nfp_mac_idx = 0;

    entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
    if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
        if (entry->bridge_count ||
            !nfp_flower_is_supported_bridge(netdev)) {
            nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
                                   netdev, mod);
            return 0;
        }

        /* MAC is global but matches need to go to pre_tun table. */
        nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
    }

    if (!nfp_mac_idx) {
        /* Assign a global index if non-repr or MAC is now shared. */
        if (entry || !port) {
            ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
                        NFP_MAX_MAC_INDEX, GFP_KERNEL);
            if (ida_idx < 0)
                return ida_idx;

            nfp_mac_idx =
                nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);

            if (nfp_flower_is_supported_bridge(netdev))
                nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;

        } else {
            nfp_mac_idx =
                nfp_tunnel_get_mac_idx_from_phy_port_id(port);
        }
    }

    if (!entry) {
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
            err = -ENOMEM;
            goto err_free_ida;
        }

        ether_addr_copy(entry->addr, netdev->dev_addr);
        INIT_LIST_HEAD(&entry->repr_list);

        if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
                       &entry->ht_node,
                       offloaded_macs_params)) {
            err = -ENOMEM;
            goto err_free_entry;
        }
    }

    err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
                       nfp_mac_idx, false);
    if (err) {
        /* If not shared then free. */
        if (!entry->ref_count)
            goto err_remove_hash;
        goto err_free_ida;
    }

    entry->index = nfp_mac_idx;
    nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

    return 0;

err_remove_hash:
    rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
                   offloaded_macs_params);
err_free_entry:
    kfree(entry);
err_free_ida:
    if (ida_idx != -1)
        ida_free(&priv->tun.mac_off_ids, ida_idx);

    return err;
}

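/* Drop a reference to an offloaded MAC address. The entry may revert
 * from a global to a port-based index when only one repr still uses
 * it, and is deleted from the firmware and freed on the last
 * reference.
 */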
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
              const u8 *mac, bool mod)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_flower_repr_priv *repr_priv;
    struct nfp_tun_offloaded_mac *entry;
    struct nfp_repr *repr;
    u16 nfp_mac_idx;
    int ida_idx;

    entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
    if (!entry)
        return 0;

    entry->ref_count--;
    /* If del is part of a mod then mac_list is still in use elsewhere. */
    if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
        repr = netdev_priv(netdev);
        repr_priv = repr->app_priv;
        list_del(&repr_priv->mac_list);
    }

    if (nfp_flower_is_supported_bridge(netdev)) {
        entry->bridge_count--;

        if (!entry->bridge_count && entry->ref_count) {
            nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
            if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
                             false)) {
                nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
                             netdev_name(netdev));
                return 0;
            }

            entry->index = nfp_mac_idx;
            return 0;
        }
    }

    /* If MAC is now used by 1 repr set the offloaded MAC index to port. */
    if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
        int port, err;

        repr_priv = list_first_entry(&entry->repr_list,
                         struct nfp_flower_repr_priv,
                         mac_list);
        repr = repr_priv->nfp_repr;
        port = nfp_repr_get_port_id(repr->netdev);
        nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
        err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
        if (err) {
            nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
                         netdev_name(netdev));
            return 0;
        }

        ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
        ida_free(&priv->tun.mac_off_ids, ida_idx);
        entry->index = nfp_mac_idx;
        return 0;
    }

    if (entry->ref_count)
        return 0;

    WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
                        &entry->ht_node,
                        offloaded_macs_params));

    if (nfp_flower_is_supported_bridge(netdev))
        nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
    else
        nfp_mac_idx = entry->index;

    /* If MAC has global ID then extract and free the ida entry. */
    if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
        ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
        ida_free(&priv->tun.mac_off_ids, ida_idx);
    }

    kfree(entry);

    return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

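/* Apply a MAC offload add/delete/modify for @netdev, resolving whether
 * it is a repr or another offloadable netdev and tracking the offload
 * state in the matching private data.
 */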
static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
               enum nfp_flower_mac_offload_cmd cmd)
{
    struct nfp_flower_non_repr_priv *nr_priv = NULL;
    bool non_repr = false, *mac_offloaded;
    u8 *off_mac = NULL;
    int err, port = 0;

    if (nfp_netdev_is_nfp_repr(netdev)) {
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_repr *repr;

        repr = netdev_priv(netdev);
        if (repr->app != app)
            return 0;

        repr_priv = repr->app_priv;
        if (repr_priv->on_bridge)
            return 0;

        mac_offloaded = &repr_priv->mac_offloaded;
        off_mac = &repr_priv->offloaded_mac_addr[0];
        port = nfp_repr_get_port_id(netdev);
        if (!nfp_tunnel_port_is_phy_repr(port))
            return 0;
    } else if (nfp_fl_is_netdev_to_offload(netdev)) {
        nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
        if (!nr_priv)
            return -ENOMEM;

        mac_offloaded = &nr_priv->mac_offloaded;
        off_mac = &nr_priv->offloaded_mac_addr[0];
        non_repr = true;
    } else {
        return 0;
    }

    if (!is_valid_ether_addr(netdev->dev_addr)) {
        err = -EINVAL;
        goto err_put_non_repr_priv;
    }

    if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
        cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

    switch (cmd) {
    case NFP_TUNNEL_MAC_OFFLOAD_ADD:
        err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
        if (err)
            goto err_put_non_repr_priv;

        if (non_repr)
            __nfp_flower_non_repr_priv_get(nr_priv);

        *mac_offloaded = true;
        ether_addr_copy(off_mac, netdev->dev_addr);
        break;
    case NFP_TUNNEL_MAC_OFFLOAD_DEL:
        /* Only attempt delete if add was successful. */
        if (!*mac_offloaded)
            break;

        if (non_repr)
            __nfp_flower_non_repr_priv_put(nr_priv);

        *mac_offloaded = false;

        err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
                        false);
        if (err)
            goto err_put_non_repr_priv;

        break;
    case NFP_TUNNEL_MAC_OFFLOAD_MOD:
        /* Ignore if changing to the same address. */
        if (ether_addr_equal(netdev->dev_addr, off_mac))
            break;

        err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
        if (err)
            goto err_put_non_repr_priv;

        /* Delete the previous MAC address. */
        err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
        if (err)
            nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
                         netdev_name(netdev));

        ether_addr_copy(off_mac, netdev->dev_addr);
        break;
    default:
        err = -EINVAL;
        goto err_put_non_repr_priv;
    }

    if (non_repr)
        __nfp_flower_non_repr_priv_put(nr_priv);

    return 0;

err_put_non_repr_priv:
    if (non_repr)
        __nfp_flower_non_repr_priv_put(nr_priv);

    return err;
}

int nfp_tunnel_mac_event_handler(struct nfp_app *app,
                 struct net_device *netdev,
                 unsigned long event, void *ptr)
{
    int err;

    if (event == NETDEV_DOWN) {
        err = nfp_tunnel_offload_mac(app, netdev,
                         NFP_TUNNEL_MAC_OFFLOAD_DEL);
        if (err)
            nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
                         netdev_name(netdev));
    } else if (event == NETDEV_UP) {
        err = nfp_tunnel_offload_mac(app, netdev,
                         NFP_TUNNEL_MAC_OFFLOAD_ADD);
        if (err)
            nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
                         netdev_name(netdev));
    } else if (event == NETDEV_CHANGEADDR) {
        /* Only offload addr change if netdev is already up. */
        if (!(netdev->flags & IFF_UP))
            return NOTIFY_OK;

        err = nfp_tunnel_offload_mac(app, netdev,
                         NFP_TUNNEL_MAC_OFFLOAD_MOD);
        if (err)
            nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
                         netdev_name(netdev));
    } else if (event == NETDEV_CHANGEUPPER) {
        /* If a repr is attached to a bridge then tunnel packets
         * entering the physical port are directed through the bridge
         * datapath and cannot be directly detunneled. Therefore,
         * associated offloaded MACs and indexes should not be used
         * by fw for detunneling.
         */
        struct netdev_notifier_changeupper_info *info = ptr;
        struct net_device *upper = info->upper_dev;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_repr *repr;

        if (!nfp_netdev_is_nfp_repr(netdev) ||
            !nfp_flower_is_supported_bridge(upper))
            return NOTIFY_OK;

        repr = netdev_priv(netdev);
        if (repr->app != app)
            return NOTIFY_OK;

        repr_priv = repr->app_priv;

        if (info->linking) {
            if (nfp_tunnel_offload_mac(app, netdev,
                           NFP_TUNNEL_MAC_OFFLOAD_DEL))
                nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
                             netdev_name(netdev));
            repr_priv->on_bridge = true;
        } else {
            repr_priv->on_bridge = false;

            if (!(netdev->flags & IFF_UP))
                return NOTIFY_OK;

            if (nfp_tunnel_offload_mac(app, netdev,
                           NFP_TUNNEL_MAC_OFFLOAD_ADD))
                nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
                             netdev_name(netdev));
        }
    }
    return NOTIFY_OK;
}

int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
                 struct nfp_fl_payload *flow)
{
    struct nfp_flower_priv *app_priv = app->priv;
    struct nfp_tun_offloaded_mac *mac_entry;
    struct nfp_flower_meta_tci *key_meta;
    struct nfp_tun_pre_tun_rule payload;
    struct net_device *internal_dev;
    int err;

    if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
        return -ENOSPC;

    memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

    internal_dev = flow->pre_tun_rule.dev;
    payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
    payload.host_ctx_id = flow->meta.host_ctx_id;

    /* Lookup MAC index for the pre-tunnel rule egress device.
     * Note that because the device is always an internal port, it will
     * have a constant global index so does not need to be tracked.
     */
    mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
                             internal_dev->dev_addr);
    if (!mac_entry)
        return -ENOENT;

    /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
     * set/clear for port_idx.
     */
    key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
    if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
        mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
    else
        mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;

    payload.port_idx = cpu_to_be16(mac_entry->index);

    /* Copy mac id and vlan to flow - dev may not exist at delete time. */
    flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
    flow->pre_tun_rule.port_idx = payload.port_idx;

    err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
                       sizeof(struct nfp_tun_pre_tun_rule),
                       (unsigned char *)&payload, GFP_KERNEL);
    if (err)
        return err;

    app_priv->pre_tun_rule_cnt++;

    return 0;
}

int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
                     struct nfp_fl_payload *flow)
{
    struct nfp_flower_priv *app_priv = app->priv;
    struct nfp_tun_pre_tun_rule payload;
    u32 tmp_flags = 0;
    int err;

    memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

    tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
    payload.flags = cpu_to_be32(tmp_flags);
    payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
    payload.port_idx = flow->pre_tun_rule.port_idx;

    err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
                       sizeof(struct nfp_tun_pre_tun_rule),
                       (unsigned char *)&payload, GFP_KERNEL);
    if (err)
        return err;

    app_priv->pre_tun_rule_cnt--;

    return 0;
}

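/* Initialise tunnel offload state for the flower app: the MAC offload
 * hashtable and ida, the IPv4/IPv6 endpoint address lists, and the
 * netevent notifier used to track neighbour updates.
 */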
int nfp_tunnel_config_start(struct nfp_app *app)
{
    struct nfp_flower_priv *priv = app->priv;
    int err;

    /* Initialise rhash for MAC offload tracking. */
    err = rhashtable_init(&priv->tun.offloaded_macs,
                  &offloaded_macs_params);
    if (err)
        return err;

    ida_init(&priv->tun.mac_off_ids);

    /* Initialise priv data for IPv4/v6 offloading. */
    mutex_init(&priv->tun.ipv4_off_lock);
    INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
    mutex_init(&priv->tun.ipv6_off_lock);
    INIT_LIST_HEAD(&priv->tun.ipv6_off_list);

    /* Initialise priv data for neighbour offloading. */
    priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

    err = register_netevent_notifier(&priv->tun.neigh_nb);
    if (err) {
        rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
                        nfp_check_rhashtable_empty, NULL);
        return err;
    }

    return 0;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
    struct nfp_flower_priv *priv = app->priv;
    struct nfp_ipv4_addr_entry *ip_entry;
    struct list_head *ptr, *storage;

    unregister_netevent_notifier(&priv->tun.neigh_nb);

    ida_destroy(&priv->tun.mac_off_ids);

    /* Free any memory that may be occupied by ipv4 list. */
    list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
        ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
        list_del(&ip_entry->list);
        kfree(ip_entry);
    }

    mutex_destroy(&priv->tun.ipv6_off_lock);

    /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
    rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
                    nfp_check_rhashtable_empty, NULL);

    nfp_tun_cleanup_nn_entries(app);
}