Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
0003  *
0004  * This software is available to you under a choice of one of two
0005  * licenses.  You may choose to be licensed under the terms of the GNU
0006  * General Public License (GPL) Version 2, available from the file
0007  * COPYING in the main directory of this source tree, or the
0008  * OpenIB.org BSD license below:
0009  *
0010  *     Redistribution and use in source and binary forms, with or
0011  *     without modification, are permitted provided that the following
0012  *     conditions are met:
0013  *
0014  *      - Redistributions of source code must retain the above
0015  *        copyright notice, this list of conditions and the following
0016  *        disclaimer.
0017  *
0018  *      - Redistributions in binary form must reproduce the above
0019  *        copyright notice, this list of conditions and the following
0020  *        disclaimer in the documentation and/or other materials
0021  *        provided with the distribution.
0022  *
0023  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
0024  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
0025  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
0026  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
0027  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
0028  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
0029  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0030  * SOFTWARE.
0031  */
0032 
0033 #include <linux/mlx5/fs.h>
0034 #include <net/switchdev.h>
0035 #include <net/pkt_cls.h>
0036 #include <net/act_api.h>
0037 #include <net/devlink.h>
0038 #include <net/ipv6_stubs.h>
0039 
0040 #include "eswitch.h"
0041 #include "en.h"
0042 #include "en_rep.h"
0043 #include "en/params.h"
0044 #include "en/txrx.h"
0045 #include "en_tc.h"
0046 #include "en/rep/tc.h"
0047 #include "en/rep/neigh.h"
0048 #include "en/rep/bridge.h"
0049 #include "en/devlink.h"
0050 #include "fs_core.h"
0051 #include "lib/mlx5.h"
0052 #include "lib/devcom.h"
0053 #include "lib/vxlan.h"
0054 #define CREATE_TRACE_POINTS
0055 #include "diag/en_rep_tracepoint.h"
0056 #include "en_accel/ipsec.h"
0057 #include "en/tc/int_port.h"
0058 #include "en/ptp.h"
0059 
0060 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
0061     max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
0062 #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
0063 
0064 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
0065 
0066 static void mlx5e_rep_get_drvinfo(struct net_device *dev,
0067                   struct ethtool_drvinfo *drvinfo)
0068 {
0069     struct mlx5e_priv *priv = netdev_priv(dev);
0070     struct mlx5_core_dev *mdev = priv->mdev;
0071 
0072     strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
0073         sizeof(drvinfo->driver));
0074     snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
0075          "%d.%d.%04d (%.16s)",
0076          fw_rev_maj(mdev), fw_rev_min(mdev),
0077          fw_rev_sub(mdev), mdev->board_id);
0078 }
0079 
/* Software (per-channel, CPU-side) counters a rep exposes via ethtool -S. */
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
0086 
/* Layout used only to name/offset the vport HW counters below; the values
 * themselves live in priv->stats.vf_vport (struct rtnl_link_stats64).
 */
struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};
0093 
/* Per-vport HW counters a rep exposes via ethtool -S (see struct vport_stats). */
static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};
0100 
0101 #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
0102 #define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
0103 
/* Number of counters contributed by the SW rep stats group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}
0108 
0109 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
0110 {
0111     int i;
0112 
0113     for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
0114         strcpy(data + (idx++) * ETH_GSTRING_LEN,
0115                sw_rep_stats_desc[i].format);
0116     return idx;
0117 }
0118 
0119 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
0120 {
0121     int i;
0122 
0123     for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
0124         data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
0125                            sw_rep_stats_desc, i);
0126     return idx;
0127 }
0128 
/* Refresh the SW stats group: fold all per-channel counters into one
 * rtnl_link_stats64 snapshot, then copy over the fields this group reports.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	/* Zero first: mlx5e_fold_sw_stats64() accumulates into stats64 only */
	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes   = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes   = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}
0143 
/* Number of counters contributed by the vport HW stats group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS;
}
0148 
0149 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
0150 {
0151     int i;
0152 
0153     for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
0154         strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
0155     return idx;
0156 }
0157 
0158 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
0159 {
0160     int i;
0161 
0162     for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
0163         data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
0164                            vport_rep_stats_desc, i);
0165     return idx;
0166 }
0167 
/* Refresh the vport HW stats group by querying the eswitch for this rep's
 * vport counters. On query failure the cached values are left unchanged.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
			    rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes   = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes   = vf_stats.rx_bytes;
}
0191 
0192 static void mlx5e_rep_get_strings(struct net_device *dev,
0193                   u32 stringset, uint8_t *data)
0194 {
0195     struct mlx5e_priv *priv = netdev_priv(dev);
0196 
0197     switch (stringset) {
0198     case ETH_SS_STATS:
0199         mlx5e_stats_fill_strings(priv, data);
0200         break;
0201     }
0202 }
0203 
0204 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
0205                     struct ethtool_stats *stats, u64 *data)
0206 {
0207     struct mlx5e_priv *priv = netdev_priv(dev);
0208 
0209     mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
0210 }
0211 
0212 static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
0213 {
0214     struct mlx5e_priv *priv = netdev_priv(dev);
0215 
0216     switch (sset) {
0217     case ETH_SS_STATS:
0218         return mlx5e_stats_total_num(priv);
0219     default:
0220         return -EOPNOTSUPP;
0221     }
0222 }
0223 
/* ethtool .get_ringparam: delegate to the common mlx5e implementation. */
static void
mlx5e_rep_get_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	mlx5e_ethtool_get_ringparam(netdev_priv(dev), param, kernel_param);
}
0234 
/* ethtool .set_ringparam: delegate to the common mlx5e implementation. */
static int
mlx5e_rep_set_ringparam(struct net_device *dev,
			struct ethtool_ringparam *param,
			struct kernel_ethtool_ringparam *kernel_param,
			struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_set_ringparam(netdev_priv(dev), param);
}
0245 
/* ethtool .get_channels: delegate to the common mlx5e implementation. */
static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	mlx5e_ethtool_get_channels(netdev_priv(dev), ch);
}
0253 
/* ethtool .set_channels: delegate to the common mlx5e implementation. */
static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	return mlx5e_ethtool_set_channels(netdev_priv(dev), ch);
}
0261 
/* ethtool .get_coalesce: delegate to the common mlx5e implementation. */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_get_coalesce(netdev_priv(netdev), coal,
					  kernel_coal);
}
0271 
/* ethtool .set_coalesce: delegate to the common mlx5e implementation. */
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_set_coalesce(netdev_priv(netdev), coal,
					  kernel_coal, extack);
}
0281 
0282 static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
0283 {
0284     struct mlx5e_priv *priv = netdev_priv(netdev);
0285 
0286     return mlx5e_ethtool_get_rxfh_key_size(priv);
0287 }
0288 
0289 static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
0290 {
0291     struct mlx5e_priv *priv = netdev_priv(netdev);
0292 
0293     return mlx5e_ethtool_get_rxfh_indir_size(priv);
0294 }
0295 
/* Ethtool callbacks for representor netdevs. Reps expose only a subset of
 * the full mlx5e ethtool surface: stats, rings, channels, coalescing and
 * RSS table/key sizes.
 */
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings	   = mlx5e_rep_get_strings,
	.get_sset_count	   = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam	   = mlx5e_rep_get_ringparam,
	.set_ringparam	   = mlx5e_rep_set_ringparam,
	.get_channels	   = mlx5e_rep_get_channels,
	.set_channels	   = mlx5e_rep_set_channels,
	.get_coalesce	   = mlx5e_rep_get_coalesce,
	.set_coalesce	   = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
0314 
/* Remove and free every send-to-vport steering rule installed for @rep,
 * including the peer-eswitch rule when one was created. No-op unless the
 * eswitch is in switchdev (offloads) mode.
 */
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* _safe iteration: entries are unlinked and freed while walking */
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		if (rep_sq->send_to_vport_rule_peer)
			mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
0333 
/* Install a send-to-vport (re-inject) steering rule for each SQ in
 * @sqns_array so traffic sent on the rep's SQs reaches @rep's vport.
 * When the eswitch is paired with a peer (multi-port eswitch / devcom),
 * a mirror rule is added on the peer eswitch as well.
 *
 * Returns 0 on success; on failure all rules added so far are rolled back
 * via mlx5e_sqs2vport_stop(). No-op (returns 0) unless in offloads mode.
 */
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_eswitch *peer_esw = NULL;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* Holding the peer data pins the pairing for the duration of setup */
	if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
						     MLX5_DEVCOM_ESW_OFFLOADS);

	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		rep_sq->sqn = sqns_array[i];

		if (peer_esw) {
			/* Mirror rule on the peer eswitch for this SQ */
			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
									rep, sqns_array[i]);
			if (IS_ERR(flow_rule)) {
				err = PTR_ERR(flow_rule);
				/* Undo this entry before the common rollback;
				 * it was not yet linked into vport_sqs_list.
				 */
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
				kfree(rep_sq);
				goto out_err;
			}
			rep_sq->send_to_vport_rule_peer = flow_rule;
		}

		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return 0;

out_err:
	/* Rolls back every entry already linked into vport_sqs_list */
	mlx5e_sqs2vport_stop(esw, rep);

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}
0399 
/* Collect the SQ numbers of all of this rep's active channels (per-TC
 * txqsqs, plus XDP SQs on the uplink rep, plus PTP SQs when the TX port
 * timestamping channel is active) and install send-to-vport rules for them.
 *
 * Return: 0 on success, negative errno on allocation or rule failure.
 */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool is_uplink_rep = mlx5e_is_uplink_rep(priv);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int n, tc, nch, num_sqs = 0;
	struct mlx5e_channel *c;
	int err = -ENOMEM;
	bool ptp_sq;
	u32 *sqs;

	ptp_sq = !!(priv->channels.ptp &&
		    MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
	nch = priv->channels.num + ptp_sq;
	/* +2 for xdpsqs, they don't exist on the ptp channel but will not be
	 * counted for by num_sqs.
	 */
	if (is_uplink_rep)
		sqs_per_channel += 2;

	sqs = kvcalloc(nch * sqs_per_channel, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;

		if (is_uplink_rep) {
			/* rq_xdpsq only exists when XDP is attached */
			if (c->xdp)
				sqs[num_sqs++] = c->rq_xdpsq.sqn;

			sqs[num_sqs++] = c->xdpsq.sqn;
		}
	}
	if (ptp_sq) {
		struct mlx5e_ptp *ptp_ch = priv->channels.ptp;

		for (tc = 0; tc < ptp_ch->num_tc; tc++)
			sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kvfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}
0453 
0454 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
0455 {
0456     struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
0457     struct mlx5e_rep_priv *rpriv = priv->ppriv;
0458     struct mlx5_eswitch_rep *rep = rpriv->rep;
0459 
0460     mlx5e_sqs2vport_stop(esw, rep);
0461 }
0462 
/* ndo_open for representor netdevs: open the channels under the state lock,
 * then set the eswitch vport admin state UP. Carrier is raised only when
 * the vport state change succeeds (returns 0).
 */
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	/* 0 == success; a failed state change leaves carrier off but the
	 * open itself still succeeds.
	 */
	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
0485 
/* ndo_stop for representor netdevs: set the eswitch vport admin state DOWN
 * first (stops traffic at the vport), then close the channels. The state
 * change's return value is intentionally ignored - close proceeds anyway.
 */
static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}
0502 
0503 bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
0504 {
0505     struct mlx5e_rep_priv *rpriv = priv->ppriv;
0506     struct mlx5_eswitch_rep *rep;
0507 
0508     if (!MLX5_ESWITCH_MANAGER(priv->mdev))
0509         return false;
0510 
0511     if (!rpriv) /* non vport rep mlx5e instances don't use this field */
0512         return false;
0513 
0514     rep = rpriv->rep;
0515     return (rep->vport == MLX5_VPORT_UPLINK);
0516 }
0517 
0518 bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
0519 {
0520     switch (attr_id) {
0521     case IFLA_OFFLOAD_XSTATS_CPU_HIT:
0522             return true;
0523     }
0524 
0525     return false;
0526 }
0527 
/* Fold the per-channel SW counters into @stats; always succeeds. */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	mlx5e_fold_sw_stats64(netdev_priv(dev), stats);
	return 0;
}
0537 
0538 int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
0539                 void *sp)
0540 {
0541     switch (attr_id) {
0542     case IFLA_OFFLOAD_XSTATS_CPU_HIT:
0543         return mlx5e_get_sw_stats64(dev, sp);
0544     }
0545 
0546     return -EINVAL;
0547 }
0548 
0549 static void
0550 mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
0551 {
0552     struct mlx5e_priv *priv = netdev_priv(dev);
0553 
0554     /* update HW stats in background for next time */
0555     mlx5e_queue_update_stats(priv);
0556     memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
0557 }
0558 
/* ndo_change_mtu: delegate to the common mlx5e MTU path; reps pass no
 * preactivate hook (NULL).
 */
static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
0563 
0564 static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
0565 {
0566     struct mlx5e_priv *priv = netdev_priv(netdev);
0567     struct mlx5e_rep_priv *rpriv = priv->ppriv;
0568     struct mlx5_core_dev *dev = priv->mdev;
0569 
0570     return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
0571 }
0572 
0573 static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
0574 {
0575     struct mlx5e_priv *priv = netdev_priv(dev);
0576     struct mlx5e_rep_priv *rpriv = priv->ppriv;
0577     struct mlx5_eswitch_rep *rep = rpriv->rep;
0578     int err;
0579 
0580     if (new_carrier) {
0581         err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
0582                             rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
0583         if (err)
0584             return err;
0585         netif_carrier_on(dev);
0586     } else {
0587         err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
0588                             rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
0589         if (err)
0590             return err;
0591         netif_carrier_off(dev);
0592     }
0593     return 0;
0594 }
0595 
/* net_device_ops for vport representor netdevs. The uplink rep is
 * identified by the regular mlx5e netdev ops instead - see
 * mlx5e_eswitch_uplink_rep()/mlx5e_eswitch_vf_rep() below.
 */
static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
	.ndo_change_carrier      = mlx5e_rep_change_carrier,
};
0608 
/* True when @netdev is the uplink representor: it uses the regular mlx5e
 * netdev ops (mlx5e_netdev_ops, defined elsewhere - not the rep ops above)
 * and its rep vport is MLX5_VPORT_UPLINK.
 */
bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops &&
	       mlx5e_is_uplink_rep(netdev_priv(netdev));
}
0614 
/* True when @netdev is a VF/SF representor (uses the rep netdev ops). */
bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
0619 
0620 /* One indirect TIR set for outer. Inner not supported in reps. */
0621 #define REP_NUM_INDIR_TIRS MLX5E_NUM_INDIR_TIRS
0622 
0623 static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
0624 {
0625     int max_tir_num = 1 << MLX5_CAP_GEN(mdev, log_max_tir);
0626     int num_vports = mlx5_eswitch_get_total_vports(mdev);
0627 
0628     return (max_tir_num - mlx5e_get_pf_num_tirs(mdev)
0629         - (num_vports * REP_NUM_INDIR_TIRS)) / num_vports;
0630 }
0631 
/* Fill priv->channels.params with the defaults used by representor
 * netdevs: one channel, reduced SQ size for non-uplink reps, single TC,
 * no tunnel offload, and VLAN stripping disabled on non-uplink reps.
 */
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	/* Prefer CQE-based moderation start when the HW supports it */
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
				     MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
				     MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;

	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ: the uplink rep carries real traffic, so it gets the full-size
	 * SQ; other reps use the smaller rep default.
	 */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->mqprio.num_tc       = 1;
	params->tunneled_offload_en = false;
	if (rep->vport != MLX5_VPORT_UPLINK)
		params->vlan_strip_disable = true;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
0670 
/* One-time netdev setup for a representor: ops, ethtool ops, a random MAC,
 * and the feature set reps support (TC offload when enabled, checksum,
 * GRO/TSO). Reps are pinned to their net namespace (NETNS_LOCAL).
 */
static void mlx5e_build_rep_netdev(struct net_device *netdev,
				   struct mlx5_core_dev *mdev)
{
	SET_NETDEV_DEV(netdev, mdev->device);
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
	eth_hw_addr_random(netdev);
	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->watchdog_timeo    = 15 * HZ;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	netdev->hw_features    |= NETIF_F_HW_TC;
#endif
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	/* Everything toggleable starts enabled */
	netdev->features |= netdev->hw_features;
	netdev->features |= NETIF_F_NETNS_LOCAL;
}
0695 
/* Profile .init for non-uplink representors: allocate the flow-steering
 * context, then set up default channel params and timestamping state.
 *
 * Return: 0 on success, -ENOMEM when FS allocation fails.
 */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}
0713 
/* Profile .init for the uplink representor: like mlx5e_init_rep() but also
 * initializes IPsec and VXLAN netdev info. IPsec init failure is logged
 * and deliberately non-fatal (best-effort feature).
 *
 * Return: 0 on success, -ENOMEM when FS allocation fails.
 */
static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	priv->fs = mlx5e_fs_init(priv->profile, mdev,
				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
	if (!priv->fs) {
		netdev_err(priv->netdev, "FS allocation failed\n");
		return -ENOMEM;
	}

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);

	mlx5e_vxlan_set_netdev_info(priv);
	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);
	return 0;
}
0736 
/* Profile .cleanup counterpart of mlx5e_init_rep()/mlx5e_init_ul_rep():
 * releases the flow-steering context and IPsec state (mlx5e_ipsec_cleanup
 * is expected to tolerate IPsec never having been set up - TODO confirm).
 */
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_fs_cleanup(priv->fs);
	mlx5e_ipsec_cleanup(priv);
}
0742 
/* Create the rep's TTC (traffic type classifier) flow table in the kernel
 * namespace. Inner TTC is intentionally not used for reps.
 *
 * Return: 0 on success, negative errno from table creation otherwise.
 */
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int err;

	priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	mlx5e_set_ttc_params(priv, &ttc_params, false);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* Bump non-uplink rep TTC one level up so the uplink rep's
		 * TTC keeps the lower level and can be chained from root ft.
		 */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(priv->fs->ttc)) {
		err = PTR_ERR(priv->fs->ttc);
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
			   err);
		return err;
	}
	return 0;
}
0769 
/* Set up rpriv->root_ft, the first table rep RX traffic hits.
 * Non-uplink reps reuse their TTC table directly; the uplink rep gets an
 * empty table in the offloads namespace whose miss rule chains onward
 * (to ethtool steering or TTC tables).
 *
 * Return: 0 on success, -EOPNOTSUPP when the offloads namespace is absent,
 * or the error from flow table creation.
 */
static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non uplink reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		/* NULL marks "nothing to destroy" for the cleanup path */
		rpriv->root_ft = NULL;
	}

	return err;
}
0805 
0806 static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
0807 {
0808     struct mlx5e_rep_priv *rpriv = priv->ppriv;
0809     struct mlx5_eswitch_rep *rep = rpriv->rep;
0810 
0811     if (rep->vport != MLX5_VPORT_UPLINK)
0812         return;
0813     mlx5_destroy_flow_table(rpriv->root_ft);
0814 }
0815 
0816 static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
0817 {
0818     struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
0819     struct mlx5e_rep_priv *rpriv = priv->ppriv;
0820     struct mlx5_eswitch_rep *rep = rpriv->rep;
0821     struct mlx5_flow_handle *flow_rule;
0822     struct mlx5_flow_destination dest;
0823 
0824     dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
0825     dest.ft = rpriv->root_ft;
0826 
0827     flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
0828     if (IS_ERR(flow_rule))
0829         return PTR_ERR(flow_rule);
0830     rpriv->vport_rx_rule = flow_rule;
0831     return 0;
0832 }
0833 
0834 static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
0835 {
0836     struct mlx5e_rep_priv *rpriv = priv->ppriv;
0837 
0838     if (!rpriv->vport_rx_rule)
0839         return;
0840 
0841     mlx5_del_flow_rules(rpriv->vport_rx_rule);
0842     rpriv->vport_rx_rule = NULL;
0843 }
0844 
0845 int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
0846 {
0847     rep_vport_rx_rule_destroy(priv);
0848 
0849     return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
0850 }
0851 
0852 static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
0853 {
0854     struct mlx5_core_dev *mdev = priv->mdev;
0855     int err;
0856 
0857     priv->rx_res = mlx5e_rx_res_alloc();
0858     if (!priv->rx_res) {
0859         err = -ENOMEM;
0860         goto err_free_fs;
0861     }
0862 
0863     mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
0864 
0865     err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
0866     if (err) {
0867         mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
0868         return err;
0869     }
0870 
0871     err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
0872                 priv->max_nch, priv->drop_rq.rqn,
0873                 &priv->channels.params.packet_merge,
0874                 priv->channels.params.num_channels);
0875     if (err)
0876         goto err_close_drop_rq;
0877 
0878     err = mlx5e_create_rep_ttc_table(priv);
0879     if (err)
0880         goto err_destroy_rx_res;
0881 
0882     err = mlx5e_create_rep_root_ft(priv);
0883     if (err)
0884         goto err_destroy_ttc_table;
0885 
0886     err = mlx5e_create_rep_vport_rx_rule(priv);
0887     if (err)
0888         goto err_destroy_root_ft;
0889 
0890     mlx5e_ethtool_init_steering(priv);
0891 
0892     return 0;
0893 
0894 err_destroy_root_ft:
0895     mlx5e_destroy_rep_root_ft(priv);
0896 err_destroy_ttc_table:
0897     mlx5_destroy_ttc_table(priv->fs->ttc);
0898 err_destroy_rx_res:
0899     mlx5e_rx_res_destroy(priv->rx_res);
0900 err_close_drop_rq:
0901     mlx5e_close_drop_rq(&priv->drop_rq);
0902     mlx5e_rx_res_free(priv->rx_res);
0903     priv->rx_res = NULL;
0904 err_free_fs:
0905     mlx5e_fs_cleanup(priv->fs);
0906     return err;
0907 }
0908 
/* Profile .cleanup_rx for representors: undo mlx5e_init_rep_rx() in the
 * reverse order of setup (rule, root FT, TTC, RX resources, drop RQ).
 */
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_ethtool_cleanup_steering(priv);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5_destroy_ttc_table(priv->fs->ttc);
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
}
0920 
/* Uplink-representor RX init: q counters, common representor RX path,
 * then TC internal-port RX state.
 *
 * Returns 0 on success or a negative errno.  On failure the q counters
 * created here are destroyed again — the original code leaked them by
 * returning err without the matching mlx5e_destroy_q_counters() call.
 */
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
    int err;

    mlx5e_create_q_counters(priv);
    err = mlx5e_init_rep_rx(priv);
    if (err)
        goto err_destroy_counters;

    mlx5e_tc_int_port_init_rep_rx(priv);

    return 0;

err_destroy_counters:
    mlx5e_destroy_q_counters(priv);
    return err;
}
0935 
/* Reverse of mlx5e_init_ul_rep_rx(): TC internal-port state, common
 * representor RX path, then the q counters.
 */
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
    mlx5e_tc_int_port_cleanup_rep_rx(priv);
    mlx5e_cleanup_rep_rx(priv);
    mlx5e_destroy_q_counters(priv);
}
0942 
/* Uplink-only TX init: TC offload state, tunnel entropy, bond (LAG)
 * support and the netdevice-event notifier used for TC offloads.
 * Partially initialized state is unwound on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
    struct mlx5_rep_uplink_priv *uplink_priv;
    struct net_device *netdev;
    struct mlx5e_priv *priv;
    int err;

    netdev = rpriv->netdev;
    priv = netdev_priv(netdev);
    uplink_priv = &rpriv->uplink_priv;

    err = mlx5e_rep_tc_init(rpriv);
    if (err)
        return err;

    mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

    /* NOTE(review): mlx5e_rep_bond_init()'s return value (if any) is not
     * checked here — presumably it cannot fail; confirm against en/rep/bond.
     */
    mlx5e_rep_bond_init(rpriv);
    err = mlx5e_rep_tc_netdevice_event_register(rpriv);
    if (err) {
        mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
                  err);
        goto err_event_reg;
    }

    return 0;

err_event_reg:
    /* Unwind in reverse order of initialization. */
    mlx5e_rep_bond_cleanup(rpriv);
    mlx5e_rep_tc_cleanup(rpriv);
    return err;
}
0975 
/* Reverse of mlx5e_init_uplink_rep_tx(): notifier, bond state, TC state. */
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
    mlx5e_rep_tc_netdevice_event_unregister(rpriv);
    mlx5e_rep_bond_cleanup(rpriv);
    mlx5e_rep_tc_cleanup(rpriv);
}
0982 
/* TX init common to all representors: create TISes, then (uplink only)
 * the uplink TX state, then the TC flow hashtable.
 *
 * Returns 0 on success or a negative errno; everything created here is
 * unwound on failure.
 */
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
    struct mlx5e_rep_priv *rpriv = priv->ppriv;
    int err;

    err = mlx5e_create_tises(priv);
    if (err) {
        mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
        return err;
    }

    /* Extra TX-side state only exists for the uplink representor. */
    if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
        err = mlx5e_init_uplink_rep_tx(rpriv);
        if (err)
            goto err_init_tx;
    }

    err = mlx5e_tc_ht_init(&rpriv->tc_ht);
    if (err)
        goto err_ht_init;

    return 0;

err_ht_init:
    if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
        mlx5e_cleanup_uplink_rep_tx(rpriv);
err_init_tx:
    mlx5e_destroy_tises(priv);
    return err;
}
1013 
/* Reverse of mlx5e_init_rep_tx(): TC hashtable, uplink TX state
 * (uplink representor only), then the TISes.
 */
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
    struct mlx5e_rep_priv *rpriv = priv->ppriv;

    mlx5e_tc_ht_cleanup(&rpriv->tc_ht);

    if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
        mlx5e_cleanup_uplink_rep_tx(rpriv);

    mlx5e_destroy_tises(priv);
}
1025 
/* Profile .enable callback for VF representors: set MTU bounds on the
 * netdev and start neighbour tracking for encap offloads.
 */
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
    struct mlx5e_rep_priv *rpriv = priv->ppriv;

    mlx5e_set_netdev_mtu_boundaries(priv);
    mlx5e_rep_neigh_init(rpriv);
}
1033 
/* Profile .disable callback for VF representors: stop neighbour tracking. */
static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
    struct mlx5e_rep_priv *rpriv = priv->ppriv;

    mlx5e_rep_neigh_cleanup(rpriv);
}
1040 
/* Profile .update_rx callback: representors have nothing to refresh. */
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
    return 0;
}
1045 
1046 static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
1047 {
1048     struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
1049 
1050     if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
1051         struct mlx5_eqe *eqe = data;
1052 
1053         switch (eqe->sub_type) {
1054         case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
1055         case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
1056             queue_work(priv->wq, &priv->update_carrier_work);
1057             break;
1058         default:
1059             return NOTIFY_DONE;
1060         }
1061 
1062         return NOTIFY_OK;
1063     }
1064 
1065     if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
1066         return mlx5e_rep_tc_event_port_affinity(priv);
1067 
1068     return NOTIFY_DONE;
1069 }
1070 
/* Profile .enable callback for the uplink representor: set MTU bounds
 * from the queried port maximum, enable TC, join the LAG, register the
 * async-event notifier, set up DCB, neighbour tracking and bridge
 * offload, then reopen/attach the netdev under the RTNL lock.
 */
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
    struct mlx5e_rep_priv *rpriv = priv->ppriv;
    struct net_device *netdev = priv->netdev;
    struct mlx5_core_dev *mdev = priv->mdev;
    u16 max_mtu;

    netdev->min_mtu = ETH_MIN_MTU;
    mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
    netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
    mlx5e_set_dev_port_mtu(priv);

    mlx5e_rep_tc_enable(priv);

    /* Let the uplink vport admin state follow the physical link when the
     * device supports it.
     */
    if (MLX5_CAP_GEN(mdev, uplink_follow))
        mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
                          0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
    mlx5_lag_add_netdev(mdev, netdev);
    priv->events_nb.notifier_call = uplink_rep_async_event;
    mlx5_notifier_register(mdev, &priv->events_nb);
    mlx5e_dcbnl_initialize(priv);
    mlx5e_dcbnl_init_app(priv);
    mlx5e_rep_neigh_init(rpriv);
    mlx5e_rep_bridge_init(priv);

    netdev->wanted_features |= NETIF_F_HW_TC;

    /* Reattach the netdev; mlx5e_open() and friends require RTNL. */
    rtnl_lock();
    if (netif_running(netdev))
        mlx5e_open(netdev);
    udp_tunnel_nic_reset_ntf(priv->netdev);
    netif_device_attach(netdev);
    rtnl_unlock();
}
1105 
/* Profile .disable callback for the uplink representor: detach/close the
 * netdev under RTNL, then tear down in reverse order of
 * mlx5e_uplink_rep_enable(), and finally reset VXLAN port offload state.
 */
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
    struct mlx5e_rep_priv *rpriv = priv->ppriv;
    struct mlx5_core_dev *mdev = priv->mdev;

    rtnl_lock();
    if (netif_running(priv->netdev))
        mlx5e_close(priv->netdev);
    netif_device_detach(priv->netdev);
    rtnl_unlock();

    mlx5e_rep_bridge_cleanup(priv);
    mlx5e_rep_neigh_cleanup(rpriv);
    mlx5e_dcbnl_delete_app(priv);
    mlx5_notifier_unregister(mdev, &priv->events_nb);
    mlx5e_rep_tc_disable(priv);
    mlx5_lag_remove_netdev(mdev, priv->netdev);
    mlx5_vxlan_reset_to_default(mdev->vxlan);
}
1125 
/* Ethtool stats groups for VF representors: software and vport counters
 * only (vport_rep also refreshes on ndo_get_stats).
 */
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
    &MLX5E_STATS_GRP(sw_rep),
    &MLX5E_STATS_GRP(vport_rep),
};

/* Profile .stats_grps_num callback for VF representors. */
static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
    return ARRAY_SIZE(mlx5e_rep_stats_grps);
}
1139 
/* The stats groups order is opposite to the update_stats() order calls.
 * The uplink representor exposes the full NIC stats set, since it owns
 * the physical port.
 */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
    &MLX5E_STATS_GRP(sw),
    &MLX5E_STATS_GRP(qcnt),
    &MLX5E_STATS_GRP(vnic_env),
    &MLX5E_STATS_GRP(vport),
    &MLX5E_STATS_GRP(802_3),
    &MLX5E_STATS_GRP(2863),
    &MLX5E_STATS_GRP(2819),
    &MLX5E_STATS_GRP(phy),
    &MLX5E_STATS_GRP(eth_ext),
    &MLX5E_STATS_GRP(pcie),
    &MLX5E_STATS_GRP(per_prio),
    &MLX5E_STATS_GRP(pme),
    &MLX5E_STATS_GRP(channels),
    &MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
    &MLX5E_STATS_GRP(ipsec_sw),
#endif
    &MLX5E_STATS_GRP(ptp),
};

/* Profile .stats_grps_num callback for the uplink representor. */
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
    return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}
1166 
/* netdev profile for VF/SF vport representors (single TC, regular RQ
 * groups, reduced stats set).
 */
static const struct mlx5e_profile mlx5e_rep_profile = {
    .init           = mlx5e_init_rep,
    .cleanup        = mlx5e_cleanup_rep,
    .init_rx        = mlx5e_init_rep_rx,
    .cleanup_rx     = mlx5e_cleanup_rep_rx,
    .init_tx        = mlx5e_init_rep_tx,
    .cleanup_tx     = mlx5e_cleanup_rep_tx,
    .enable             = mlx5e_rep_enable,
    .disable            = mlx5e_rep_disable,
    .update_rx      = mlx5e_update_rep_rx,
    .update_stats           = mlx5e_stats_update_ndo_stats,
    .rx_handlers            = &mlx5e_rx_handlers_rep,
    .max_tc         = 1,
    .rq_groups      = MLX5E_NUM_RQ_GROUPS(REGULAR),
    .stats_grps     = mlx5e_rep_stats_grps,
    .stats_grps_num     = mlx5e_rep_stats_grps_num,
    .max_nch_limit      = mlx5e_rep_max_nch_limit,
};
1185 
/* netdev profile for the uplink representor: full TC count, full stats
 * set, and carrier updates driven by port-change events.
 */
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
    .init           = mlx5e_init_ul_rep,
    .cleanup        = mlx5e_cleanup_rep,
    .init_rx        = mlx5e_init_ul_rep_rx,
    .cleanup_rx     = mlx5e_cleanup_ul_rep_rx,
    .init_tx        = mlx5e_init_rep_tx,
    .cleanup_tx     = mlx5e_cleanup_rep_tx,
    .enable             = mlx5e_uplink_rep_enable,
    .disable            = mlx5e_uplink_rep_disable,
    .update_rx      = mlx5e_update_rep_rx,
    .update_stats           = mlx5e_stats_update_ndo_stats,
    .update_carrier         = mlx5e_update_carrier,
    .rx_handlers            = &mlx5e_rx_handlers_rep,
    .max_tc         = MLX5E_MAX_NUM_TC,
    /* XSK is needed so we can replace profile with NIC netdev */
    .rq_groups      = MLX5E_NUM_RQ_GROUPS(XSK),
    .stats_grps     = mlx5e_ul_rep_stats_grps,
    .stats_grps_num     = mlx5e_ul_rep_stats_grps_num,
};
1205 
/* e-Switch vport representors */

/* Load the uplink representor by switching the existing uplink netdev
 * from the NIC profile to the uplink-representor profile (no new netdev
 * is created), then publish it on the matching devlink port.
 *
 * Returns 0 on success or the error from the profile change.
 */
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
    struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
    struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
    struct devlink_port *dl_port;
    int err;

    rpriv->netdev = priv->netdev;

    err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
                      rpriv);
    if (err)
        return err;

    dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
    if (dl_port)
        devlink_port_type_eth_set(dl_port, rpriv->netdev);

    return 0;
}
1228 
1229 static void
1230 mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
1231 {
1232     struct net_device *netdev = rpriv->netdev;
1233     struct devlink_port *dl_port;
1234     struct mlx5_core_dev *dev;
1235     struct mlx5e_priv *priv;
1236 
1237     priv = netdev_priv(netdev);
1238     dev = priv->mdev;
1239 
1240     dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
1241     if (dl_port)
1242         devlink_port_type_clear(dl_port);
1243     mlx5e_netdev_attach_nic_profile(priv);
1244 }
1245 
1246 static int
1247 mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1248 {
1249     struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
1250     const struct mlx5e_profile *profile;
1251     struct devlink_port *dl_port;
1252     struct net_device *netdev;
1253     struct mlx5e_priv *priv;
1254     int err;
1255 
1256     profile = &mlx5e_rep_profile;
1257     netdev = mlx5e_create_netdev(dev, profile);
1258     if (!netdev) {
1259         mlx5_core_warn(dev,
1260                    "Failed to create representor netdev for vport %d\n",
1261                    rep->vport);
1262         return -EINVAL;
1263     }
1264 
1265     mlx5e_build_rep_netdev(netdev, dev);
1266     rpriv->netdev = netdev;
1267 
1268     priv = netdev_priv(netdev);
1269     priv->profile = profile;
1270     priv->ppriv = rpriv;
1271     err = profile->init(dev, netdev);
1272     if (err) {
1273         netdev_warn(netdev, "rep profile init failed, %d\n", err);
1274         goto err_destroy_netdev;
1275     }
1276 
1277     err = mlx5e_attach_netdev(netdev_priv(netdev));
1278     if (err) {
1279         netdev_warn(netdev,
1280                 "Failed to attach representor netdev for vport %d\n",
1281                 rep->vport);
1282         goto err_cleanup_profile;
1283     }
1284 
1285     err = register_netdev(netdev);
1286     if (err) {
1287         netdev_warn(netdev,
1288                 "Failed to register representor netdev for vport %d\n",
1289                 rep->vport);
1290         goto err_detach_netdev;
1291     }
1292 
1293     dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
1294     if (dl_port)
1295         devlink_port_type_eth_set(dl_port, netdev);
1296     return 0;
1297 
1298 err_detach_netdev:
1299     mlx5e_detach_netdev(netdev_priv(netdev));
1300 
1301 err_cleanup_profile:
1302     priv->profile->cleanup(priv);
1303 
1304 err_destroy_netdev:
1305     mlx5e_destroy_netdev(netdev_priv(netdev));
1306     return err;
1307 }
1308 
1309 static int
1310 mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1311 {
1312     struct mlx5e_rep_priv *rpriv;
1313     int err;
1314 
1315     rpriv = kvzalloc(sizeof(*rpriv), GFP_KERNEL);
1316     if (!rpriv)
1317         return -ENOMEM;
1318 
1319     /* rpriv->rep to be looked up when profile->init() is called */
1320     rpriv->rep = rep;
1321     rep->rep_data[REP_ETH].priv = rpriv;
1322     INIT_LIST_HEAD(&rpriv->vport_sqs_list);
1323 
1324     if (rep->vport == MLX5_VPORT_UPLINK)
1325         err = mlx5e_vport_uplink_rep_load(dev, rep);
1326     else
1327         err = mlx5e_vport_vf_rep_load(dev, rep);
1328 
1329     if (err)
1330         kvfree(rpriv);
1331 
1332     return err;
1333 }
1334 
1335 static void
1336 mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
1337 {
1338     struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
1339     struct net_device *netdev = rpriv->netdev;
1340     struct mlx5e_priv *priv = netdev_priv(netdev);
1341     struct mlx5_core_dev *dev = priv->mdev;
1342     struct devlink_port *dl_port;
1343     void *ppriv = priv->ppriv;
1344 
1345     if (rep->vport == MLX5_VPORT_UPLINK) {
1346         mlx5e_vport_uplink_rep_unload(rpriv);
1347         goto free_ppriv;
1348     }
1349 
1350     dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
1351     if (dl_port)
1352         devlink_port_type_clear(dl_port);
1353     unregister_netdev(netdev);
1354     mlx5e_detach_netdev(priv);
1355     priv->profile->cleanup(priv);
1356     mlx5e_destroy_netdev(priv);
1357 free_ppriv:
1358     kvfree(ppriv); /* mlx5e_rep_priv */
1359 }
1360 
1361 static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
1362 {
1363     struct mlx5e_rep_priv *rpriv;
1364 
1365     rpriv = mlx5e_rep_to_rep_priv(rep);
1366 
1367     return rpriv->netdev;
1368 }
1369 
/* Remove the peer-eswitch send-to-vport rules installed by
 * mlx5e_vport_rep_event_pair() for every SQ of this representor.
 * Safe to call when some (or all) SQs have no peer rule.
 */
static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
{
    struct mlx5e_rep_priv *rpriv;
    struct mlx5e_rep_sq *rep_sq;

    rpriv = mlx5e_rep_to_rep_priv(rep);
    list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
        if (!rep_sq->send_to_vport_rule_peer)
            continue;
        mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
        rep_sq->send_to_vport_rule_peer = NULL;
    }
}
1383 
/* On eswitch pairing, install a send-to-vport rule on the peer eswitch
 * for each SQ of this representor that does not have one yet.  On any
 * failure, all peer rules installed so far are removed again.
 *
 * Returns 0 on success or the PTR_ERR of the failed rule.
 */
static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
                      struct mlx5_eswitch_rep *rep,
                      struct mlx5_eswitch *peer_esw)
{
    struct mlx5_flow_handle *flow_rule;
    struct mlx5e_rep_priv *rpriv;
    struct mlx5e_rep_sq *rep_sq;

    rpriv = mlx5e_rep_to_rep_priv(rep);
    list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
        if (rep_sq->send_to_vport_rule_peer)
            continue;
        flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
        if (IS_ERR(flow_rule))
            goto err_out;
        rep_sq->send_to_vport_rule_peer = flow_rule;
    }

    return 0;
err_out:
    /* flow_rule still holds the IS_ERR value from the failed add. */
    mlx5e_vport_rep_event_unpair(rep);
    return PTR_ERR(flow_rule);
}
1407 
1408 static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
1409                  struct mlx5_eswitch_rep *rep,
1410                  enum mlx5_switchdev_event event,
1411                  void *data)
1412 {
1413     int err = 0;
1414 
1415     if (event == MLX5_SWITCHDEV_EVENT_PAIR)
1416         err = mlx5e_vport_rep_event_pair(esw, rep, data);
1417     else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
1418         mlx5e_vport_rep_event_unpair(rep);
1419 
1420     return err;
1421 }
1422 
/* Ethernet representor callbacks registered with the eswitch core. */
static const struct mlx5_eswitch_rep_ops rep_ops = {
    .load = mlx5e_vport_rep_load,
    .unload = mlx5e_vport_rep_unload,
    .get_proto_dev = mlx5e_vport_rep_get_proto_dev,
    .event = mlx5e_vport_rep_event,
};
1429 
1430 static int mlx5e_rep_probe(struct auxiliary_device *adev,
1431                const struct auxiliary_device_id *id)
1432 {
1433     struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
1434     struct mlx5_core_dev *mdev = edev->mdev;
1435     struct mlx5_eswitch *esw;
1436 
1437     esw = mdev->priv.eswitch;
1438     mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
1439     return 0;
1440 }
1441 
1442 static void mlx5e_rep_remove(struct auxiliary_device *adev)
1443 {
1444     struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
1445     struct mlx5_core_dev *mdev = vdev->mdev;
1446     struct mlx5_eswitch *esw;
1447 
1448     esw = mdev->priv.eswitch;
1449     mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
1450 }
1451 
/* Auxiliary IDs this driver binds to (the ".eth-rep" sub-function of the
 * mlx5 core device); exported for module autoloading.
 */
static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
    { .name = MLX5_ADEV_NAME ".eth-rep", },
    {},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);
1458 
/* Auxiliary driver for the ethernet-representor sub-function. */
static struct auxiliary_driver mlx5e_rep_driver = {
    .name = "eth-rep",
    .probe = mlx5e_rep_probe,
    .remove = mlx5e_rep_remove,
    .id_table = mlx5e_rep_id_table,
};
1465 
/* Register the representor auxiliary driver; called at module init.
 * Returns 0 on success or a negative errno.
 */
int mlx5e_rep_init(void)
{
    return auxiliary_driver_register(&mlx5e_rep_driver);
}
1470 
/* Unregister the representor auxiliary driver; called at module exit. */
void mlx5e_rep_cleanup(void)
{
    auxiliary_driver_unregister(&mlx5e_rep_driver);
}