0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Marvell RVU Ethernet driver
0003  *
0004  * Copyright (C) 2020 Marvell.
0005  *
0006  */
0007 
0008 #include <linux/pci.h>
0009 #include <linux/ethtool.h>
0010 #include <linux/stddef.h>
0011 #include <linux/etherdevice.h>
0012 #include <linux/log2.h>
0013 #include <linux/net_tstamp.h>
0014 #include <linux/linkmode.h>
0015 
0016 #include "otx2_common.h"
0017 #include "otx2_ptp.h"
0018 
0019 #define DRV_NAME    "rvu-nicpf"
0020 #define DRV_VF_NAME "rvu-nicvf"
0021 
0022 struct otx2_stat {
0023     char name[ETH_GSTRING_LEN];
0024     unsigned int index;
0025 };
0026 
0027 /* HW device stats */
0028 #define OTX2_DEV_STAT(stat) { \
0029     .name = #stat, \
0030     .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
0031 }
0032 
0033 enum link_mode {
0034     OTX2_MODE_SUPPORTED,
0035     OTX2_MODE_ADVERTISED
0036 };
0037 
0038 static const struct otx2_stat otx2_dev_stats[] = {
0039     OTX2_DEV_STAT(rx_ucast_frames),
0040     OTX2_DEV_STAT(rx_bcast_frames),
0041     OTX2_DEV_STAT(rx_mcast_frames),
0042 
0043     OTX2_DEV_STAT(tx_ucast_frames),
0044     OTX2_DEV_STAT(tx_bcast_frames),
0045     OTX2_DEV_STAT(tx_mcast_frames),
0046 };
0047 
0048 /* Driver level stats */
0049 #define OTX2_DRV_STAT(stat) { \
0050     .name = #stat, \
0051     .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \
0052 }
0053 
0054 static const struct otx2_stat otx2_drv_stats[] = {
0055     OTX2_DRV_STAT(rx_fcs_errs),
0056     OTX2_DRV_STAT(rx_oversize_errs),
0057     OTX2_DRV_STAT(rx_undersize_errs),
0058     OTX2_DRV_STAT(rx_csum_errs),
0059     OTX2_DRV_STAT(rx_len_errs),
0060     OTX2_DRV_STAT(rx_other_errs),
0061 };
0062 
0063 static const struct otx2_stat otx2_queue_stats[] = {
0064     { "bytes", 0 },
0065     { "frames", 1 },
0066 };
0067 
0068 static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
0069 static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
0070 static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
0071 
0072 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
0073 
0074 static void otx2_get_drvinfo(struct net_device *netdev,
0075                  struct ethtool_drvinfo *info)
0076 {
0077     struct otx2_nic *pfvf = netdev_priv(netdev);
0078 
0079     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
0080     strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
0081 }
0082 
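     /* Build "rxqN: <stat>" and "txqN: <stat>" name strings for each queue */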
0083 static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
0084 {
0085     int start_qidx = qset * pfvf->hw.rx_queues;
0086     int qidx, stats;
0087 
0088     for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
0089         for (stats = 0; stats < otx2_n_queue_stats; stats++) {
0090             sprintf(*data, "rxq%d: %s", qidx + start_qidx,
0091                 otx2_queue_stats[stats].name);
0092             *data += ETH_GSTRING_LEN;
0093         }
0094     }
0095     for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
0096         for (stats = 0; stats < otx2_n_queue_stats; stats++) {
0097             sprintf(*data, "txq%d: %s", qidx + start_qidx,
0098                 otx2_queue_stats[stats].name);
0099             *data += ETH_GSTRING_LEN;
0100         }
0101     }
0102 }
0103 
0104 static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
0105 {
0106     struct otx2_nic *pfvf = netdev_priv(netdev);
0107     int stats;
0108 
0109     if (sset != ETH_SS_STATS)
0110         return;
0111 
0112     for (stats = 0; stats < otx2_n_dev_stats; stats++) {
0113         memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
0114         data += ETH_GSTRING_LEN;
0115     }
0116 
0117     for (stats = 0; stats < otx2_n_drv_stats; stats++) {
0118         memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
0119         data += ETH_GSTRING_LEN;
0120     }
0121 
0122     otx2_get_qset_strings(pfvf, &data, 0);
0123 
0124     if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
0125         for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
0126             sprintf(data, "cgx_rxstat%d: ", stats);
0127             data += ETH_GSTRING_LEN;
0128         }
0129 
0130         for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
0131             sprintf(data, "cgx_txstat%d: ", stats);
0132             data += ETH_GSTRING_LEN;
0133         }
0134     }
0135 
0136     strcpy(data, "reset_count");
0137     data += ETH_GSTRING_LEN;
0138     sprintf(data, "Fec Corrected Errors: ");
0139     data += ETH_GSTRING_LEN;
0140     sprintf(data, "Fec Uncorrected Errors: ");
0141     data += ETH_GSTRING_LEN;
0142 }
0143 
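     /* Copy per-queue Rx/Tx byte and frame counters into the ethtool data array */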
0144 static void otx2_get_qset_stats(struct otx2_nic *pfvf,
0145                 struct ethtool_stats *stats, u64 **data)
0146 {
0147     int stat, qidx;
0148 
0149     if (!pfvf)
0150         return;
0151     for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
0152         if (!otx2_update_rq_stats(pfvf, qidx)) {
0153             for (stat = 0; stat < otx2_n_queue_stats; stat++)
0154                 *((*data)++) = 0;
0155             continue;
0156         }
0157         for (stat = 0; stat < otx2_n_queue_stats; stat++)
0158             *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
0159                 [otx2_queue_stats[stat].index];
0160     }
0161 
0162     for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
0163         if (!otx2_update_sq_stats(pfvf, qidx)) {
0164             for (stat = 0; stat < otx2_n_queue_stats; stat++)
0165                 *((*data)++) = 0;
0166             continue;
0167         }
0168         for (stat = 0; stat < otx2_n_queue_stats; stat++)
0169             *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats)
0170                 [otx2_queue_stats[stat].index];
0171     }
0172 }
0173 
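     /* Ask firmware over the mailbox to refresh the PHY FEC statistics */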
0174 static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
0175 {
0176     struct msg_req *req;
0177     int rc = -ENOMEM;
0178 
0179     mutex_lock(&pfvf->mbox.lock);
0180     req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
0181     if (!req)
0182         goto end;
0183 
0184     if (!otx2_sync_mbox_msg(&pfvf->mbox))
0185         rc = 0;
0186 end:
0187     mutex_unlock(&pfvf->mbox.lock);
0188     return rc;
0189 }
0190 
0191 /* Get device and per queue statistics */
0192 static void otx2_get_ethtool_stats(struct net_device *netdev,
0193                    struct ethtool_stats *stats, u64 *data)
0194 {
0195     struct otx2_nic *pfvf = netdev_priv(netdev);
0196     u64 fec_corr_blks, fec_uncorr_blks;
0197     struct cgx_fw_data *rsp;
0198     int stat;
0199 
0200     otx2_get_dev_stats(pfvf);
0201     for (stat = 0; stat < otx2_n_dev_stats; stat++)
0202         *(data++) = ((u64 *)&pfvf->hw.dev_stats)
0203                 [otx2_dev_stats[stat].index];
0204 
0205     for (stat = 0; stat < otx2_n_drv_stats; stat++)
0206         *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats)
0207                         [otx2_drv_stats[stat].index]);
0208 
0209     otx2_get_qset_stats(pfvf, stats, &data);
0210 
0211     if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
0212         otx2_update_lmac_stats(pfvf);
0213         for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
0214             *(data++) = pfvf->hw.cgx_rx_stats[stat];
0215         for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
0216             *(data++) = pfvf->hw.cgx_tx_stats[stat];
0217     }
0218 
0219     *(data++) = pfvf->reset_count;
0220 
0221     fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
0222     fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
0223 
0224     rsp = otx2_get_fwdata(pfvf);
0225     if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
0226         !otx2_get_phy_fec_stats(pfvf)) {
0227         /* Fetch fwdata again because it's been recently populated with
0228          * latest PHY FEC stats.
0229          */
0230         rsp = otx2_get_fwdata(pfvf);
0231         if (!IS_ERR(rsp)) {
0232             struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
0233 
0234             if (pfvf->linfo.fec == OTX2_FEC_BASER) {
0235                 fec_corr_blks   = p->brfec_corr_blks;
0236                 fec_uncorr_blks = p->brfec_uncorr_blks;
0237             } else {
0238                 fec_corr_blks   = p->rsfec_corr_cws;
0239                 fec_uncorr_blks = p->rsfec_uncorr_cws;
0240             }
0241         }
0242     }
0243 
0244     *(data++) = fec_corr_blks;
0245     *(data++) = fec_uncorr_blks;
0246 }
0247 
0248 static int otx2_get_sset_count(struct net_device *netdev, int sset)
0249 {
0250     struct otx2_nic *pfvf = netdev_priv(netdev);
0251     int qstats_count, mac_stats = 0;
0252 
0253     if (sset != ETH_SS_STATS)
0254         return -EINVAL;
0255 
0256     qstats_count = otx2_n_queue_stats *
0257                (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
0258     if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
0259         mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
0260     otx2_update_lmac_fec_stats(pfvf);
0261 
0262     return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
0263            mac_stats + OTX2_FEC_STATS_CNT + 1;
0264 }
0265 
0266 /* Get no of queues device supports and current queue count */
0267 static void otx2_get_channels(struct net_device *dev,
0268                   struct ethtool_channels *channel)
0269 {
0270     struct otx2_nic *pfvf = netdev_priv(dev);
0271 
0272     channel->max_rx = pfvf->hw.max_queues;
0273     channel->max_tx = pfvf->hw.max_queues;
0274 
0275     channel->rx_count = pfvf->hw.rx_queues;
0276     channel->tx_count = pfvf->hw.tx_queues;
0277 }
0278 
0279 /* Set no of Tx, Rx queues to be used */
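     /* e.g. invoked via "ethtool -L <iface> rx 4 tx 4" */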
0280 static int otx2_set_channels(struct net_device *dev,
0281                  struct ethtool_channels *channel)
0282 {
0283     struct otx2_nic *pfvf = netdev_priv(dev);
0284     bool if_up = netif_running(dev);
0285     int err = 0;
0286 
0287     if (!channel->rx_count || !channel->tx_count)
0288         return -EINVAL;
0289 
0290     if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
0291         netdev_err(dev,
0292                "Receive queues are in use by TC police action\n");
0293         return -EINVAL;
0294     }
0295 
0296     if (if_up)
0297         dev->netdev_ops->ndo_stop(dev);
0298 
0299     err = otx2_set_real_num_queues(dev, channel->tx_count,
0300                        channel->rx_count);
0301     if (err)
0302         return err;
0303 
0304     pfvf->hw.rx_queues = channel->rx_count;
0305     pfvf->hw.tx_queues = channel->tx_count;
0306     pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
0307 
0308     if (if_up)
0309         err = dev->netdev_ops->ndo_open(dev);
0310 
0311     netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
0312             pfvf->hw.tx_queues, pfvf->hw.rx_queues);
0313 
0314     return err;
0315 }
0316 
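     /* Read the current Rx/Tx pause frame configuration from firmware via mailbox */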
0317 static void otx2_get_pauseparam(struct net_device *netdev,
0318                 struct ethtool_pauseparam *pause)
0319 {
0320     struct otx2_nic *pfvf = netdev_priv(netdev);
0321     struct cgx_pause_frm_cfg *req, *rsp;
0322 
0323     if (is_otx2_lbkvf(pfvf->pdev))
0324         return;
0325 
0326     req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
0327     if (!req)
0328         return;
0329 
0330     if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
0331         rsp = (struct cgx_pause_frm_cfg *)
0332                otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
0333         pause->rx_pause = rsp->rx_pause;
0334         pause->tx_pause = rsp->tx_pause;
0335     }
0336 }
0337 
0338 static int otx2_set_pauseparam(struct net_device *netdev,
0339                    struct ethtool_pauseparam *pause)
0340 {
0341     struct otx2_nic *pfvf = netdev_priv(netdev);
0342 
0343     if (pause->autoneg)
0344         return -EOPNOTSUPP;
0345 
0346     if (is_otx2_lbkvf(pfvf->pdev))
0347         return -EOPNOTSUPP;
0348 
0349     if (pause->rx_pause)
0350         pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
0351     else
0352         pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
0353 
0354     if (pause->tx_pause)
0355         pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
0356     else
0357         pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
0358 
0359     return otx2_config_pause_frm(pfvf);
0360 }
0361 
0362 static void otx2_get_ringparam(struct net_device *netdev,
0363                    struct ethtool_ringparam *ring,
0364                    struct kernel_ethtool_ringparam *kernel_ring,
0365                    struct netlink_ext_ack *extack)
0366 {
0367     struct otx2_nic *pfvf = netdev_priv(netdev);
0368     struct otx2_qset *qs = &pfvf->qset;
0369 
0370     ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX);
0371     ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
0372     ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
0373     ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
0374     kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
0375     kernel_ring->cqe_size = pfvf->hw.xqe_size;
0376 }
0377 
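     /* Resize descriptor rings, Rx buffer length and CQE size,
      * e.g. via "ethtool -G <iface> rx 2048 tx 8192".
      */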
0378 static int otx2_set_ringparam(struct net_device *netdev,
0379                   struct ethtool_ringparam *ring,
0380                   struct kernel_ethtool_ringparam *kernel_ring,
0381                   struct netlink_ext_ack *extack)
0382 {
0383     struct otx2_nic *pfvf = netdev_priv(netdev);
0384     u32 rx_buf_len = kernel_ring->rx_buf_len;
0385     u32 old_rx_buf_len = pfvf->hw.rbuf_len;
0386     u32 xqe_size = kernel_ring->cqe_size;
0387     bool if_up = netif_running(netdev);
0388     struct otx2_qset *qs = &pfvf->qset;
0389     u32 rx_count, tx_count;
0390 
0391     if (ring->rx_mini_pending || ring->rx_jumbo_pending)
0392         return -EINVAL;
0393 
0394     /* Hardware supports a maximum receive buffer size of 32K,
0395      * and 1536 is the typical Ethernet frame size.
0396      */
0397     if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) {
0398         netdev_err(netdev,
0399                "Receive buffer range is 1536 - 32768");
0400         return -EINVAL;
0401     }
0402 
0403     if (xqe_size != 128 && xqe_size != 512) {
0404         netdev_err(netdev,
0405                "Completion event size must be 128 or 512");
0406         return -EINVAL;
0407     }
0408 
0409     /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M  */
0410     rx_count = ring->rx_pending;
0411     /* On some silicon variants a skid of reserved CQEs is
0412      * needed to avoid CQ overflow.
0413      */
0414     if (rx_count < pfvf->hw.rq_skid)
0415         rx_count = pfvf->hw.rq_skid;
0416     rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
0417 
0418     /* Due to pipelining, a minimum of 2000 unused SQ CQEs
0419      * needs to be maintained to avoid CQ overflow, hence the
0420      * minimum 4K size.
0421      */
0422     tx_count = clamp_t(u32, ring->tx_pending,
0423                Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
0424     tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
0425 
0426     if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
0427         rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
0428         return 0;
0429 
0430     if (if_up)
0431         netdev->netdev_ops->ndo_stop(netdev);
0432 
0433     /* Assigned to the nearest possible exponent. */
0434     qs->sqe_cnt = tx_count;
0435     qs->rqe_cnt = rx_count;
0436 
0437     pfvf->hw.rbuf_len = rx_buf_len;
0438     pfvf->hw.xqe_size = xqe_size;
0439 
0440     if (if_up)
0441         return netdev->netdev_ops->ndo_open(netdev);
0442 
0443     return 0;
0444 }
0445 
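     /* Report IRQ coalescing settings; Rx and Tx share the same CQ timer and
      * count thresholds, so both directions return identical values.
      */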
0446 static int otx2_get_coalesce(struct net_device *netdev,
0447                  struct ethtool_coalesce *cmd,
0448                  struct kernel_ethtool_coalesce *kernel_coal,
0449                  struct netlink_ext_ack *extack)
0450 {
0451     struct otx2_nic *pfvf = netdev_priv(netdev);
0452     struct otx2_hw *hw = &pfvf->hw;
0453 
0454     cmd->rx_coalesce_usecs = hw->cq_time_wait;
0455     cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
0456     cmd->tx_coalesce_usecs = hw->cq_time_wait;
0457     cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
0458     if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
0459             OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
0460         cmd->use_adaptive_rx_coalesce = 1;
0461         cmd->use_adaptive_tx_coalesce = 1;
0462     } else {
0463         cmd->use_adaptive_rx_coalesce = 0;
0464         cmd->use_adaptive_tx_coalesce = 0;
0465     }
0466 
0467     return 0;
0468 }
0469 
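     /* Update IRQ coalescing, e.g. via
      * "ethtool -C <iface> rx-usecs 10 rx-frames 32 adaptive-rx on adaptive-tx on".
      * Note that adaptive-rx and adaptive-tx must be changed together.
      */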
0470 static int otx2_set_coalesce(struct net_device *netdev,
0471                  struct ethtool_coalesce *ec,
0472                  struct kernel_ethtool_coalesce *kernel_coal,
0473                  struct netlink_ext_ack *extack)
0474 {
0475     struct otx2_nic *pfvf = netdev_priv(netdev);
0476     struct otx2_hw *hw = &pfvf->hw;
0477     u8 priv_coalesce_status;
0478     int qidx;
0479 
0480     if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
0481         return 0;
0482 
0483     if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) {
0484         netdev_err(netdev,
0485                "adaptive-rx should be same as adaptive-tx");
0486         return -EINVAL;
0487     }
0488 
0489     /* Check and update coalesce status */
0490     if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
0491             OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
0492         priv_coalesce_status = 1;
0493         if (!ec->use_adaptive_rx_coalesce)
0494             pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
0495     } else {
0496         priv_coalesce_status = 0;
0497         if (ec->use_adaptive_rx_coalesce)
0498             pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
0499     }
0500 
0501     /* 'cq_time_wait' is 8 bits wide and counts in multiples of 100ns,
0502      * so clamp the user-given value to the range of 1 to 25 usec.
0503      */
0504     ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs,
0505                     1, CQ_TIMER_THRESH_MAX);
0506     ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs,
0507                     1, CQ_TIMER_THRESH_MAX);
0508 
0509     /* Rx and Tx are mapped to the same CQ; check which one
0510      * changed, and if both did, choose the min.
0511      */
0512     if (hw->cq_time_wait == ec->rx_coalesce_usecs)
0513         hw->cq_time_wait = ec->tx_coalesce_usecs;
0514     else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
0515         hw->cq_time_wait = ec->rx_coalesce_usecs;
0516     else
0517         hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
0518                      ec->tx_coalesce_usecs);
0519 
0520     /* 'cq_ecount_wait' is 16 bits wide, but clamp the user-given
0521      * value to the NAPI poll budget (1 to NAPI_POLL_WEIGHT).
0522      */
0523     ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
0524                           1, NAPI_POLL_WEIGHT);
0525     ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
0526                           1, NAPI_POLL_WEIGHT);
0527 
0528     /* Rx and Tx are mapped to the same CQ; check which one
0529      * changed, and if both did, choose the min.
0530      */
0531     if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
0532         hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
0533     else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
0534         hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
0535     else
0536         hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
0537                        ec->tx_max_coalesced_frames);
0538 
0539     /* Reset 'cq_time_wait' and 'cq_ecount_wait' to
0540      * default values if coalesce status changed from
0541      * 'on' to 'off'.
0542      */
0543     if (priv_coalesce_status &&
0544         ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) !=
0545          OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
0546         hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
0547         hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
0548     }
0549 
0550     if (netif_running(netdev)) {
0551         for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
0552             otx2_config_irq_coalescing(pfvf, qidx);
0553     }
0554 
0555     return 0;
0556 }
0557 
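     /* Translate the NIX flow key configuration into ethtool RXH_* hash fields */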
0558 static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
0559                   struct ethtool_rxnfc *nfc)
0560 {
0561     struct otx2_rss_info *rss = &pfvf->hw.rss_info;
0562 
0563     if (!(rss->flowkey_cfg &
0564         (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
0565         return 0;
0566 
0567     /* Minimum is IPv4 and IPv6, SIP/DIP */
0568     nfc->data = RXH_IP_SRC | RXH_IP_DST;
0569     if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
0570         nfc->data |= RXH_VLAN;
0571 
0572     switch (nfc->flow_type) {
0573     case TCP_V4_FLOW:
0574     case TCP_V6_FLOW:
0575         if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP)
0576             nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
0577         break;
0578     case UDP_V4_FLOW:
0579     case UDP_V6_FLOW:
0580         if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP)
0581             nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
0582         break;
0583     case SCTP_V4_FLOW:
0584     case SCTP_V6_FLOW:
0585         if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP)
0586             nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
0587         break;
0588     case AH_ESP_V4_FLOW:
0589     case AH_ESP_V6_FLOW:
0590         if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
0591             nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
0592         break;
0593     case AH_V4_FLOW:
0594     case ESP_V4_FLOW:
0595     case IPV4_FLOW:
0596         break;
0597     case AH_V6_FLOW:
0598     case ESP_V6_FLOW:
0599     case IPV6_FLOW:
0600         break;
0601     default:
0602         return -EINVAL;
0603     }
0604 
0605     return 0;
0606 }
0607 
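     /* Update the NIX flow key configuration from the requested RXH_* hash fields */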
0608 static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
0609                   struct ethtool_rxnfc *nfc)
0610 {
0611     struct otx2_rss_info *rss = &pfvf->hw.rss_info;
0612     u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
0613     u32 rss_cfg = rss->flowkey_cfg;
0614 
0615     if (!rss->enable) {
0616         netdev_err(pfvf->netdev,
0617                "RSS is disabled, cannot change settings\n");
0618         return -EIO;
0619     }
0620 
0621     /* Minimum is IPv4 and IPv6, SIP/DIP */
0622     if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
0623         return -EINVAL;
0624 
0625     if (nfc->data & RXH_VLAN)
0626         rss_cfg |=  NIX_FLOW_KEY_TYPE_VLAN;
0627     else
0628         rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;
0629 
0630     switch (nfc->flow_type) {
0631     case TCP_V4_FLOW:
0632     case TCP_V6_FLOW:
0633         /* Different config for v4 and v6 is not supported.
0634          * Both of them have to be either 4-tuple or 2-tuple.
0635          */
0636         switch (nfc->data & rxh_l4) {
0637         case 0:
0638             rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP;
0639             break;
0640         case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
0641             rss_cfg |= NIX_FLOW_KEY_TYPE_TCP;
0642             break;
0643         default:
0644             return -EINVAL;
0645         }
0646         break;
0647     case UDP_V4_FLOW:
0648     case UDP_V6_FLOW:
0649         switch (nfc->data & rxh_l4) {
0650         case 0:
0651             rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP;
0652             break;
0653         case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
0654             rss_cfg |= NIX_FLOW_KEY_TYPE_UDP;
0655             break;
0656         default:
0657             return -EINVAL;
0658         }
0659         break;
0660     case SCTP_V4_FLOW:
0661     case SCTP_V6_FLOW:
0662         switch (nfc->data & rxh_l4) {
0663         case 0:
0664             rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP;
0665             break;
0666         case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
0667             rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP;
0668             break;
0669         default:
0670             return -EINVAL;
0671         }
0672         break;
0673     case AH_ESP_V4_FLOW:
0674     case AH_ESP_V6_FLOW:
0675         switch (nfc->data & rxh_l4) {
0676         case 0:
0677             rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
0678                      NIX_FLOW_KEY_TYPE_AH);
0679             rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
0680                    NIX_FLOW_KEY_TYPE_IPV4_PROTO;
0681             break;
0682         case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
0683             /* If VLAN hashing is also requested for ESP then do not
0684              * allow it, because of the hardware's 40-byte flow key limit.
0685              */
0686             if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
0687                 netdev_err(pfvf->netdev,
0688                        "RSS hash of ESP or AH with VLAN is not supported\n");
0689                 return -EOPNOTSUPP;
0690             }
0691 
0692             rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
0693             /* Disable IPv4 proto hashing since IPv6 SA+DA (32 bytes)
0694              * and ESP SPI+sequence (8 bytes) already use the hardware's
0695              * maximum 40-byte flow key limit.
0696              */
0697             rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
0698             break;
0699         default:
0700             return -EINVAL;
0701         }
0702         break;
0703     case IPV4_FLOW:
0704     case IPV6_FLOW:
0705         rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
0706         break;
0707     default:
0708         return -EINVAL;
0709     }
0710 
0711     rss->flowkey_cfg = rss_cfg;
0712     otx2_set_flowkey_cfg(pfvf);
0713     return 0;
0714 }
0715 
0716 static int otx2_get_rxnfc(struct net_device *dev,
0717               struct ethtool_rxnfc *nfc, u32 *rules)
0718 {
0719     bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
0720     struct otx2_nic *pfvf = netdev_priv(dev);
0721     int ret = -EOPNOTSUPP;
0722 
0723     switch (nfc->cmd) {
0724     case ETHTOOL_GRXRINGS:
0725         nfc->data = pfvf->hw.rx_queues;
0726         ret = 0;
0727         break;
0728     case ETHTOOL_GRXCLSRLCNT:
0729         if (netif_running(dev) && ntuple) {
0730             nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
0731             ret = 0;
0732         }
0733         break;
0734     case ETHTOOL_GRXCLSRULE:
0735         if (netif_running(dev) && ntuple)
0736             ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
0737         break;
0738     case ETHTOOL_GRXCLSRLALL:
0739         if (netif_running(dev) && ntuple)
0740             ret = otx2_get_all_flows(pfvf, nfc, rules);
0741         break;
0742     case ETHTOOL_GRXFH:
0743         return otx2_get_rss_hash_opts(pfvf, nfc);
0744     default:
0745         break;
0746     }
0747     return ret;
0748 }
0749 
0750 static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
0751 {
0752     bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
0753     struct otx2_nic *pfvf = netdev_priv(dev);
0754     int ret = -EOPNOTSUPP;
0755 
0756     switch (nfc->cmd) {
0757     case ETHTOOL_SRXFH:
0758         ret = otx2_set_rss_hash_opts(pfvf, nfc);
0759         break;
0760     case ETHTOOL_SRXCLSRLINS:
0761         if (netif_running(dev) && ntuple)
0762             ret = otx2_add_flow(pfvf, nfc);
0763         break;
0764     case ETHTOOL_SRXCLSRLDEL:
0765         if (netif_running(dev) && ntuple)
0766             ret = otx2_remove_flow(pfvf, nfc->fs.location);
0767         break;
0768     default:
0769         break;
0770     }
0771 
0772     return ret;
0773 }
0774 
0775 static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
0776 {
0777     struct otx2_nic *pfvf = netdev_priv(netdev);
0778     struct otx2_rss_info *rss;
0779 
0780     rss = &pfvf->hw.rss_info;
0781 
0782     return sizeof(rss->key);
0783 }
0784 
0785 static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
0786 {
0787     return  MAX_RSS_INDIR_TBL_SIZE;
0788 }
0789 
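     /* Delete any ntuple flows referencing this RSS context, then free it */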
0790 static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
0791 {
0792     struct otx2_rss_info *rss = &pfvf->hw.rss_info;
0793 
0794     otx2_rss_ctx_flow_del(pfvf, ctx_id);
0795     kfree(rss->rss_ctx[ctx_id]);
0796     rss->rss_ctx[ctx_id] = NULL;
0797 
0798     return 0;
0799 }
0800 
0801 static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
0802                    u32 *rss_context)
0803 {
0804     struct otx2_rss_info *rss = &pfvf->hw.rss_info;
0805     u8 ctx;
0806 
0807     for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
0808         if (!rss->rss_ctx[ctx])
0809             break;
0810     }
0811     if (ctx == MAX_RSS_GROUPS)
0812         return -EINVAL;
0813 
0814     rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
0815     if (!rss->rss_ctx[ctx])
0816         return -ENOMEM;
0817     *rss_context = ctx;
0818 
0819     return 0;
0820 }
0821 
0822 /* RSS context configuration */
0823 static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
0824                  const u8 *hkey, const u8 hfunc,
0825                  u32 *rss_context, bool delete)
0826 {
0827     struct otx2_nic *pfvf = netdev_priv(dev);
0828     struct otx2_rss_ctx *rss_ctx;
0829     struct otx2_rss_info *rss;
0830     int ret, idx;
0831 
0832     if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
0833         return -EOPNOTSUPP;
0834 
0835     if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
0836         *rss_context >= MAX_RSS_GROUPS)
0837         return -EINVAL;
0838 
0839     rss = &pfvf->hw.rss_info;
0840 
0841     if (!rss->enable) {
0842         netdev_err(dev, "RSS is disabled, cannot change settings\n");
0843         return -EIO;
0844     }
0845 
0846     if (hkey) {
0847         memcpy(rss->key, hkey, sizeof(rss->key));
0848         otx2_set_rss_key(pfvf);
0849     }
0850     if (delete)
0851         return otx2_rss_ctx_delete(pfvf, *rss_context);
0852 
0853     if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
0854         ret = otx2_rss_ctx_create(pfvf, rss_context);
0855         if (ret)
0856             return ret;
0857     }
0858     if (indir) {
0859         rss_ctx = rss->rss_ctx[*rss_context];
0860         for (idx = 0; idx < rss->rss_size; idx++)
0861             rss_ctx->ind_tbl[idx] = indir[idx];
0862     }
0863     otx2_set_rss_table(pfvf, *rss_context);
0864 
0865     return 0;
0866 }
0867 
0868 static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
0869                  u8 *hkey, u8 *hfunc, u32 rss_context)
0870 {
0871     struct otx2_nic *pfvf = netdev_priv(dev);
0872     struct otx2_rss_ctx *rss_ctx;
0873     struct otx2_rss_info *rss;
0874     int idx, rx_queues;
0875 
0876     rss = &pfvf->hw.rss_info;
0877 
0878     if (hfunc)
0879         *hfunc = ETH_RSS_HASH_TOP;
0880 
0881     if (!indir)
0882         return 0;
0883 
0884     if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
0885         rx_queues = pfvf->hw.rx_queues;
0886         for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
0887             indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
0888         return 0;
0889     }
0890     if (rss_context >= MAX_RSS_GROUPS)
0891         return -ENOENT;
0892 
0893     rss_ctx = rss->rss_ctx[rss_context];
0894     if (!rss_ctx)
0895         return -ENOENT;
0896 
0897     if (indir) {
0898         for (idx = 0; idx < rss->rss_size; idx++)
0899             indir[idx] = rss_ctx->ind_tbl[idx];
0900     }
0901     if (hkey)
0902         memcpy(hkey, rss->key, sizeof(rss->key));
0903 
0904     return 0;
0905 }
0906 
0907 /* Get RSS configuration */
0908 static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
0909              u8 *hkey, u8 *hfunc)
0910 {
0911     return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
0912                      DEFAULT_RSS_CONTEXT_GROUP);
0913 }
0914 
0915 /* Configure RSS table and hash key */
0916 static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
0917              const u8 *hkey, const u8 hfunc)
0918 {
0919 
0920     u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
0921 
0922     return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
0923 }
0924 
0925 static u32 otx2_get_msglevel(struct net_device *netdev)
0926 {
0927     struct otx2_nic *pfvf = netdev_priv(netdev);
0928 
0929     return pfvf->msg_enable;
0930 }
0931 
0932 static void otx2_set_msglevel(struct net_device *netdev, u32 val)
0933 {
0934     struct otx2_nic *pfvf = netdev_priv(netdev);
0935 
0936     pfvf->msg_enable = val;
0937 }
0938 
0939 static u32 otx2_get_link(struct net_device *netdev)
0940 {
0941     struct otx2_nic *pfvf = netdev_priv(netdev);
0942 
0943     /* LBK link is internal and always UP */
0944     if (is_otx2_lbkvf(pfvf->pdev))
0945         return 1;
0946     return pfvf->linfo.link_up;
0947 }
0948 
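     /* Advertise hardware timestamping capabilities when a PTP clock is present */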
0949 static int otx2_get_ts_info(struct net_device *netdev,
0950                 struct ethtool_ts_info *info)
0951 {
0952     struct otx2_nic *pfvf = netdev_priv(netdev);
0953 
0954     if (!pfvf->ptp)
0955         return ethtool_op_get_ts_info(netdev, info);
0956 
0957     info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
0958                 SOF_TIMESTAMPING_RX_SOFTWARE |
0959                 SOF_TIMESTAMPING_SOFTWARE |
0960                 SOF_TIMESTAMPING_TX_HARDWARE |
0961                 SOF_TIMESTAMPING_RX_HARDWARE |
0962                 SOF_TIMESTAMPING_RAW_HARDWARE;
0963 
0964     info->phc_index = otx2_ptp_clock_index(pfvf);
0965 
0966     info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
0967 
0968     info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
0969                (1 << HWTSTAMP_FILTER_ALL);
0970 
0971     return 0;
0972 }
0973 
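     /* Fetch the firmware's auxiliary link info (struct cgx_fw_data) over the mailbox */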
0974 static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
0975 {
0976     struct cgx_fw_data *rsp = NULL;
0977     struct msg_req *req;
0978     int err = 0;
0979 
0980     mutex_lock(&pfvf->mbox.lock);
0981     req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
0982     if (!req) {
0983         mutex_unlock(&pfvf->mbox.lock);
0984         return ERR_PTR(-ENOMEM);
0985     }
0986 
0987     err = otx2_sync_mbox_msg(&pfvf->mbox);
0988     if (!err) {
0989         rsp = (struct cgx_fw_data *)
0990             otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
0991     } else {
0992         rsp = ERR_PTR(err);
0993     }
0994 
0995     mutex_unlock(&pfvf->mbox.lock);
0996     return rsp;
0997 }
0998 
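     /* Report active and supported FEC modes, e.g. queried via
      * "ethtool --show-fec <iface>".
      */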
0999 static int otx2_get_fecparam(struct net_device *netdev,
1000                  struct ethtool_fecparam *fecparam)
1001 {
1002     struct otx2_nic *pfvf = netdev_priv(netdev);
1003     struct cgx_fw_data *rsp;
1004     const int fec[] = {
1005         ETHTOOL_FEC_OFF,
1006         ETHTOOL_FEC_BASER,
1007         ETHTOOL_FEC_RS,
1008         ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
1009 #define FEC_MAX_INDEX 4
1010     if (pfvf->linfo.fec < FEC_MAX_INDEX)
1011         fecparam->active_fec = fec[pfvf->linfo.fec];
1012 
1013     rsp = otx2_get_fwdata(pfvf);
1014     if (IS_ERR(rsp))
1015         return PTR_ERR(rsp);
1016 
1017     if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
1018         if (!rsp->fwdata.supported_fec)
1019             fecparam->fec = ETHTOOL_FEC_NONE;
1020         else
1021             fecparam->fec = fec[rsp->fwdata.supported_fec];
1022     }
1023     return 0;
1024 }
1025 
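     /* Request a FEC mode change through firmware, e.g. via
      * "ethtool --set-fec <iface> encoding rs".
      */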
1026 static int otx2_set_fecparam(struct net_device *netdev,
1027                  struct ethtool_fecparam *fecparam)
1028 {
1029     struct otx2_nic *pfvf = netdev_priv(netdev);
1030     struct mbox *mbox = &pfvf->mbox;
1031     struct fec_mode *req, *rsp;
1032     int err = 0, fec = 0;
1033 
1034     switch (fecparam->fec) {
1035     /* Firmware does not support AUTO mode; treat it as FEC_OFF */
1036     case ETHTOOL_FEC_OFF:
1037     case ETHTOOL_FEC_AUTO:
1038         fec = OTX2_FEC_OFF;
1039         break;
1040     case ETHTOOL_FEC_RS:
1041         fec = OTX2_FEC_RS;
1042         break;
1043     case ETHTOOL_FEC_BASER:
1044         fec = OTX2_FEC_BASER;
1045         break;
1046     default:
1047         netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
1048                 fecparam->fec);
1049         return -EINVAL;
1050     }
1051 
1052     if (fec == pfvf->linfo.fec)
1053         return 0;
1054 
1055     mutex_lock(&mbox->lock);
1056     req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
1057     if (!req) {
1058         err = -ENOMEM;
1059         goto end;
1060     }
1061     req->fec = fec;
1062     err = otx2_sync_mbox_msg(&pfvf->mbox);
1063     if (err)
1064         goto end;
1065 
1066     rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
1067                            0, &req->hdr);
1068     if (rsp->fec >= 0)
1069         pfvf->linfo.fec = rsp->fec;
1070     else
1071         err = rsp->fec;
1072 end:
1073     mutex_unlock(&mbox->lock);
1074     return err;
1075 }
1076 
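     /* Map a firmware FEC capability index onto ethtool FEC link mode bits */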
1077 static void otx2_get_fec_info(u64 index, int req_mode,
1078                   struct ethtool_link_ksettings *link_ksettings)
1079 {
1080     __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
1081 
1082     switch (index) {
1083     case OTX2_FEC_NONE:
1084         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1085                  otx2_fec_modes);
1086         break;
1087     case OTX2_FEC_BASER:
1088         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1089                  otx2_fec_modes);
1090         break;
1091     case OTX2_FEC_RS:
1092         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1093                  otx2_fec_modes);
1094         break;
1095     case OTX2_FEC_BASER | OTX2_FEC_RS:
1096         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1097                  otx2_fec_modes);
1098         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1099                  otx2_fec_modes);
1100         break;
1101     }
1102 
1103     /* Add fec modes to existing modes */
1104     if (req_mode == OTX2_MODE_ADVERTISED)
1105         linkmode_or(link_ksettings->link_modes.advertising,
1106                 link_ksettings->link_modes.advertising,
1107                 otx2_fec_modes);
1108     else
1109         linkmode_or(link_ksettings->link_modes.supported,
1110                 link_ksettings->link_modes.supported,
1111                 otx2_fec_modes);
1112 }
1113 
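     /* Convert a CGX link mode bitmap into ethtool supported/advertised link modes */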
1114 static void otx2_get_link_mode_info(u64 link_mode_bmap,
1115                     bool req_mode,
1116                     struct ethtool_link_ksettings
1117                     *link_ksettings)
1118 {
1119     __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
1120     const int otx2_sgmii_features[6] = {
1121         ETHTOOL_LINK_MODE_10baseT_Half_BIT,
1122         ETHTOOL_LINK_MODE_10baseT_Full_BIT,
1123         ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1124         ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1125         ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
1126         ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1127     };
1128     /* CGX link modes to Ethtool link mode mapping */
1129     const int cgx_link_mode[27] = {
1130         0, /* SGMII  Mode */
1131         ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1132         ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1133         ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1134         ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1135         ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1136         0,
1137         ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1138         0,
1139         0,
1140         ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1141         ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1142         ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1143         ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1144         ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1145         ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1146         0,
1147         ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
1148         0,
1149         ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1150         ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
1151         ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
1152         0,
1153         ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1154         ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1155         ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1156         ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
1157     };
1158     u8 bit;
1159 
1160     for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
1161         /* SGMII mode is set */
1162         if (bit == 0)
1163             linkmode_set_bit_array(otx2_sgmii_features,
1164                            ARRAY_SIZE(otx2_sgmii_features),
1165                            otx2_link_modes);
1166         else
1167             linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
1168     }
1169 
1170     if (req_mode == OTX2_MODE_ADVERTISED)
1171         linkmode_copy(link_ksettings->link_modes.advertising,
1172                   otx2_link_modes);
1173     else
1174         linkmode_copy(link_ksettings->link_modes.supported,
1175                   otx2_link_modes);
1176 }
1177 
1178 static int otx2_get_link_ksettings(struct net_device *netdev,
1179                    struct ethtool_link_ksettings *cmd)
1180 {
1181     struct otx2_nic *pfvf = netdev_priv(netdev);
1182     struct cgx_fw_data *rsp = NULL;
1183 
1184     cmd->base.duplex  = pfvf->linfo.full_duplex;
1185     cmd->base.speed   = pfvf->linfo.speed;
1186     cmd->base.autoneg = pfvf->linfo.an;
1187 
1188     rsp = otx2_get_fwdata(pfvf);
1189     if (IS_ERR(rsp))
1190         return PTR_ERR(rsp);
1191 
1192     if (rsp->fwdata.supported_an)
1193         ethtool_link_ksettings_add_link_mode(cmd,
1194                              supported,
1195                              Autoneg);
1196 
1197     otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
1198                 OTX2_MODE_ADVERTISED, cmd);
1199     otx2_get_fec_info(rsp->fwdata.advertised_fec,
1200               OTX2_MODE_ADVERTISED, cmd);
1201     otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
1202                 OTX2_MODE_SUPPORTED, cmd);
1203     otx2_get_fec_info(rsp->fwdata.supported_fec,
1204               OTX2_MODE_SUPPORTED, cmd);
1205     return 0;
1206 }
1207 
1208 static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
1209                      u64 *mode)
1210 {
1211     u32 bit_pos;
1212 
1213     /* Firmware does not support requesting multiple advertised modes;
1214      * return the first set bit.
1215      */
1216     bit_pos = find_first_bit(cmd->link_modes.advertising,
1217                  __ETHTOOL_LINK_MODE_MASK_NBITS);
1218     if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
1219         *mode = bit_pos;
1220 }
1221 
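     /* Change speed/duplex/autoneg through firmware, e.g. via
      * "ethtool -s <iface> speed 25000 duplex full autoneg off".
      */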
1222 static int otx2_set_link_ksettings(struct net_device *netdev,
1223                    const struct ethtool_link_ksettings *cmd)
1224 {
1225     struct otx2_nic *pf = netdev_priv(netdev);
1226     struct ethtool_link_ksettings cur_ks;
1227     struct cgx_set_link_mode_req *req;
1228     struct mbox *mbox = &pf->mbox;
1229     int err = 0;
1230 
1231     memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
1232 
1233     if (!ethtool_validate_speed(cmd->base.speed) ||
1234         !ethtool_validate_duplex(cmd->base.duplex))
1235         return -EINVAL;
1236 
1237     if (cmd->base.autoneg != AUTONEG_ENABLE &&
1238         cmd->base.autoneg != AUTONEG_DISABLE)
1239         return -EINVAL;
1240 
1241     otx2_get_link_ksettings(netdev, &cur_ks);
1242 
1243     /* Check requested modes against the modes supported by hardware */
1244     if (!linkmode_subset(cmd->link_modes.advertising,
1245                  cur_ks.link_modes.supported))
1246         return -EINVAL;
1247 
1248     mutex_lock(&mbox->lock);
1249     req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
1250     if (!req) {
1251         err = -ENOMEM;
1252         goto end;
1253     }
1254 
1255     req->args.speed = cmd->base.speed;
1256     /* Firmware expects 1 for half duplex and 0 for full duplex,
1257      * hence the inversion.
1258      */
1259     req->args.duplex = cmd->base.duplex ^ 0x1;
1260     req->args.an = cmd->base.autoneg;
1261     otx2_get_advertised_mode(cmd, &req->args.mode);
1262 
1263     err = otx2_sync_mbox_msg(&pf->mbox);
1264 end:
1265     mutex_unlock(&mbox->lock);
1266     return err;
1267 }
1268 
1269 static const struct ethtool_ops otx2_ethtool_ops = {
1270     .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1271                      ETHTOOL_COALESCE_MAX_FRAMES |
1272                      ETHTOOL_COALESCE_USE_ADAPTIVE,
1273     .supported_ring_params  = ETHTOOL_RING_USE_RX_BUF_LEN |
1274                   ETHTOOL_RING_USE_CQE_SIZE,
1275     .get_link       = otx2_get_link,
1276     .get_drvinfo        = otx2_get_drvinfo,
1277     .get_strings        = otx2_get_strings,
1278     .get_ethtool_stats  = otx2_get_ethtool_stats,
1279     .get_sset_count     = otx2_get_sset_count,
1280     .set_channels       = otx2_set_channels,
1281     .get_channels       = otx2_get_channels,
1282     .get_ringparam      = otx2_get_ringparam,
1283     .set_ringparam      = otx2_set_ringparam,
1284     .get_coalesce       = otx2_get_coalesce,
1285     .set_coalesce       = otx2_set_coalesce,
1286     .get_rxnfc      = otx2_get_rxnfc,
1287     .set_rxnfc              = otx2_set_rxnfc,
1288     .get_rxfh_key_size  = otx2_get_rxfh_key_size,
1289     .get_rxfh_indir_size    = otx2_get_rxfh_indir_size,
1290     .get_rxfh       = otx2_get_rxfh,
1291     .set_rxfh       = otx2_set_rxfh,
1292     .get_rxfh_context   = otx2_get_rxfh_context,
1293     .set_rxfh_context   = otx2_set_rxfh_context,
1294     .get_msglevel       = otx2_get_msglevel,
1295     .set_msglevel       = otx2_set_msglevel,
1296     .get_pauseparam     = otx2_get_pauseparam,
1297     .set_pauseparam     = otx2_set_pauseparam,
1298     .get_ts_info        = otx2_get_ts_info,
1299     .get_fecparam       = otx2_get_fecparam,
1300     .set_fecparam       = otx2_set_fecparam,
1301     .get_link_ksettings     = otx2_get_link_ksettings,
1302     .set_link_ksettings     = otx2_set_link_ksettings,
1303 };
1304 
1305 void otx2_set_ethtool_ops(struct net_device *netdev)
1306 {
1307     netdev->ethtool_ops = &otx2_ethtool_ops;
1308 }
1309 
1310 /* VF's ethtool APIs */
1311 static void otx2vf_get_drvinfo(struct net_device *netdev,
1312                    struct ethtool_drvinfo *info)
1313 {
1314     struct otx2_nic *vf = netdev_priv(netdev);
1315 
1316     strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
1317     strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
1318 }
1319 
1320 static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
1321 {
1322     struct otx2_nic *vf = netdev_priv(netdev);
1323     int stats;
1324 
1325     if (sset != ETH_SS_STATS)
1326         return;
1327 
1328     for (stats = 0; stats < otx2_n_dev_stats; stats++) {
1329         memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
1330         data += ETH_GSTRING_LEN;
1331     }
1332 
1333     for (stats = 0; stats < otx2_n_drv_stats; stats++) {
1334         memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
1335         data += ETH_GSTRING_LEN;
1336     }
1337 
1338     otx2_get_qset_strings(vf, &data, 0);
1339 
1340     strcpy(data, "reset_count");
1341     data += ETH_GSTRING_LEN;
1342 }
1343 
1344 static void otx2vf_get_ethtool_stats(struct net_device *netdev,
1345                      struct ethtool_stats *stats, u64 *data)
1346 {
1347     struct otx2_nic *vf = netdev_priv(netdev);
1348     int stat;
1349 
1350     otx2_get_dev_stats(vf);
1351     for (stat = 0; stat < otx2_n_dev_stats; stat++)
1352         *(data++) = ((u64 *)&vf->hw.dev_stats)
1353                 [otx2_dev_stats[stat].index];
1354 
1355     for (stat = 0; stat < otx2_n_drv_stats; stat++)
1356         *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
1357                         [otx2_drv_stats[stat].index]);
1358 
1359     otx2_get_qset_stats(vf, stats, &data);
1360     *(data++) = vf->reset_count;
1361 }
1362 
1363 static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
1364 {
1365     struct otx2_nic *vf = netdev_priv(netdev);
1366     int qstats_count;
1367 
1368     if (sset != ETH_SS_STATS)
1369         return -EINVAL;
1370 
1371     qstats_count = otx2_n_queue_stats *
1372                (vf->hw.rx_queues + vf->hw.tx_queues);
1373 
1374     return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
1375 }
1376 
1377 static int otx2vf_get_link_ksettings(struct net_device *netdev,
1378                      struct ethtool_link_ksettings *cmd)
1379 {
1380     struct otx2_nic *pfvf = netdev_priv(netdev);
1381 
1382     if (is_otx2_lbkvf(pfvf->pdev)) {
1383         cmd->base.duplex = DUPLEX_FULL;
1384         cmd->base.speed = SPEED_100000;
1385     } else {
1386         return otx2_get_link_ksettings(netdev, cmd);
1387     }
1388     return 0;
1389 }
1390 
1391 static const struct ethtool_ops otx2vf_ethtool_ops = {
1392     .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1393                      ETHTOOL_COALESCE_MAX_FRAMES |
1394                      ETHTOOL_COALESCE_USE_ADAPTIVE,
1395     .supported_ring_params  = ETHTOOL_RING_USE_RX_BUF_LEN |
1396                   ETHTOOL_RING_USE_CQE_SIZE,
1397     .get_link       = otx2_get_link,
1398     .get_drvinfo        = otx2vf_get_drvinfo,
1399     .get_strings        = otx2vf_get_strings,
1400     .get_ethtool_stats  = otx2vf_get_ethtool_stats,
1401     .get_sset_count     = otx2vf_get_sset_count,
1402     .set_channels       = otx2_set_channels,
1403     .get_channels       = otx2_get_channels,
1404     .get_rxnfc      = otx2_get_rxnfc,
1405     .set_rxnfc              = otx2_set_rxnfc,
1406     .get_rxfh_key_size  = otx2_get_rxfh_key_size,
1407     .get_rxfh_indir_size    = otx2_get_rxfh_indir_size,
1408     .get_rxfh       = otx2_get_rxfh,
1409     .set_rxfh       = otx2_set_rxfh,
1410     .get_rxfh_context   = otx2_get_rxfh_context,
1411     .set_rxfh_context   = otx2_set_rxfh_context,
1412     .get_ringparam      = otx2_get_ringparam,
1413     .set_ringparam      = otx2_set_ringparam,
1414     .get_coalesce       = otx2_get_coalesce,
1415     .set_coalesce       = otx2_set_coalesce,
1416     .get_msglevel       = otx2_get_msglevel,
1417     .set_msglevel       = otx2_set_msglevel,
1418     .get_pauseparam     = otx2_get_pauseparam,
1419     .set_pauseparam     = otx2_set_pauseparam,
1420     .get_link_ksettings     = otx2vf_get_link_ksettings,
1421     .get_ts_info        = otx2_get_ts_info,
1422 };
1423 
1424 void otx2vf_set_ethtool_ops(struct net_device *netdev)
1425 {
1426     netdev->ethtool_ops = &otx2vf_ethtool_ops;
1427 }
1428 EXPORT_SYMBOL(otx2vf_set_ethtool_ops);