0001 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
0002 /* QLogic qede NIC Driver
0003  * Copyright (c) 2015-2017  QLogic Corporation
0004  * Copyright (c) 2019-2020 Marvell International Ltd.
0005  */
0006 
0007 #include <linux/version.h>
0008 #include <linux/types.h>
0009 #include <linux/netdevice.h>
0010 #include <linux/etherdevice.h>
0011 #include <linux/ethtool.h>
0012 #include <linux/string.h>
0013 #include <linux/pci.h>
0014 #include <linux/capability.h>
0015 #include <linux/vmalloc.h>
0016 #include <linux/phylink.h>
0017 
0018 #include "qede.h"
0019 #include "qede_ptp.h"
0020 
0021 #define QEDE_RQSTAT_OFFSET(stat_name) \
0022     (offsetof(struct qede_rx_queue, stat_name))
0023 #define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
0024 #define QEDE_RQSTAT(stat_name) \
0025     {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
0026 
0027 #define QEDE_SELFTEST_POLL_COUNT 100
0028 #define QEDE_DUMP_VERSION   0x1
0029 #define QEDE_DUMP_NVM_ARG_COUNT 2
0030 
0031 static const struct {
0032     u64 offset;
0033     char string[ETH_GSTRING_LEN];
0034 } qede_rqstats_arr[] = {
0035     QEDE_RQSTAT(rcv_pkts),
0036     QEDE_RQSTAT(rx_hw_errors),
0037     QEDE_RQSTAT(rx_alloc_errors),
0038     QEDE_RQSTAT(rx_ip_frags),
0039     QEDE_RQSTAT(xdp_no_pass),
0040 };
0041 
0042 #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
0043 #define QEDE_TQSTAT_OFFSET(stat_name) \
0044     (offsetof(struct qede_tx_queue, stat_name))
0045 #define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
0046 #define QEDE_TQSTAT(stat_name) \
0047     {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
0048 #define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
0049 static const struct {
0050     u64 offset;
0051     char string[ETH_GSTRING_LEN];
0052 } qede_tqstats_arr[] = {
0053     QEDE_TQSTAT(xmit_pkts),
0054     QEDE_TQSTAT(stopped_cnt),
0055     QEDE_TQSTAT(tx_mem_alloc_err),
0056 };
0057 
0058 #define QEDE_STAT_OFFSET(stat_name, type, base) \
0059     (offsetof(type, stat_name) + (base))
0060 #define QEDE_STAT_STRING(stat_name) (#stat_name)
0061 #define _QEDE_STAT(stat_name, type, base, attr) \
0062     {QEDE_STAT_OFFSET(stat_name, type, base), \
0063      QEDE_STAT_STRING(stat_name), \
0064      attr}
0065 #define QEDE_STAT(stat_name) \
0066     _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
0067 #define QEDE_PF_STAT(stat_name) \
0068     _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
0069            BIT(QEDE_STAT_PF_ONLY))
0070 #define QEDE_PF_BB_STAT(stat_name) \
0071     _QEDE_STAT(stat_name, struct qede_stats_bb, \
0072            offsetof(struct qede_stats, bb), \
0073            BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
0074 #define QEDE_PF_AH_STAT(stat_name) \
0075     _QEDE_STAT(stat_name, struct qede_stats_ah, \
0076            offsetof(struct qede_stats, ah), \
0077            BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
0078 static const struct {
0079     u64 offset;
0080     char string[ETH_GSTRING_LEN];
0081     unsigned long attr;
0082 #define QEDE_STAT_PF_ONLY   0
0083 #define QEDE_STAT_BB_ONLY   1
0084 #define QEDE_STAT_AH_ONLY   2
0085 } qede_stats_arr[] = {
0086     QEDE_STAT(rx_ucast_bytes),
0087     QEDE_STAT(rx_mcast_bytes),
0088     QEDE_STAT(rx_bcast_bytes),
0089     QEDE_STAT(rx_ucast_pkts),
0090     QEDE_STAT(rx_mcast_pkts),
0091     QEDE_STAT(rx_bcast_pkts),
0092 
0093     QEDE_STAT(tx_ucast_bytes),
0094     QEDE_STAT(tx_mcast_bytes),
0095     QEDE_STAT(tx_bcast_bytes),
0096     QEDE_STAT(tx_ucast_pkts),
0097     QEDE_STAT(tx_mcast_pkts),
0098     QEDE_STAT(tx_bcast_pkts),
0099 
0100     QEDE_PF_STAT(rx_64_byte_packets),
0101     QEDE_PF_STAT(rx_65_to_127_byte_packets),
0102     QEDE_PF_STAT(rx_128_to_255_byte_packets),
0103     QEDE_PF_STAT(rx_256_to_511_byte_packets),
0104     QEDE_PF_STAT(rx_512_to_1023_byte_packets),
0105     QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
0106     QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
0107     QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
0108     QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
0109     QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
0110     QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
0111     QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
0112     QEDE_PF_STAT(tx_64_byte_packets),
0113     QEDE_PF_STAT(tx_65_to_127_byte_packets),
0114     QEDE_PF_STAT(tx_128_to_255_byte_packets),
0115     QEDE_PF_STAT(tx_256_to_511_byte_packets),
0116     QEDE_PF_STAT(tx_512_to_1023_byte_packets),
0117     QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
0118     QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
0119     QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
0120     QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
0121     QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
0122     QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
0123     QEDE_PF_STAT(rx_mac_crtl_frames),
0124     QEDE_PF_STAT(tx_mac_ctrl_frames),
0125     QEDE_PF_STAT(rx_pause_frames),
0126     QEDE_PF_STAT(tx_pause_frames),
0127     QEDE_PF_STAT(rx_pfc_frames),
0128     QEDE_PF_STAT(tx_pfc_frames),
0129 
0130     QEDE_PF_STAT(rx_crc_errors),
0131     QEDE_PF_STAT(rx_align_errors),
0132     QEDE_PF_STAT(rx_carrier_errors),
0133     QEDE_PF_STAT(rx_oversize_packets),
0134     QEDE_PF_STAT(rx_jabbers),
0135     QEDE_PF_STAT(rx_undersize_packets),
0136     QEDE_PF_STAT(rx_fragments),
0137     QEDE_PF_BB_STAT(tx_lpi_entry_count),
0138     QEDE_PF_BB_STAT(tx_total_collisions),
0139     QEDE_PF_STAT(brb_truncates),
0140     QEDE_PF_STAT(brb_discards),
0141     QEDE_STAT(no_buff_discards),
0142     QEDE_PF_STAT(mftag_filter_discards),
0143     QEDE_PF_STAT(mac_filter_discards),
0144     QEDE_PF_STAT(gft_filter_drop),
0145     QEDE_STAT(tx_err_drop_pkts),
0146     QEDE_STAT(ttl0_discard),
0147     QEDE_STAT(packet_too_big_discard),
0148 
0149     QEDE_STAT(coalesced_pkts),
0150     QEDE_STAT(coalesced_events),
0151     QEDE_STAT(coalesced_aborts_num),
0152     QEDE_STAT(non_coalesced_pkts),
0153     QEDE_STAT(coalesced_bytes),
0154 
0155     QEDE_STAT(link_change_count),
0156     QEDE_STAT(ptp_skip_txts),
0157 };
0158 
0159 #define QEDE_NUM_STATS  ARRAY_SIZE(qede_stats_arr)
0160 #define QEDE_STAT_IS_PF_ONLY(i) \
0161     test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
0162 #define QEDE_STAT_IS_BB_ONLY(i) \
0163     test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
0164 #define QEDE_STAT_IS_AH_ONLY(i) \
0165     test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
0166 
0167 enum {
0168     QEDE_PRI_FLAG_CMT,
0169     QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
0170     QEDE_PRI_FLAG_RECOVER_ON_ERROR,
0171     QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */
0172     QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */
0173     QEDE_PRI_FLAG_LEN,
0174 };
0175 
0176 static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
0177     "Coupled-Function",
0178     "SmartAN capable",
0179     "Recover on error",
0180     "ESL capable",
0181     "ESL active",
0182 };
0183 
0184 enum qede_ethtool_tests {
0185     QEDE_ETHTOOL_INT_LOOPBACK,
0186     QEDE_ETHTOOL_INTERRUPT_TEST,
0187     QEDE_ETHTOOL_MEMORY_TEST,
0188     QEDE_ETHTOOL_REGISTER_TEST,
0189     QEDE_ETHTOOL_CLOCK_TEST,
0190     QEDE_ETHTOOL_NVRAM_TEST,
0191     QEDE_ETHTOOL_TEST_MAX
0192 };
0193 
0194 static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
0195     "Internal loopback (offline)",
0196     "Interrupt (online)\t",
0197     "Memory (online)\t\t",
0198     "Register (online)\t",
0199     "Clock (online)\t\t",
0200     "Nvram (online)\t\t",
0201 };
0202 
0203 /* Forced speed capabilities maps */
0204 
0205 struct qede_forced_speed_map {
0206     u32     speed;
0207     __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
0208 
0209     const u32   *cap_arr;
0210     u32     arr_size;
0211 };
0212 
0213 #define QEDE_FORCED_SPEED_MAP(value)                    \
0214 {                                   \
0215     .speed      = SPEED_##value,                \
0216     .cap_arr    = qede_forced_speed_##value,            \
0217     .arr_size   = ARRAY_SIZE(qede_forced_speed_##value),    \
0218 }
0219 
0220 static const u32 qede_forced_speed_1000[] __initconst = {
0221     ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
0222     ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
0223     ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
0224 };
0225 
0226 static const u32 qede_forced_speed_10000[] __initconst = {
0227     ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
0228     ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
0229     ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
0230     ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
0231     ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
0232     ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
0233     ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
0234     ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
0235 };
0236 
0237 static const u32 qede_forced_speed_20000[] __initconst = {
0238     ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
0239 };
0240 
0241 static const u32 qede_forced_speed_25000[] __initconst = {
0242     ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
0243     ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
0244     ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
0245 };
0246 
0247 static const u32 qede_forced_speed_40000[] __initconst = {
0248     ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
0249     ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
0250     ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
0251     ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
0252 };
0253 
0254 static const u32 qede_forced_speed_50000[] __initconst = {
0255     ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
0256     ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
0257     ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
0258 };
0259 
0260 static const u32 qede_forced_speed_100000[] __initconst = {
0261     ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
0262     ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
0263     ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
0264     ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
0265 };
0266 
0267 static struct qede_forced_speed_map qede_forced_speed_maps[] __ro_after_init = {
0268     QEDE_FORCED_SPEED_MAP(1000),
0269     QEDE_FORCED_SPEED_MAP(10000),
0270     QEDE_FORCED_SPEED_MAP(20000),
0271     QEDE_FORCED_SPEED_MAP(25000),
0272     QEDE_FORCED_SPEED_MAP(40000),
0273     QEDE_FORCED_SPEED_MAP(50000),
0274     QEDE_FORCED_SPEED_MAP(100000),
0275 };
0276 
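/* Expand the per-speed bit-index arrays above into linkmode masks once at
 * module init; the __initconst arrays are then released, so only the masks
 * remain in use at runtime.
 */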
0277 void __init qede_forced_speed_maps_init(void)
0278 {
0279     struct qede_forced_speed_map *map;
0280     u32 i;
0281 
0282     for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
0283         map = qede_forced_speed_maps + i;
0284 
0285         linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
0286         map->cap_arr = NULL;
0287         map->arr_size = 0;
0288     }
0289 }
0290 
0291 /* Ethtool callbacks */
0292 
0293 static void qede_get_strings_stats_txq(struct qede_dev *edev,
0294                        struct qede_tx_queue *txq, u8 **buf)
0295 {
0296     int i;
0297 
0298     for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
0299         if (txq->is_xdp)
0300             sprintf(*buf, "%d [XDP]: %s",
0301                 QEDE_TXQ_XDP_TO_IDX(edev, txq),
0302                 qede_tqstats_arr[i].string);
0303         else
0304             sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
0305                 qede_tqstats_arr[i].string);
0306         *buf += ETH_GSTRING_LEN;
0307     }
0308 }
0309 
0310 static void qede_get_strings_stats_rxq(struct qede_dev *edev,
0311                        struct qede_rx_queue *rxq, u8 **buf)
0312 {
0313     int i;
0314 
0315     for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
0316         sprintf(*buf, "%d: %s", rxq->rxq_id,
0317             qede_rqstats_arr[i].string);
0318         *buf += ETH_GSTRING_LEN;
0319     }
0320 }
0321 
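/* A stat is skipped when it is PF-only on a VF, or when it belongs to the
 * other ASIC family (BB-only stats on AH devices and vice versa).
 */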
0322 static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
0323 {
0324     return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
0325            (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
0326            (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
0327 }
0328 
0329 static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
0330 {
0331     struct qede_fastpath *fp;
0332     int i;
0333 
0334     /* Account for queue statistics */
0335     for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
0336         fp = &edev->fp_array[i];
0337 
0338         if (fp->type & QEDE_FASTPATH_RX)
0339             qede_get_strings_stats_rxq(edev, fp->rxq, &buf);
0340 
0341         if (fp->type & QEDE_FASTPATH_XDP)
0342             qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
0343 
0344         if (fp->type & QEDE_FASTPATH_TX) {
0345             int cos;
0346 
0347             for_each_cos_in_txq(edev, cos)
0348                 qede_get_strings_stats_txq(edev,
0349                                &fp->txq[cos], &buf);
0350         }
0351     }
0352 
0353     /* Account for non-queue statistics */
0354     for (i = 0; i < QEDE_NUM_STATS; i++) {
0355         if (qede_is_irrelevant_stat(edev, i))
0356             continue;
0357         strcpy(buf, qede_stats_arr[i].string);
0358         buf += ETH_GSTRING_LEN;
0359     }
0360 }
0361 
0362 static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
0363 {
0364     struct qede_dev *edev = netdev_priv(dev);
0365 
0366     switch (stringset) {
0367     case ETH_SS_STATS:
0368         qede_get_strings_stats(edev, buf);
0369         break;
0370     case ETH_SS_PRIV_FLAGS:
0371         memcpy(buf, qede_private_arr,
0372                ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
0373         break;
0374     case ETH_SS_TEST:
0375         memcpy(buf, qede_tests_str_arr,
0376                ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
0377         break;
0378     default:
0379         DP_VERBOSE(edev, QED_MSG_DEBUG,
0380                "Unsupported stringset 0x%08x\n", stringset);
0381     }
0382 }
0383 
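/* Per-queue stat values are read straight out of the queue structure using
 * the byte offsets recorded in qede_tqstats_arr/qede_rqstats_arr, in the
 * same order as the strings emitted above.
 */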
0384 static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
0385 {
0386     int i;
0387 
0388     for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
0389         **buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
0390         (*buf)++;
0391     }
0392 }
0393 
0394 static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
0395 {
0396     int i;
0397 
0398     for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
0399         **buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
0400         (*buf)++;
0401     }
0402 }
0403 
0404 static void qede_get_ethtool_stats(struct net_device *dev,
0405                    struct ethtool_stats *stats, u64 *buf)
0406 {
0407     struct qede_dev *edev = netdev_priv(dev);
0408     struct qede_fastpath *fp;
0409     int i;
0410 
0411     qede_fill_by_demand_stats(edev);
0412 
0413     /* Need to protect the access to the fastpath array */
0414     __qede_lock(edev);
0415 
0416     for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
0417         fp = &edev->fp_array[i];
0418 
0419         if (fp->type & QEDE_FASTPATH_RX)
0420             qede_get_ethtool_stats_rxq(fp->rxq, &buf);
0421 
0422         if (fp->type & QEDE_FASTPATH_XDP)
0423             qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
0424 
0425         if (fp->type & QEDE_FASTPATH_TX) {
0426             int cos;
0427 
0428             for_each_cos_in_txq(edev, cos)
0429                 qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
0430         }
0431     }
0432 
0433     for (i = 0; i < QEDE_NUM_STATS; i++) {
0434         if (qede_is_irrelevant_stat(edev, i))
0435             continue;
0436         *buf = *((u64 *)(((void *)&edev->stats) +
0437                  qede_stats_arr[i].offset));
0438 
0439         buf++;
0440     }
0441 
0442     __qede_unlock(edev);
0443 }
0444 
0445 static int qede_get_sset_count(struct net_device *dev, int stringset)
0446 {
0447     struct qede_dev *edev = netdev_priv(dev);
0448     int num_stats = QEDE_NUM_STATS, i;
0449 
0450     switch (stringset) {
0451     case ETH_SS_STATS:
0452         for (i = 0; i < QEDE_NUM_STATS; i++)
0453             if (qede_is_irrelevant_stat(edev, i))
0454                 num_stats--;
0455 
0456         /* Account for the Regular Tx statistics */
0457         num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS *
0458                 edev->dev_info.num_tc;
0459 
0460         /* Account for the Regular Rx statistics */
0461         num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
0462 
0463         /* Account for XDP statistics [if needed] */
0464         if (edev->xdp_prog)
0465             num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
0466         return num_stats;
0467 
0468     case ETH_SS_PRIV_FLAGS:
0469         return QEDE_PRI_FLAG_LEN;
0470     case ETH_SS_TEST:
0471         if (!IS_VF(edev))
0472             return QEDE_ETHTOOL_TEST_MAX;
0473         else
0474             return 0;
0475     default:
0476         DP_VERBOSE(edev, QED_MSG_DEBUG,
0477                "Unsupported stringset 0x%08x\n", stringset);
0478         return -EINVAL;
0479     }
0480 }
0481 
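/* Report the driver private flags in the same order as qede_private_arr:
 * CMT (more than one HW function), SmartAN support, recover-on-error, and
 * the ESL support/active status queried from the management FW.
 */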
0482 static u32 qede_get_priv_flags(struct net_device *dev)
0483 {
0484     struct qede_dev *edev = netdev_priv(dev);
0485     bool esl_active;
0486     u32 flags = 0;
0487 
0488     if (edev->dev_info.common.num_hwfns > 1)
0489         flags |= BIT(QEDE_PRI_FLAG_CMT);
0490 
0491     if (edev->dev_info.common.smart_an)
0492         flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT);
0493 
0494     if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
0495         flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
0496 
0497     if (edev->dev_info.common.esl)
0498         flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT);
0499 
0500     edev->ops->common->get_esl_status(edev->cdev, &esl_active);
0501 
0502     if (esl_active)
0503         flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE);
0504 
0505     return flags;
0506 }
0507 
0508 static int qede_set_priv_flags(struct net_device *dev, u32 flags)
0509 {
0510     struct qede_dev *edev = netdev_priv(dev);
0511     u32 cflags = qede_get_priv_flags(dev);
0512     u32 dflags = flags ^ cflags;
0513 
0514     /* can only change RECOVER_ON_ERROR flag */
0515     if (dflags & ~BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
0516         return -EINVAL;
0517 
0518     if (flags & BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
0519         set_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
0520     else
0521         clear_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
0522 
0523     return 0;
0524 }
0525 
0526 static int qede_get_link_ksettings(struct net_device *dev,
0527                    struct ethtool_link_ksettings *cmd)
0528 {
0529     typeof(cmd->link_modes) *link_modes = &cmd->link_modes;
0530     struct ethtool_link_settings *base = &cmd->base;
0531     struct qede_dev *edev = netdev_priv(dev);
0532     struct qed_link_output current_link;
0533 
0534     __qede_lock(edev);
0535 
0536     memset(&current_link, 0, sizeof(current_link));
0537     edev->ops->common->get_link(edev->cdev, &current_link);
0538 
0539     linkmode_copy(link_modes->supported, current_link.supported_caps);
0540     linkmode_copy(link_modes->advertising, current_link.advertised_caps);
0541     linkmode_copy(link_modes->lp_advertising, current_link.lp_caps);
0542 
0543     if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
0544         base->speed = current_link.speed;
0545         base->duplex = current_link.duplex;
0546     } else {
0547         base->speed = SPEED_UNKNOWN;
0548         base->duplex = DUPLEX_UNKNOWN;
0549     }
0550 
0551     __qede_unlock(edev);
0552 
0553     base->port = current_link.port;
0554     base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
0555             AUTONEG_DISABLE;
0556 
0557     return 0;
0558 }
0559 
0560 static int qede_set_link_ksettings(struct net_device *dev,
0561                    const struct ethtool_link_ksettings *cmd)
0562 {
0563     const struct ethtool_link_settings *base = &cmd->base;
0564     struct qede_dev *edev = netdev_priv(dev);
0565     const struct qede_forced_speed_map *map;
0566     struct qed_link_output current_link;
0567     struct qed_link_params params;
0568     u32 i;
0569 
0570     if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
0571         DP_INFO(edev, "Link settings are not allowed to be changed\n");
0572         return -EOPNOTSUPP;
0573     }
0574     memset(&current_link, 0, sizeof(current_link));
0575     memset(&params, 0, sizeof(params));
0576     edev->ops->common->get_link(edev->cdev, &current_link);
0577 
0578     params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
0579     params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
0580 
0581     if (base->autoneg == AUTONEG_ENABLE) {
0582         if (!phylink_test(current_link.supported_caps, Autoneg)) {
0583             DP_INFO(edev, "Auto negotiation is not supported\n");
0584             return -EOPNOTSUPP;
0585         }
0586 
0587         params.autoneg = true;
0588         params.forced_speed = 0;
0589 
0590         linkmode_copy(params.adv_speeds, cmd->link_modes.advertising);
0591     } else {        /* forced speed */
0592         params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
0593         params.autoneg = false;
0594         params.forced_speed = base->speed;
0595 
0596         for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
0597             map = qede_forced_speed_maps + i;
0598 
0599             if (base->speed != map->speed ||
0600                 !linkmode_intersects(current_link.supported_caps,
0601                          map->caps))
0602                 continue;
0603 
0604             linkmode_and(params.adv_speeds,
0605                      current_link.supported_caps, map->caps);
0606             goto set_link;
0607         }
0608 
0609         DP_INFO(edev, "Unsupported speed %u\n", base->speed);
0610         return -EINVAL;
0611     }
0612 
0613 set_link:
0614     params.link_up = true;
0615     edev->ops->common->set_link(edev->cdev, &params);
0616 
0617     return 0;
0618 }
0619 
0620 static void qede_get_drvinfo(struct net_device *ndev,
0621                  struct ethtool_drvinfo *info)
0622 {
0623     char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
0624     struct qede_dev *edev = netdev_priv(ndev);
0625     char mbi[ETHTOOL_FWVERS_LEN];
0626 
0627     strlcpy(info->driver, "qede", sizeof(info->driver));
0628 
0629     snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
0630          edev->dev_info.common.fw_major,
0631          edev->dev_info.common.fw_minor,
0632          edev->dev_info.common.fw_rev,
0633          edev->dev_info.common.fw_eng);
0634 
0635     snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
0636          (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
0637          (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
0638          (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
0639          edev->dev_info.common.mfw_rev & 0xFF);
0640 
0641     if ((strlen(storm) + strlen("[storm]")) <
0642         sizeof(info->version))
0643         snprintf(info->version, sizeof(info->version),
0644              "[storm %s]", storm);
0645     else
0646         snprintf(info->version, sizeof(info->version),
0647              "%s", storm);
0648 
0649     if (edev->dev_info.common.mbi_version) {
0650         snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d",
0651              (edev->dev_info.common.mbi_version &
0652               QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET,
0653              (edev->dev_info.common.mbi_version &
0654               QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET,
0655              (edev->dev_info.common.mbi_version &
0656               QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET);
0657         snprintf(info->fw_version, sizeof(info->fw_version),
0658              "mbi %s [mfw %s]", mbi, mfw);
0659     } else {
0660         snprintf(info->fw_version, sizeof(info->fw_version),
0661              "mfw %s", mfw);
0662     }
0663 
0664     strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
0665 }
0666 
0667 static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
0668 {
0669     struct qede_dev *edev = netdev_priv(ndev);
0670 
0671     if (edev->dev_info.common.wol_support) {
0672         wol->supported = WAKE_MAGIC;
0673         wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
0674     }
0675 }
0676 
0677 static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
0678 {
0679     struct qede_dev *edev = netdev_priv(ndev);
0680     bool wol_requested;
0681     int rc;
0682 
0683     if (wol->wolopts & ~WAKE_MAGIC) {
0684         DP_INFO(edev,
0685             "Can't support WoL options other than magic-packet\n");
0686         return -EINVAL;
0687     }
0688 
0689     wol_requested = !!(wol->wolopts & WAKE_MAGIC);
0690     if (wol_requested == edev->wol_enabled)
0691         return 0;
0692 
0693     /* Need to actually change configuration */
0694     if (!edev->dev_info.common.wol_support) {
0695         DP_INFO(edev, "Device doesn't support WoL\n");
0696         return -EINVAL;
0697     }
0698 
0699     rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
0700     if (!rc)
0701         edev->wol_enabled = wol_requested;
0702 
0703     return rc;
0704 }
0705 
0706 static u32 qede_get_msglevel(struct net_device *ndev)
0707 {
0708     struct qede_dev *edev = netdev_priv(ndev);
0709 
0710     return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
0711 }
0712 
0713 static void qede_set_msglevel(struct net_device *ndev, u32 level)
0714 {
0715     struct qede_dev *edev = netdev_priv(ndev);
0716     u32 dp_module = 0;
0717     u8 dp_level = 0;
0718 
0719     qede_config_debug(level, &dp_module, &dp_level);
0720 
0721     edev->dp_level = dp_level;
0722     edev->dp_module = dp_module;
0723     edev->ops->common->update_msglvl(edev->cdev,
0724                      dp_module, dp_level);
0725 }
0726 
0727 static int qede_nway_reset(struct net_device *dev)
0728 {
0729     struct qede_dev *edev = netdev_priv(dev);
0730     struct qed_link_output current_link;
0731     struct qed_link_params link_params;
0732 
0733     if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
0734         DP_INFO(edev, "Link settings are not allowed to be changed\n");
0735         return -EOPNOTSUPP;
0736     }
0737 
0738     if (!netif_running(dev))
0739         return 0;
0740 
0741     memset(&current_link, 0, sizeof(current_link));
0742     edev->ops->common->get_link(edev->cdev, &current_link);
0743     if (!current_link.link_up)
0744         return 0;
0745 
0746     /* Toggle the link */
0747     memset(&link_params, 0, sizeof(link_params));
0748     link_params.link_up = false;
0749     edev->ops->common->set_link(edev->cdev, &link_params);
0750     link_params.link_up = true;
0751     edev->ops->common->set_link(edev->cdev, &link_params);
0752 
0753     return 0;
0754 }
0755 
0756 static u32 qede_get_link(struct net_device *dev)
0757 {
0758     struct qede_dev *edev = netdev_priv(dev);
0759     struct qed_link_output current_link;
0760 
0761     memset(&current_link, 0, sizeof(current_link));
0762     edev->ops->common->get_link(edev->cdev, &current_link);
0763 
0764     return current_link.link_up;
0765 }
0766 
0767 static int qede_flash_device(struct net_device *dev,
0768                  struct ethtool_flash *flash)
0769 {
0770     struct qede_dev *edev = netdev_priv(dev);
0771 
0772     return edev->ops->common->nvm_flash(edev->cdev, flash->data);
0773 }
0774 
0775 static int qede_get_coalesce(struct net_device *dev,
0776                  struct ethtool_coalesce *coal,
0777                  struct kernel_ethtool_coalesce *kernel_coal,
0778                  struct netlink_ext_ack *extack)
0779 {
0780     void *rx_handle = NULL, *tx_handle = NULL;
0781     struct qede_dev *edev = netdev_priv(dev);
0782     u16 rx_coal, tx_coal, i, rc = 0;
0783     struct qede_fastpath *fp;
0784 
0785     rx_coal = QED_DEFAULT_RX_USECS;
0786     tx_coal = QED_DEFAULT_TX_USECS;
0787 
0788     memset(coal, 0, sizeof(struct ethtool_coalesce));
0789 
0790     __qede_lock(edev);
0791     if (edev->state == QEDE_STATE_OPEN) {
0792         for_each_queue(i) {
0793             fp = &edev->fp_array[i];
0794 
0795             if (fp->type & QEDE_FASTPATH_RX) {
0796                 rx_handle = fp->rxq->handle;
0797                 break;
0798             }
0799         }
0800 
0801         rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle);
0802         if (rc) {
0803             DP_INFO(edev, "Read Rx coalesce error\n");
0804             goto out;
0805         }
0806 
0807         for_each_queue(i) {
0808             struct qede_tx_queue *txq;
0809 
0810             fp = &edev->fp_array[i];
0811 
0812             /* All TX queues of a given fastpath use the same
0813              * coalescing value, so there is no need to iterate over
0814              * all TCs; the TC0 txq suffices.
0815              */
0816             if (fp->type & QEDE_FASTPATH_TX) {
0817                 txq = QEDE_FP_TC0_TXQ(fp);
0818                 tx_handle = txq->handle;
0819                 break;
0820             }
0821         }
0822 
0823         rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle);
0824         if (rc)
0825             DP_INFO(edev, "Read Tx coalesce error\n");
0826     }
0827 
0828 out:
0829     __qede_unlock(edev);
0830 
0831     coal->rx_coalesce_usecs = rx_coal;
0832     coal->tx_coalesce_usecs = tx_coal;
0833 
0834     return rc;
0835 }
0836 
0837 int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
0838               struct kernel_ethtool_coalesce *kernel_coal,
0839               struct netlink_ext_ack *extack)
0840 {
0841     struct qede_dev *edev = netdev_priv(dev);
0842     struct qede_fastpath *fp;
0843     int i, rc = 0;
0844     u16 rxc, txc;
0845 
0846     if (!netif_running(dev)) {
0847         DP_INFO(edev, "Interface is down\n");
0848         return -EINVAL;
0849     }
0850 
0851     if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
0852         coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
0853         DP_INFO(edev,
0854             "Can't support requested %s coalesce value [max supported value %d]\n",
0855             coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" :
0856             "tx", QED_COALESCE_MAX);
0857         return -EINVAL;
0858     }
0859 
0860     rxc = (u16)coal->rx_coalesce_usecs;
0861     txc = (u16)coal->tx_coalesce_usecs;
0862     for_each_queue(i) {
0863         fp = &edev->fp_array[i];
0864 
0865         if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
0866             rc = edev->ops->common->set_coalesce(edev->cdev,
0867                                  rxc, 0,
0868                                  fp->rxq->handle);
0869             if (rc) {
0870                 DP_INFO(edev,
0871                     "Set RX coalesce error, rc = %d\n", rc);
0872                 return rc;
0873             }
0874             edev->coal_entry[i].rxc = rxc;
0875             edev->coal_entry[i].isvalid = true;
0876         }
0877 
0878         if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
0879             struct qede_tx_queue *txq;
0880 
0881             /* All TX queues of a given fastpath use the same
0882              * coalescing value, so there is no need to iterate over
0883              * all TCs; the TC0 txq suffices.
0884              */
0885             txq = QEDE_FP_TC0_TXQ(fp);
0886 
0887             rc = edev->ops->common->set_coalesce(edev->cdev,
0888                                  0, txc,
0889                                  txq->handle);
0890             if (rc) {
0891                 DP_INFO(edev,
0892                     "Set TX coalesce error, rc = %d\n", rc);
0893                 return rc;
0894             }
0895             edev->coal_entry[i].txc = txc;
0896             edev->coal_entry[i].isvalid = true;
0897         }
0898     }
0899 
0900     return rc;
0901 }
0902 
0903 static void qede_get_ringparam(struct net_device *dev,
0904                    struct ethtool_ringparam *ering,
0905                    struct kernel_ethtool_ringparam *kernel_ering,
0906                    struct netlink_ext_ack *extack)
0907 {
0908     struct qede_dev *edev = netdev_priv(dev);
0909 
0910     ering->rx_max_pending = NUM_RX_BDS_MAX;
0911     ering->rx_pending = edev->q_num_rx_buffers;
0912     ering->tx_max_pending = NUM_TX_BDS_MAX;
0913     ering->tx_pending = edev->q_num_tx_buffers;
0914 }
0915 
0916 static int qede_set_ringparam(struct net_device *dev,
0917                   struct ethtool_ringparam *ering,
0918                   struct kernel_ethtool_ringparam *kernel_ering,
0919                   struct netlink_ext_ack *extack)
0920 {
0921     struct qede_dev *edev = netdev_priv(dev);
0922 
0923     DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
0924            "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
0925            ering->rx_pending, ering->tx_pending);
0926 
0927     /* Validate legality of configuration */
0928     if (ering->rx_pending > NUM_RX_BDS_MAX ||
0929         ering->rx_pending < NUM_RX_BDS_MIN ||
0930         ering->tx_pending > NUM_TX_BDS_MAX ||
0931         ering->tx_pending < NUM_TX_BDS_MIN) {
0932         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
0933                "Can only support Rx Buffer size [0x%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
0934                NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
0935                NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
0936         return -EINVAL;
0937     }
0938 
0939     /* Change ring size and re-load */
0940     edev->q_num_rx_buffers = ering->rx_pending;
0941     edev->q_num_tx_buffers = ering->tx_pending;
0942 
0943     qede_reload(edev, NULL, false);
0944 
0945     return 0;
0946 }
0947 
0948 static void qede_get_pauseparam(struct net_device *dev,
0949                 struct ethtool_pauseparam *epause)
0950 {
0951     struct qede_dev *edev = netdev_priv(dev);
0952     struct qed_link_output current_link;
0953 
0954     memset(&current_link, 0, sizeof(current_link));
0955     edev->ops->common->get_link(edev->cdev, &current_link);
0956 
0957     if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
0958         epause->autoneg = true;
0959     if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
0960         epause->rx_pause = true;
0961     if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
0962         epause->tx_pause = true;
0963 
0964     DP_VERBOSE(edev, QED_MSG_DEBUG,
0965            "ethtool_pauseparam: cmd %d  autoneg %d  rx_pause %d  tx_pause %d\n",
0966            epause->cmd, epause->autoneg, epause->rx_pause,
0967            epause->tx_pause);
0968 }
0969 
0970 static int qede_set_pauseparam(struct net_device *dev,
0971                    struct ethtool_pauseparam *epause)
0972 {
0973     struct qede_dev *edev = netdev_priv(dev);
0974     struct qed_link_params params;
0975     struct qed_link_output current_link;
0976 
0977     if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
0978         DP_INFO(edev,
0979             "Pause settings are not allowed to be changed\n");
0980         return -EOPNOTSUPP;
0981     }
0982 
0983     memset(&current_link, 0, sizeof(current_link));
0984     edev->ops->common->get_link(edev->cdev, &current_link);
0985 
0986     memset(&params, 0, sizeof(params));
0987     params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
0988 
0989     if (epause->autoneg) {
0990         if (!phylink_test(current_link.supported_caps, Autoneg)) {
0991             DP_INFO(edev, "autoneg not supported\n");
0992             return -EINVAL;
0993         }
0994 
0995         params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
0996     }
0997 
0998     if (epause->rx_pause)
0999         params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1000     if (epause->tx_pause)
1001         params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1002 
1003     params.link_up = true;
1004     edev->ops->common->set_link(edev->cdev, &params);
1005 
1006     return 0;
1007 }
1008 
1009 static void qede_get_regs(struct net_device *ndev,
1010               struct ethtool_regs *regs, void *buffer)
1011 {
1012     struct qede_dev *edev = netdev_priv(ndev);
1013 
1014     regs->version = 0;
1015     memset(buffer, 0, regs->len);
1016 
1017     if (edev->ops && edev->ops->common)
1018         edev->ops->common->dbg_all_data(edev->cdev, buffer);
1019 }
1020 
1021 static int qede_get_regs_len(struct net_device *ndev)
1022 {
1023     struct qede_dev *edev = netdev_priv(ndev);
1024 
1025     if (edev->ops && edev->ops->common)
1026         return edev->ops->common->dbg_all_data_size(edev->cdev);
1027     else
1028         return -EINVAL;
1029 }
1030 
1031 static void qede_update_mtu(struct qede_dev *edev,
1032                 struct qede_reload_args *args)
1033 {
1034     edev->ndev->mtu = args->u.mtu;
1035 }
1036 
1037 /* Netdevice NDOs */
1038 int qede_change_mtu(struct net_device *ndev, int new_mtu)
1039 {
1040     struct qede_dev *edev = netdev_priv(ndev);
1041     struct qede_reload_args args;
1042 
1043     DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1044            "Configuring MTU size of %d\n", new_mtu);
1045 
1046     if (new_mtu > PAGE_SIZE)
1047         ndev->features &= ~NETIF_F_GRO_HW;
1048 
1049     /* Set the mtu field and re-start the interface if needed */
1050     args.u.mtu = new_mtu;
1051     args.func = &qede_update_mtu;
1052     qede_reload(edev, &args, false);
1053 #if IS_ENABLED(CONFIG_QED_RDMA)
1054     qede_rdma_event_change_mtu(edev);
1055 #endif
1056     edev->ops->common->update_mtu(edev->cdev, new_mtu);
1057 
1058     return 0;
1059 }
1060 
1061 static void qede_get_channels(struct net_device *dev,
1062                   struct ethtool_channels *channels)
1063 {
1064     struct qede_dev *edev = netdev_priv(dev);
1065 
1066     channels->max_combined = QEDE_MAX_RSS_CNT(edev);
1067     channels->max_rx = QEDE_MAX_RSS_CNT(edev);
1068     channels->max_tx = QEDE_MAX_RSS_CNT(edev);
1069     channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
1070                     edev->fp_num_rx;
1071     channels->tx_count = edev->fp_num_tx;
1072     channels->rx_count = edev->fp_num_rx;
1073 }
1074 
1075 static int qede_set_channels(struct net_device *dev,
1076                  struct ethtool_channels *channels)
1077 {
1078     struct qede_dev *edev = netdev_priv(dev);
1079     u32 count;
1080 
1081     DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1082            "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
1083            channels->rx_count, channels->tx_count,
1084            channels->other_count, channels->combined_count);
1085 
1086     count = channels->rx_count + channels->tx_count +
1087             channels->combined_count;
1088 
1089     /* We don't support `other' channels */
1090     if (channels->other_count) {
1091         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1092                "command parameters not supported\n");
1093         return -EINVAL;
1094     }
1095 
1096     if (!(channels->combined_count || (channels->rx_count &&
1097                        channels->tx_count))) {
1098         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1099                "need to request at least one transmit and one receive channel\n");
1100         return -EINVAL;
1101     }
1102 
1103     if (count > QEDE_MAX_RSS_CNT(edev)) {
1104         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1105                "requested channels = %d max supported channels = %d\n",
1106                count, QEDE_MAX_RSS_CNT(edev));
1107         return -EINVAL;
1108     }
1109 
1110     /* Check if there was a change in the active parameters */
1111     if ((count == QEDE_QUEUE_CNT(edev)) &&
1112         (channels->tx_count == edev->fp_num_tx) &&
1113         (channels->rx_count == edev->fp_num_rx)) {
1114         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1115                "No change in active parameters\n");
1116         return 0;
1117     }
1118 
1119     /* We need the number of queues to be divisible between the hwfns */
1120     if ((count % edev->dev_info.common.num_hwfns) ||
1121         (channels->tx_count % edev->dev_info.common.num_hwfns) ||
1122         (channels->rx_count % edev->dev_info.common.num_hwfns)) {
1123         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1124                "Number of channels must be divisible by %04x\n",
1125                edev->dev_info.common.num_hwfns);
1126         return -EINVAL;
1127     }
1128 
1129     /* Set number of queues and reload if necessary */
1130     edev->req_queues = count;
1131     edev->req_num_tx = channels->tx_count;
1132     edev->req_num_rx = channels->rx_count;
1133     /* Reset the indirection table if rx queue count is updated */
1134     if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
1135         edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
1136         memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table));
1137     }
1138 
1139     qede_reload(edev, NULL, false);
1140 
1141     return 0;
1142 }
1143 
1144 static int qede_get_ts_info(struct net_device *dev,
1145                 struct ethtool_ts_info *info)
1146 {
1147     struct qede_dev *edev = netdev_priv(dev);
1148 
1149     return qede_ptp_get_ts_info(edev, info);
1150 }
1151 
1152 static int qede_set_phys_id(struct net_device *dev,
1153                 enum ethtool_phys_id_state state)
1154 {
1155     struct qede_dev *edev = netdev_priv(dev);
1156     u8 led_state = 0;
1157 
1158     switch (state) {
1159     case ETHTOOL_ID_ACTIVE:
1160         return 1;   /* cycle on/off once per second */
1161 
1162     case ETHTOOL_ID_ON:
1163         led_state = QED_LED_MODE_ON;
1164         break;
1165 
1166     case ETHTOOL_ID_OFF:
1167         led_state = QED_LED_MODE_OFF;
1168         break;
1169 
1170     case ETHTOOL_ID_INACTIVE:
1171         led_state = QED_LED_MODE_RESTORE;
1172         break;
1173     }
1174 
1175     edev->ops->common->set_led(edev->cdev, led_state);
1176 
1177     return 0;
1178 }
1179 
1180 static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
1181 {
1182     info->data = RXH_IP_SRC | RXH_IP_DST;
1183 
1184     switch (info->flow_type) {
1185     case TCP_V4_FLOW:
1186     case TCP_V6_FLOW:
1187         info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1188         break;
1189     case UDP_V4_FLOW:
1190         if (edev->rss_caps & QED_RSS_IPV4_UDP)
1191             info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1192         break;
1193     case UDP_V6_FLOW:
1194         if (edev->rss_caps & QED_RSS_IPV6_UDP)
1195             info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1196         break;
1197     case IPV4_FLOW:
1198     case IPV6_FLOW:
1199         break;
1200     default:
1201         info->data = 0;
1202         break;
1203     }
1204 
1205     return 0;
1206 }
1207 
1208 static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1209               u32 *rule_locs)
1210 {
1211     struct qede_dev *edev = netdev_priv(dev);
1212     int rc = 0;
1213 
1214     switch (info->cmd) {
1215     case ETHTOOL_GRXRINGS:
1216         info->data = QEDE_RSS_COUNT(edev);
1217         break;
1218     case ETHTOOL_GRXFH:
1219         rc = qede_get_rss_flags(edev, info);
1220         break;
1221     case ETHTOOL_GRXCLSRLCNT:
1222         info->rule_cnt = qede_get_arfs_filter_count(edev);
1223         info->data = QEDE_RFS_MAX_FLTR;
1224         break;
1225     case ETHTOOL_GRXCLSRULE:
1226         rc = qede_get_cls_rule_entry(edev, info);
1227         break;
1228     case ETHTOOL_GRXCLSRLALL:
1229         rc = qede_get_cls_rule_all(edev, info, rule_locs);
1230         break;
1231     default:
1232         DP_ERR(edev, "Command parameters not supported\n");
1233         rc = -EOPNOTSUPP;
1234     }
1235 
1236     return rc;
1237 }
1238 
1239 static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
1240 {
1241     struct qed_update_vport_params *vport_update_params;
1242     u8 set_caps = 0, clr_caps = 0;
1243     int rc = 0;
1244 
1245     DP_VERBOSE(edev, QED_MSG_DEBUG,
1246            "Set rss flags command parameters: flow type = %d, data = %llu\n",
1247            info->flow_type, info->data);
1248 
1249     switch (info->flow_type) {
1250     case TCP_V4_FLOW:
1251     case TCP_V6_FLOW:
1252         /* For TCP only 4-tuple hash is supported */
1253         if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
1254                   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1255             DP_INFO(edev, "Command parameters not supported\n");
1256             return -EINVAL;
1257         }
1258         return 0;
1259     case UDP_V4_FLOW:
1260         /* For UDP either 2-tuple hash or 4-tuple hash is supported */
1261         if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1262                    RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1263             set_caps = QED_RSS_IPV4_UDP;
1264             DP_VERBOSE(edev, QED_MSG_DEBUG,
1265                    "UDP 4-tuple enabled\n");
1266         } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
1267             clr_caps = QED_RSS_IPV4_UDP;
1268             DP_VERBOSE(edev, QED_MSG_DEBUG,
1269                    "UDP 4-tuple disabled\n");
1270         } else {
1271             return -EINVAL;
1272         }
1273         break;
1274     case UDP_V6_FLOW:
1275         /* For UDP either 2-tuple hash or 4-tuple hash is supported */
1276         if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1277                    RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1278             set_caps = QED_RSS_IPV6_UDP;
1279             DP_VERBOSE(edev, QED_MSG_DEBUG,
1280                    "UDP 4-tuple enabled\n");
1281         } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
1282             clr_caps = QED_RSS_IPV6_UDP;
1283             DP_VERBOSE(edev, QED_MSG_DEBUG,
1284                    "UDP 4-tuple disabled\n");
1285         } else {
1286             return -EINVAL;
1287         }
1288         break;
1289     case IPV4_FLOW:
1290     case IPV6_FLOW:
1291         /* For IP only 2-tuple hash is supported */
1292         if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
1293             DP_INFO(edev, "Command parameters not supported\n");
1294             return -EINVAL;
1295         }
1296         return 0;
1297     case SCTP_V4_FLOW:
1298     case AH_ESP_V4_FLOW:
1299     case AH_V4_FLOW:
1300     case ESP_V4_FLOW:
1301     case SCTP_V6_FLOW:
1302     case AH_ESP_V6_FLOW:
1303     case AH_V6_FLOW:
1304     case ESP_V6_FLOW:
1305     case IP_USER_FLOW:
1306     case ETHER_FLOW:
1307         /* RSS is not supported for these protocols */
1308         if (info->data) {
1309             DP_INFO(edev, "Command parameters not supported\n");
1310             return -EINVAL;
1311         }
1312         return 0;
1313     default:
1314         return -EINVAL;
1315     }
1316 
1317     /* No action is needed if there is no change in the rss capability */
1318     if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps))
1319         return 0;
1320 
1321     /* Update internal configuration */
1322     edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps);
1323     edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
1324 
1325     /* Re-configure if possible */
1326     __qede_lock(edev);
1327     if (edev->state == QEDE_STATE_OPEN) {
1328         vport_update_params = vzalloc(sizeof(*vport_update_params));
1329         if (!vport_update_params) {
1330             __qede_unlock(edev);
1331             return -ENOMEM;
1332         }
1333         qede_fill_rss_params(edev, &vport_update_params->rss_params,
1334                      &vport_update_params->update_rss_flg);
1335         rc = edev->ops->vport_update(edev->cdev, vport_update_params);
1336         vfree(vport_update_params);
1337     }
1338     __qede_unlock(edev);
1339 
1340     return rc;
1341 }
1342 
1343 static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
1344 {
1345     struct qede_dev *edev = netdev_priv(dev);
1346     int rc;
1347 
1348     switch (info->cmd) {
1349     case ETHTOOL_SRXFH:
1350         rc = qede_set_rss_flags(edev, info);
1351         break;
1352     case ETHTOOL_SRXCLSRLINS:
1353         rc = qede_add_cls_rule(edev, info);
1354         break;
1355     case ETHTOOL_SRXCLSRLDEL:
1356         rc = qede_delete_flow_filter(edev, info->fs.location);
1357         break;
1358     default:
1359         DP_INFO(edev, "Command parameters not supported\n");
1360         rc = -EOPNOTSUPP;
1361     }
1362 
1363     return rc;
1364 }
1365 
1366 static u32 qede_get_rxfh_indir_size(struct net_device *dev)
1367 {
1368     return QED_RSS_IND_TABLE_SIZE;
1369 }
1370 
1371 static u32 qede_get_rxfh_key_size(struct net_device *dev)
1372 {
1373     struct qede_dev *edev = netdev_priv(dev);
1374 
1375     return sizeof(edev->rss_key);
1376 }
1377 
1378 static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
1379 {
1380     struct qede_dev *edev = netdev_priv(dev);
1381     int i;
1382 
1383     if (hfunc)
1384         *hfunc = ETH_RSS_HASH_TOP;
1385 
1386     if (!indir)
1387         return 0;
1388 
1389     for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
1390         indir[i] = edev->rss_ind_table[i];
1391 
1392     if (key)
1393         memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));
1394 
1395     return 0;
1396 }
1397 
1398 static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
1399              const u8 *key, const u8 hfunc)
1400 {
1401     struct qed_update_vport_params *vport_update_params;
1402     struct qede_dev *edev = netdev_priv(dev);
1403     int i, rc = 0;
1404 
1405     if (edev->dev_info.common.num_hwfns > 1) {
1406         DP_INFO(edev,
1407             "RSS configuration is not supported for 100G devices\n");
1408         return -EOPNOTSUPP;
1409     }
1410 
1411     if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1412         return -EOPNOTSUPP;
1413 
1414     if (!indir && !key)
1415         return 0;
1416 
1417     if (indir) {
1418         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
1419             edev->rss_ind_table[i] = indir[i];
1420         edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
1421     }
1422 
1423     if (key) {
1424         memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
1425         edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
1426     }
1427 
1428     __qede_lock(edev);
1429     if (edev->state == QEDE_STATE_OPEN) {
1430         vport_update_params = vzalloc(sizeof(*vport_update_params));
1431         if (!vport_update_params) {
1432             __qede_unlock(edev);
1433             return -ENOMEM;
1434         }
1435         qede_fill_rss_params(edev, &vport_update_params->rss_params,
1436                      &vport_update_params->update_rss_flg);
1437         rc = edev->ops->vport_update(edev->cdev, vport_update_params);
1438         vfree(vport_update_params);
1439     }
1440     __qede_unlock(edev);
1441 
1442     return rc;
1443 }
1444 
1445 /* This function enables the interrupt generation and the NAPI on the device */
1446 static void qede_netif_start(struct qede_dev *edev)
1447 {
1448     int i;
1449 
1450     if (!netif_running(edev->ndev))
1451         return;
1452 
1453     for_each_queue(i) {
1454         /* Update and reenable interrupts */
1455         qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
1456         napi_enable(&edev->fp_array[i].napi);
1457     }
1458 }
1459 
1460 /* This function disables the NAPI and the interrupt generation on the device */
1461 static void qede_netif_stop(struct qede_dev *edev)
1462 {
1463     int i;
1464 
1465     for_each_queue(i) {
1466         napi_disable(&edev->fp_array[i].napi);
1467         /* Disable interrupts */
1468         qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
1469     }
1470 }
1471 
1472 static int qede_selftest_transmit_traffic(struct qede_dev *edev,
1473                       struct sk_buff *skb)
1474 {
1475     struct qede_tx_queue *txq = NULL;
1476     struct eth_tx_1st_bd *first_bd;
1477     dma_addr_t mapping;
1478     int i, idx;
1479     u16 val;
1480 
1481     for_each_queue(i) {
1482         struct qede_fastpath *fp = &edev->fp_array[i];
1483 
1484         if (fp->type & QEDE_FASTPATH_TX) {
1485             txq = QEDE_FP_TC0_TXQ(fp);
1486             break;
1487         }
1488     }
1489 
1490     if (!txq) {
1491         DP_NOTICE(edev, "Tx path is not available\n");
1492         return -1;
1493     }
1494 
1495     /* Fill the entry in the SW ring and the BDs in the FW ring */
1496     idx = txq->sw_tx_prod;
1497     txq->sw_tx_ring.skbs[idx].skb = skb;
1498     first_bd = qed_chain_produce(&txq->tx_pbl);
1499     memset(first_bd, 0, sizeof(*first_bd));
1500     val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1501     first_bd->data.bd_flags.bitfields = val;
1502     val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
1503     val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
1504     first_bd->data.bitfields |= cpu_to_le16(val);
1505 
1506     /* Map skb linear data for DMA and set in the first BD */
1507     mapping = dma_map_single(&edev->pdev->dev, skb->data,
1508                  skb_headlen(skb), DMA_TO_DEVICE);
1509     if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
1510         DP_NOTICE(edev, "SKB mapping failed\n");
1511         return -ENOMEM;
1512     }
1513     BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
1514 
1515     /* update the first BD with the actual num BDs */
1516     first_bd->data.nbds = 1;
1517     txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
1518     /* 'next page' entries are counted in the producer value */
1519     val = qed_chain_get_prod_idx(&txq->tx_pbl);
1520     txq->tx_db.data.bd_prod = cpu_to_le16(val);
1521 
1522     /* wmb makes sure that the BDs data is updated before updating the
1523      * producer, otherwise FW may read old data from the BDs.
1524      */
1525     wmb();
1526     barrier();
1527     writel(txq->tx_db.raw, txq->doorbell_addr);
1528 
1529     for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
1530         if (qede_txq_has_work(txq))
1531             break;
1532         usleep_range(100, 200);
1533     }
1534 
1535     if (!qede_txq_has_work(txq)) {
1536         DP_NOTICE(edev, "Tx completion didn't happen\n");
1537         return -1;
1538     }
1539 
1540     first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
1541     dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
1542              BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
1543     txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
1544     txq->sw_tx_ring.skbs[idx].skb = NULL;
1545 
1546     return 0;
1547 }
1548 
1549 static int qede_selftest_receive_traffic(struct qede_dev *edev)
1550 {
1551     u16 sw_rx_index, len;
1552     struct eth_fast_path_rx_reg_cqe *fp_cqe;
1553     struct qede_rx_queue *rxq = NULL;
1554     struct sw_rx_data *sw_rx_data;
1555     union eth_rx_cqe *cqe;
1556     int i, iter, rc = 0;
1557     u8 *data_ptr;
1558 
1559     for_each_queue(i) {
1560         if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
1561             rxq = edev->fp_array[i].rxq;
1562             break;
1563         }
1564     }
1565 
1566     if (!rxq) {
1567         DP_NOTICE(edev, "Rx path is not available\n");
1568         return -1;
1569     }
1570 
1571     /* The packet is expected to be received on rx-queue 0 even though RSS
1572      * is enabled, because queue 0 is configured as the default queue and
1573      * the loopback traffic is not IP.
1574      */
1575     for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
1576         if (!qede_has_rx_work(rxq)) {
1577             usleep_range(100, 200);
1578             continue;
1579         }
1580 
1581         /* Get the CQE from the completion ring */
1582         cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1583 
1584         /* Get the data from the SW ring */
1585         sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1586         sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1587         fp_cqe = &cqe->fast_path_regular;
1588         len =  le16_to_cpu(fp_cqe->len_on_first_bd);
1589         data_ptr = (u8 *)(page_address(sw_rx_data->data) +
1590                   fp_cqe->placement_offset +
1591                   sw_rx_data->page_offset +
1592                   rxq->rx_headroom);
1593         if (ether_addr_equal(data_ptr,  edev->ndev->dev_addr) &&
1594             ether_addr_equal(data_ptr + ETH_ALEN,
1595                      edev->ndev->dev_addr)) {
1596             for (i = ETH_HLEN; i < len; i++)
1597                 if (data_ptr[i] != (unsigned char)(i & 0xff)) {
1598                     rc = -1;
1599                     break;
1600                 }
1601 
1602             qede_recycle_rx_bd_ring(rxq, 1);
1603             qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1604             break;
1605         }
1606 
1607         DP_INFO(edev, "Not the transmitted packet\n");
1608         qede_recycle_rx_bd_ring(rxq, 1);
1609         qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1610     }
1611 
1612     if (iter == QEDE_SELFTEST_POLL_COUNT) {
1613         DP_NOTICE(edev, "Failed to receive the traffic\n");
1614         return -1;
1615     }
1616 
1617     qede_update_rx_prod(edev, rxq);
1618 
1619     return rc;
1620 }
1621 
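     /* Run one loopback iteration: quiesce the data path, force the link
      * into the requested loopback mode, transmit a single self-addressed
      * frame and check that it comes back, then restore normal link mode
      * and restart the data path.
      */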
1622 static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
1623 {
1624     struct qed_link_params link_params;
1625     struct sk_buff *skb = NULL;
1626     int rc = 0, i;
1627     u32 pkt_size;
1628     u8 *packet;
1629 
1630     if (!netif_running(edev->ndev)) {
1631         DP_NOTICE(edev, "Interface is down\n");
1632         return -EINVAL;
1633     }
1634 
1635     qede_netif_stop(edev);
1636 
1637     /* Bring up the link in Loopback mode */
1638     memset(&link_params, 0, sizeof(link_params));
1639     link_params.link_up = true;
1640     link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
1641     link_params.loopback_mode = loopback_mode;
1642     edev->ops->common->set_link(edev->cdev, &link_params);
1643 
1644     /* Wait for loopback configuration to apply */
1645     msleep_interruptible(500);
1646 
1647     /* Setting max packet size to 1.5K to avoid data being split over
1648      * multiple BDs in cases where MTU > PAGE_SIZE.
1649      */
1650     pkt_size = (((edev->ndev->mtu < ETH_DATA_LEN) ?
1651              edev->ndev->mtu : ETH_DATA_LEN) + ETH_HLEN);
1652 
1653     skb = netdev_alloc_skb(edev->ndev, pkt_size);
1654     if (!skb) {
1655         DP_INFO(edev, "Can't allocate skb\n");
1656         rc = -ENOMEM;
1657         goto test_loopback_exit;
1658     }
1659     packet = skb_put(skb, pkt_size);
1660     ether_addr_copy(packet, edev->ndev->dev_addr);
1661     ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
1662     memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
1663     for (i = ETH_HLEN; i < pkt_size; i++)
1664         packet[i] = (unsigned char)(i & 0xff);
1665 
1666     rc = qede_selftest_transmit_traffic(edev, skb);
1667     if (rc)
1668         goto test_loopback_exit;
1669 
1670     rc = qede_selftest_receive_traffic(edev);
1671     if (rc)
1672         goto test_loopback_exit;
1673 
1674     DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
1675 
1676 test_loopback_exit:
1677     dev_kfree_skb(skb);
1678 
1679     /* Bring up the link in Normal mode */
1680     memset(&link_params, 0, sizeof(link_params));
1681     link_params.link_up = true;
1682     link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
1683     link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
1684     edev->ops->common->set_link(edev->cdev, &link_params);
1685 
1686     /* Wait for loopback configuration to apply */
1687     msleep_interruptible(500);
1688 
1689     qede_netif_start(edev);
1690 
1691     return rc;
1692 }
1693 
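     /* ethtool .self_test callback. The internal-PHY loopback test runs only
      * when ETH_TEST_FL_OFFLINE is set; the interrupt, memory, register,
      * clock and NVRAM tests are delegated to the qed common selftest ops.
      * A non-zero entry in @buf marks the corresponding test as failed.
      */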
1694 static void qede_self_test(struct net_device *dev,
1695                struct ethtool_test *etest, u64 *buf)
1696 {
1697     struct qede_dev *edev = netdev_priv(dev);
1698 
1699     DP_VERBOSE(edev, QED_MSG_DEBUG,
1700            "Self-test command parameters: offline = %d, external_lb = %d\n",
1701            (etest->flags & ETH_TEST_FL_OFFLINE),
1702            (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
1703 
1704     memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
1705 
1706     if (etest->flags & ETH_TEST_FL_OFFLINE) {
1707         if (qede_selftest_run_loopback(edev,
1708                            QED_LINK_LOOPBACK_INT_PHY)) {
1709             buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
1710             etest->flags |= ETH_TEST_FL_FAILED;
1711         }
1712     }
1713 
1714     if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
1715         buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
1716         etest->flags |= ETH_TEST_FL_FAILED;
1717     }
1718 
1719     if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
1720         buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
1721         etest->flags |= ETH_TEST_FL_FAILED;
1722     }
1723 
1724     if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
1725         buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
1726         etest->flags |= ETH_TEST_FL_FAILED;
1727     }
1728 
1729     if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
1730         buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
1731         etest->flags |= ETH_TEST_FL_FAILED;
1732     }
1733 
1734     if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
1735         buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
1736         etest->flags |= ETH_TEST_FL_FAILED;
1737     }
1738 }
1739 
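     /* Only the ETHTOOL_RX_COPYBREAK tunable is supported; the value must
      * lie within [QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE].
      */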
1740 static int qede_set_tunable(struct net_device *dev,
1741                 const struct ethtool_tunable *tuna,
1742                 const void *data)
1743 {
1744     struct qede_dev *edev = netdev_priv(dev);
1745     u32 val;
1746 
1747     switch (tuna->id) {
1748     case ETHTOOL_RX_COPYBREAK:
1749         val = *(u32 *)data;
1750         if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
1751             DP_VERBOSE(edev, QED_MSG_DEBUG,
1752                    "Invalid rx copy break value, range is [%u, %u]\n",
1753                    QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
1754             return -EINVAL;
1755         }
1756 
1757         edev->rx_copybreak = *(u32 *)data;
1758         break;
1759     default:
1760         return -EOPNOTSUPP;
1761     }
1762 
1763     return 0;
1764 }
1765 
1766 static int qede_get_tunable(struct net_device *dev,
1767                 const struct ethtool_tunable *tuna, void *data)
1768 {
1769     struct qede_dev *edev = netdev_priv(dev);
1770 
1771     switch (tuna->id) {
1772     case ETHTOOL_RX_COPYBREAK:
1773         *(u32 *)data = edev->rx_copybreak;
1774         break;
1775     default:
1776         return -EOPNOTSUPP;
1777     }
1778 
1779     return 0;
1780 }
1781 
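     /* Report the current Energy Efficient Ethernet configuration. Only the
      * 1G and 10G advertisement bits are translated between the qed and
      * ethtool representations here and in qede_set_eee() below.
      */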
1782 static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1783 {
1784     struct qede_dev *edev = netdev_priv(dev);
1785     struct qed_link_output current_link;
1786 
1787     memset(&current_link, 0, sizeof(current_link));
1788     edev->ops->common->get_link(edev->cdev, &current_link);
1789 
1790     if (!current_link.eee_supported) {
1791         DP_INFO(edev, "EEE is not supported\n");
1792         return -EOPNOTSUPP;
1793     }
1794 
1795     if (current_link.eee.adv_caps & QED_EEE_1G_ADV)
1796         edata->advertised = ADVERTISED_1000baseT_Full;
1797     if (current_link.eee.adv_caps & QED_EEE_10G_ADV)
1798         edata->advertised |= ADVERTISED_10000baseT_Full;
1799     if (current_link.sup_caps & QED_EEE_1G_ADV)
1800         edata->supported = ADVERTISED_1000baseT_Full;
1801     if (current_link.sup_caps & QED_EEE_10G_ADV)
1802         edata->supported |= ADVERTISED_10000baseT_Full;
1803     if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV)
1804         edata->lp_advertised = ADVERTISED_1000baseT_Full;
1805     if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV)
1806         edata->lp_advertised |= ADVERTISED_10000baseT_Full;
1807 
1808     edata->tx_lpi_timer = current_link.eee.tx_lpi_timer;
1809     edata->eee_enabled = current_link.eee.enable;
1810     edata->tx_lpi_enabled = current_link.eee.tx_lpi_enable;
1811     edata->eee_active = current_link.eee_active;
1812 
1813     return 0;
1814 }
1815 
1816 static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1817 {
1818     struct qede_dev *edev = netdev_priv(dev);
1819     struct qed_link_output current_link;
1820     struct qed_link_params params;
1821 
1822     if (!edev->ops->common->can_link_change(edev->cdev)) {
1823         DP_INFO(edev, "Link settings are not allowed to be changed\n");
1824         return -EOPNOTSUPP;
1825     }
1826 
1827     memset(&current_link, 0, sizeof(current_link));
1828     edev->ops->common->get_link(edev->cdev, &current_link);
1829 
1830     if (!current_link.eee_supported) {
1831         DP_INFO(edev, "EEE is not supported\n");
1832         return -EOPNOTSUPP;
1833     }
1834 
1835     memset(&params, 0, sizeof(params));
1836     params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;
1837 
1838     if (!(edata->advertised & (ADVERTISED_1000baseT_Full |
1839                    ADVERTISED_10000baseT_Full)) ||
1840         ((edata->advertised & (ADVERTISED_1000baseT_Full |
1841                    ADVERTISED_10000baseT_Full)) !=
1842          edata->advertised)) {
1843         DP_VERBOSE(edev, QED_MSG_DEBUG,
1844                "Invalid advertised capabilities %d\n",
1845                edata->advertised);
1846         return -EINVAL;
1847     }
1848 
1849     if (edata->advertised & ADVERTISED_1000baseT_Full)
1850         params.eee.adv_caps = QED_EEE_1G_ADV;
1851     if (edata->advertised & ADVERTISED_10000baseT_Full)
1852         params.eee.adv_caps |= QED_EEE_10G_ADV;
1853     params.eee.enable = edata->eee_enabled;
1854     params.eee.tx_lpi_enable = edata->tx_lpi_enabled;
1855     params.eee.tx_lpi_timer = edata->tx_lpi_timer;
1856 
1857     params.link_up = true;
1858     edev->ops->common->set_link(edev->cdev, &params);
1859 
1860     return 0;
1861 }
1862 
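     /* Helpers translating FEC capabilities between the QED_FEC_MODE_* and
      * ETHTOOL_FEC_* bitmasks, used by the get/set_fecparam callbacks below.
      */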
1863 static u32 qede_link_to_ethtool_fec(u32 link_fec)
1864 {
1865     u32 eth_fec = 0;
1866 
1867     if (link_fec & QED_FEC_MODE_NONE)
1868         eth_fec |= ETHTOOL_FEC_OFF;
1869     if (link_fec & QED_FEC_MODE_FIRECODE)
1870         eth_fec |= ETHTOOL_FEC_BASER;
1871     if (link_fec & QED_FEC_MODE_RS)
1872         eth_fec |= ETHTOOL_FEC_RS;
1873     if (link_fec & QED_FEC_MODE_AUTO)
1874         eth_fec |= ETHTOOL_FEC_AUTO;
1875     if (link_fec & QED_FEC_MODE_UNSUPPORTED)
1876         eth_fec |= ETHTOOL_FEC_NONE;
1877 
1878     return eth_fec;
1879 }
1880 
1881 static u32 qede_ethtool_to_link_fec(u32 eth_fec)
1882 {
1883     u32 link_fec = 0;
1884 
1885     if (eth_fec & ETHTOOL_FEC_OFF)
1886         link_fec |= QED_FEC_MODE_NONE;
1887     if (eth_fec & ETHTOOL_FEC_BASER)
1888         link_fec |= QED_FEC_MODE_FIRECODE;
1889     if (eth_fec & ETHTOOL_FEC_RS)
1890         link_fec |= QED_FEC_MODE_RS;
1891     if (eth_fec & ETHTOOL_FEC_AUTO)
1892         link_fec |= QED_FEC_MODE_AUTO;
1893     if (eth_fec & ETHTOOL_FEC_NONE)
1894         link_fec |= QED_FEC_MODE_UNSUPPORTED;
1895 
1896     return link_fec;
1897 }
1898 
1899 static int qede_get_fecparam(struct net_device *dev,
1900                  struct ethtool_fecparam *fecparam)
1901 {
1902     struct qede_dev *edev = netdev_priv(dev);
1903     struct qed_link_output curr_link;
1904 
1905     memset(&curr_link, 0, sizeof(curr_link));
1906     edev->ops->common->get_link(edev->cdev, &curr_link);
1907 
1908     fecparam->active_fec = qede_link_to_ethtool_fec(curr_link.active_fec);
1909     fecparam->fec = qede_link_to_ethtool_fec(curr_link.sup_fec);
1910 
1911     return 0;
1912 }
1913 
1914 static int qede_set_fecparam(struct net_device *dev,
1915                  struct ethtool_fecparam *fecparam)
1916 {
1917     struct qede_dev *edev = netdev_priv(dev);
1918     struct qed_link_params params;
1919 
1920     if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
1921         DP_INFO(edev, "Link settings are not allowed to be changed\n");
1922         return -EOPNOTSUPP;
1923     }
1924 
1925     memset(&params, 0, sizeof(params));
1926     params.override_flags |= QED_LINK_OVERRIDE_FEC_CONFIG;
1927     params.fec = qede_ethtool_to_link_fec(fecparam->fec);
1928     params.link_up = true;
1929 
1930     edev->ops->common->set_link(edev->cdev, &params);
1931 
1932     return 0;
1933 }
1934 
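     /* Identify the plugged-in transceiver from the first EEPROM byte (the
      * SFF identifier) and report the matching EEPROM layout and length.
      */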
1935 static int qede_get_module_info(struct net_device *dev,
1936                 struct ethtool_modinfo *modinfo)
1937 {
1938     struct qede_dev *edev = netdev_priv(dev);
1939     u8 buf[4];
1940     int rc;
1941 
1942     /* Read the first 4 bytes to find the SFP type */
1943     rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
1944                            QED_I2C_DEV_ADDR_A0, 0, 4);
1945     if (rc) {
1946         DP_ERR(edev, "Failed reading EEPROM data %d\n", rc);
1947         return rc;
1948     }
1949 
1950     switch (buf[0]) {
1951     case 0x3: /* SFP, SFP+, SFP-28 */
1952         modinfo->type = ETH_MODULE_SFF_8472;
1953         modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1954         break;
1955     case 0xc: /* QSFP */
1956     case 0xd: /* QSFP+ */
1957         modinfo->type = ETH_MODULE_SFF_8436;
1958         modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
1959         break;
1960     case 0x11: /* QSFP-28 */
1961         modinfo->type = ETH_MODULE_SFF_8636;
1962         modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
1963         break;
1964     default:
1965         DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]);
1966         return -EINVAL;
1967     }
1968 
1969     return 0;
1970 }
1971 
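     /* Read the module EEPROM: offsets below ETH_MODULE_SFF_8079_LEN come
      * from the A0 page, the remainder (up to ETH_MODULE_SFF_8472_LEN) from
      * the A2 page. An A2 read failure is logged but reported as success,
      * presumably because not every module implements that page.
      */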
1972 static int qede_get_module_eeprom(struct net_device *dev,
1973                   struct ethtool_eeprom *ee, u8 *data)
1974 {
1975     struct qede_dev *edev = netdev_priv(dev);
1976     u32 start_addr = ee->offset, size = 0;
1977     u8 *buf = data;
1978     int rc = 0;
1979 
1980     /* Read A0 section */
1981     if (ee->offset < ETH_MODULE_SFF_8079_LEN) {
1982         /* Limit transfer size to the A0 section boundary */
1983         if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN)
1984             size = ETH_MODULE_SFF_8079_LEN - ee->offset;
1985         else
1986             size = ee->len;
1987 
1988         rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
1989                                QED_I2C_DEV_ADDR_A0,
1990                                start_addr, size);
1991         if (rc) {
1992             DP_ERR(edev, "Failed reading A0 section %d\n", rc);
1993             return rc;
1994         }
1995 
1996         buf += size;
1997         start_addr += size;
1998     }
1999 
2000     /* Read A2 section */
2001     if (start_addr >= ETH_MODULE_SFF_8079_LEN &&
2002         start_addr < ETH_MODULE_SFF_8472_LEN) {
2003         size = ee->len - size;
2004         /* Limit transfer size to the A2 section boundary */
2005         if (start_addr + size > ETH_MODULE_SFF_8472_LEN)
2006             size = ETH_MODULE_SFF_8472_LEN - start_addr;
2007         start_addr -= ETH_MODULE_SFF_8079_LEN;
2008         rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
2009                                QED_I2C_DEV_ADDR_A2,
2010                                start_addr, size);
2011         if (rc) {
2012             DP_VERBOSE(edev, QED_MSG_DEBUG,
2013                    "Failed reading A2 section %d\n", rc);
2014             return 0;
2015         }
2016     }
2017 
2018     return rc;
2019 }
2020 
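     /* The set-dump interface is used as a small stateful protocol: the
      * first call selects the dump command (NVM config or GRC dump), and
      * subsequent calls either collect NVM-config arguments or set GRC
      * config flags. The data itself is retrieved via qede_get_dump_flag()
      * and qede_get_dump_data() below.
      */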
2021 static int qede_set_dump(struct net_device *dev, struct ethtool_dump *val)
2022 {
2023     struct qede_dev *edev = netdev_priv(dev);
2024     int rc = 0;
2025 
2026     if (edev->dump_info.cmd == QEDE_DUMP_CMD_NONE) {
2027         if (val->flag > QEDE_DUMP_CMD_MAX) {
2028             DP_ERR(edev, "Invalid command %d\n", val->flag);
2029             return -EINVAL;
2030         }
2031         edev->dump_info.cmd = val->flag;
2032         edev->dump_info.num_args = 0;
2033         return 0;
2034     }
2035 
2036     if (edev->dump_info.num_args == QEDE_DUMP_MAX_ARGS) {
2037         DP_ERR(edev, "Arg count = %d\n", edev->dump_info.num_args);
2038         return -EINVAL;
2039     }
2040 
2041     switch (edev->dump_info.cmd) {
2042     case QEDE_DUMP_CMD_NVM_CFG:
2043         edev->dump_info.args[edev->dump_info.num_args] = val->flag;
2044         edev->dump_info.num_args++;
2045         break;
2046     case QEDE_DUMP_CMD_GRCDUMP:
2047         rc = edev->ops->common->set_grc_config(edev->cdev,
2048                                val->flag, 1);
2049         break;
2050     default:
2051         break;
2052     }
2053 
2054     return rc;
2055 }
2056 
2057 static int qede_get_dump_flag(struct net_device *dev,
2058                   struct ethtool_dump *dump)
2059 {
2060     struct qede_dev *edev = netdev_priv(dev);
2061 
2062     if (!edev->ops || !edev->ops->common) {
2063         DP_ERR(edev, "Edev ops not populated\n");
2064         return -EINVAL;
2065     }
2066 
2067     dump->version = QEDE_DUMP_VERSION;
2068     switch (edev->dump_info.cmd) {
2069     case QEDE_DUMP_CMD_NVM_CFG:
2070         dump->flag = QEDE_DUMP_CMD_NVM_CFG;
2071         dump->len = edev->ops->common->read_nvm_cfg_len(edev->cdev,
2072                         edev->dump_info.args[0]);
2073         break;
2074     case QEDE_DUMP_CMD_GRCDUMP:
2075         dump->flag = QEDE_DUMP_CMD_GRCDUMP;
2076         dump->len = edev->ops->common->dbg_all_data_size(edev->cdev);
2077         break;
2078     default:
2079         DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd);
2080         return -EINVAL;
2081     }
2082 
2083     DP_VERBOSE(edev, QED_MSG_DEBUG,
2084            "dump->version = 0x%x dump->flag = %d dump->len = %d\n",
2085            dump->version, dump->flag, dump->len);
2086     return 0;
2087 }
2088 
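     /* Produce the dump selected via qede_set_dump() into @buf. The stored
      * dump command and arguments are cleared afterwards, even on error, so
      * a new set-dump sequence is needed for the next dump.
      */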
2089 static int qede_get_dump_data(struct net_device *dev,
2090                   struct ethtool_dump *dump, void *buf)
2091 {
2092     struct qede_dev *edev = netdev_priv(dev);
2093     int rc = 0;
2094 
2095     if (!edev->ops || !edev->ops->common) {
2096         DP_ERR(edev, "Edev ops not populated\n");
2097         rc = -EINVAL;
2098         goto err;
2099     }
2100 
2101     switch (edev->dump_info.cmd) {
2102     case QEDE_DUMP_CMD_NVM_CFG:
2103         if (edev->dump_info.num_args != QEDE_DUMP_NVM_ARG_COUNT) {
2104             DP_ERR(edev, "Arg count = %d required = %d\n",
2105                    edev->dump_info.num_args,
2106                    QEDE_DUMP_NVM_ARG_COUNT);
2107             rc = -EINVAL;
2108             goto err;
2109         }
2110         rc = edev->ops->common->read_nvm_cfg(edev->cdev, (u8 **)&buf,
2111                               edev->dump_info.args[0],
2112                               edev->dump_info.args[1]);
2113         break;
2114     case QEDE_DUMP_CMD_GRCDUMP:
2115         memset(buf, 0, dump->len);
2116         rc = edev->ops->common->dbg_all_data(edev->cdev, buf);
2117         break;
2118     default:
2119         DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd);
2120         rc = -EINVAL;
2121         break;
2122     }
2123 
2124 err:
2125     edev->dump_info.cmd = QEDE_DUMP_CMD_NONE;
2126     edev->dump_info.num_args = 0;
2127     memset(edev->dump_info.args, 0, sizeof(edev->dump_info.args));
2128 
2129     return rc;
2130 }
2131 
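     /* Per-queue interrupt coalescing: values above QED_COALESCE_MAX are
      * rejected, otherwise the Rx and Tx coalescing of the given fastpath
      * queue are configured separately and the accepted values are cached
      * in coal_entry[] (presumably so they can be restored later).
      */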
2132 int qede_set_per_coalesce(struct net_device *dev, u32 queue,
2133               struct ethtool_coalesce *coal)
2134 {
2135     struct qede_dev *edev = netdev_priv(dev);
2136     struct qede_fastpath *fp;
2137     u16 rxc, txc;
2138     int rc = 0;
2139 
2140     if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
2141         coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
2142         DP_INFO(edev,
2143             "Can't support requested %s coalesce value [max supported value %d]\n",
2144             coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
2145                                    : "tx",
2146             QED_COALESCE_MAX);
2147         return -EINVAL;
2148     }
2149 
2150     rxc = (u16)coal->rx_coalesce_usecs;
2151     txc = (u16)coal->tx_coalesce_usecs;
2152 
2153     __qede_lock(edev);
2154     if (queue >= edev->num_queues) {
2155         DP_INFO(edev, "Invalid queue\n");
2156         rc = -EINVAL;
2157         goto out;
2158     }
2159 
2160     if (edev->state != QEDE_STATE_OPEN) {
2161         rc = -EINVAL;
2162         goto out;
2163     }
2164 
2165     fp = &edev->fp_array[queue];
2166 
2167     if (edev->fp_array[queue].type & QEDE_FASTPATH_RX) {
2168         rc = edev->ops->common->set_coalesce(edev->cdev,
2169                              rxc, 0,
2170                              fp->rxq->handle);
2171         if (rc) {
2172             DP_INFO(edev,
2173                 "Set RX coalesce error, rc = %d\n", rc);
2174             goto out;
2175         }
2176         edev->coal_entry[queue].rxc = rxc;
2177         edev->coal_entry[queue].isvalid = true;
2178     }
2179 
2180     if (edev->fp_array[queue].type & QEDE_FASTPATH_TX) {
2181         rc = edev->ops->common->set_coalesce(edev->cdev,
2182                              0, txc,
2183                              fp->txq->handle);
2184         if (rc) {
2185             DP_INFO(edev,
2186                 "Set TX coalesce error, rc = %d\n", rc);
2187             goto out;
2188         }
2189         edev->coal_entry[queue].txc = txc;
2190         edev->coal_entry[queue].isvalid = true;
2191     }
2192 out:
2193     __qede_unlock(edev);
2194 
2195     return rc;
2196 }
2197 
2198 static int qede_get_per_coalesce(struct net_device *dev,
2199                  u32 queue,
2200                  struct ethtool_coalesce *coal)
2201 {
2202     void *rx_handle = NULL, *tx_handle = NULL;
2203     struct qede_dev *edev = netdev_priv(dev);
2204     struct qede_fastpath *fp;
2205     u16 rx_coal, tx_coal;
2206     int rc = 0;
2207 
2208     rx_coal = QED_DEFAULT_RX_USECS;
2209     tx_coal = QED_DEFAULT_TX_USECS;
2210 
2211     memset(coal, 0, sizeof(struct ethtool_coalesce));
2212 
2213     __qede_lock(edev);
2214     if (queue >= edev->num_queues) {
2215         DP_INFO(edev, "Invalid queue\n");
2216         rc = -EINVAL;
2217         goto out;
2218     }
2219 
2220     if (edev->state != QEDE_STATE_OPEN) {
2221         rc = -EINVAL;
2222         goto out;
2223     }
2224 
2225     fp = &edev->fp_array[queue];
2226 
2227     if (fp->type & QEDE_FASTPATH_RX)
2228         rx_handle = fp->rxq->handle;
2229 
2230     rc = edev->ops->get_coalesce(edev->cdev, &rx_coal,
2231                      rx_handle);
2232     if (rc) {
2233         DP_INFO(edev, "Read Rx coalesce error\n");
2234         goto out;
2235     }
2236 
2237     fp = &edev->fp_array[queue];
2238     if (fp->type & QEDE_FASTPATH_TX)
2239         tx_handle = fp->txq->handle;
2240 
2241     rc = edev->ops->get_coalesce(edev->cdev, &tx_coal,
2242                       tx_handle);
2243     if (rc)
2244         DP_INFO(edev, "Read Tx coalesce error\n");
2245 
2246 out:
2247     __qede_unlock(edev);
2248 
2249     coal->rx_coalesce_usecs = rx_coal;
2250     coal->tx_coalesce_usecs = tx_coal;
2251 
2252     return rc;
2253 }
2254 
2255 static const struct ethtool_ops qede_ethtool_ops = {
2256     .supported_coalesce_params  = ETHTOOL_COALESCE_USECS,
2257     .get_link_ksettings     = qede_get_link_ksettings,
2258     .set_link_ksettings     = qede_set_link_ksettings,
2259     .get_drvinfo            = qede_get_drvinfo,
2260     .get_regs_len           = qede_get_regs_len,
2261     .get_regs           = qede_get_regs,
2262     .get_wol            = qede_get_wol,
2263     .set_wol            = qede_set_wol,
2264     .get_msglevel           = qede_get_msglevel,
2265     .set_msglevel           = qede_set_msglevel,
2266     .nway_reset         = qede_nway_reset,
2267     .get_link           = qede_get_link,
2268     .get_coalesce           = qede_get_coalesce,
2269     .set_coalesce           = qede_set_coalesce,
2270     .get_ringparam          = qede_get_ringparam,
2271     .set_ringparam          = qede_set_ringparam,
2272     .get_pauseparam         = qede_get_pauseparam,
2273     .set_pauseparam         = qede_set_pauseparam,
2274     .get_strings            = qede_get_strings,
2275     .set_phys_id            = qede_set_phys_id,
2276     .get_ethtool_stats      = qede_get_ethtool_stats,
2277     .get_priv_flags         = qede_get_priv_flags,
2278     .set_priv_flags         = qede_set_priv_flags,
2279     .get_sset_count         = qede_get_sset_count,
2280     .get_rxnfc          = qede_get_rxnfc,
2281     .set_rxnfc          = qede_set_rxnfc,
2282     .get_rxfh_indir_size        = qede_get_rxfh_indir_size,
2283     .get_rxfh_key_size      = qede_get_rxfh_key_size,
2284     .get_rxfh           = qede_get_rxfh,
2285     .set_rxfh           = qede_set_rxfh,
2286     .get_ts_info            = qede_get_ts_info,
2287     .get_channels           = qede_get_channels,
2288     .set_channels           = qede_set_channels,
2289     .self_test          = qede_self_test,
2290     .get_module_info        = qede_get_module_info,
2291     .get_module_eeprom      = qede_get_module_eeprom,
2292     .get_eee            = qede_get_eee,
2293     .set_eee            = qede_set_eee,
2294     .get_fecparam           = qede_get_fecparam,
2295     .set_fecparam           = qede_set_fecparam,
2296     .get_tunable            = qede_get_tunable,
2297     .set_tunable            = qede_set_tunable,
2298     .get_per_queue_coalesce     = qede_get_per_coalesce,
2299     .set_per_queue_coalesce     = qede_set_per_coalesce,
2300     .flash_device           = qede_flash_device,
2301     .get_dump_flag          = qede_get_dump_flag,
2302     .get_dump_data          = qede_get_dump_data,
2303     .set_dump           = qede_set_dump,
2304 };
2305 
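     /* Ethtool ops exposed to VFs. Compared with the PF ops above, link
      * configuration, WoL, register/firmware dumps, self-test, EEE/FEC,
      * module EEPROM access and flashing are not provided.
      */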
2306 static const struct ethtool_ops qede_vf_ethtool_ops = {
2307     .supported_coalesce_params  = ETHTOOL_COALESCE_USECS,
2308     .get_link_ksettings     = qede_get_link_ksettings,
2309     .get_drvinfo            = qede_get_drvinfo,
2310     .get_msglevel           = qede_get_msglevel,
2311     .set_msglevel           = qede_set_msglevel,
2312     .get_link           = qede_get_link,
2313     .get_coalesce           = qede_get_coalesce,
2314     .set_coalesce           = qede_set_coalesce,
2315     .get_ringparam          = qede_get_ringparam,
2316     .set_ringparam          = qede_set_ringparam,
2317     .get_strings            = qede_get_strings,
2318     .get_ethtool_stats      = qede_get_ethtool_stats,
2319     .get_priv_flags         = qede_get_priv_flags,
2320     .get_sset_count         = qede_get_sset_count,
2321     .get_rxnfc          = qede_get_rxnfc,
2322     .set_rxnfc          = qede_set_rxnfc,
2323     .get_rxfh_indir_size        = qede_get_rxfh_indir_size,
2324     .get_rxfh_key_size      = qede_get_rxfh_key_size,
2325     .get_rxfh           = qede_get_rxfh,
2326     .set_rxfh           = qede_set_rxfh,
2327     .get_channels           = qede_get_channels,
2328     .set_channels           = qede_set_channels,
2329     .get_per_queue_coalesce     = qede_get_per_coalesce,
2330     .set_per_queue_coalesce     = qede_set_per_coalesce,
2331     .get_tunable            = qede_get_tunable,
2332     .set_tunable            = qede_set_tunable,
2333 };
2334 
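     /* Install the VF or PF flavour of the ethtool ops depending on the
      * function type.
      */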
2335 void qede_set_ethtool_ops(struct net_device *dev)
2336 {
2337     struct qede_dev *edev = netdev_priv(dev);
2338 
2339     if (IS_VF(edev))
2340         dev->ethtool_ops = &qede_vf_ethtool_ops;
2341     else
2342         dev->ethtool_ops = &qede_ethtool_ops;
2343 }