Back to home page

OSCL-LXR

 
 

    


0001 /**********************************************************************
0002  * Author: Cavium, Inc.
0003  *
0004  * Contact: support@cavium.com
0005  *          Please include "LiquidIO" in the subject.
0006  *
0007  * Copyright (c) 2003-2016 Cavium, Inc.
0008  *
0009  * This file is free software; you can redistribute it and/or modify
0010  * it under the terms of the GNU General Public License, Version 2, as
0011  * published by the Free Software Foundation.
0012  *
0013  * This file is distributed in the hope that it will be useful, but
0014  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
0015  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
0016  * NONINFRINGEMENT.  See the GNU General Public License for more details.
0017  ***********************************************************************/
0018 #include <linux/ethtool.h>
0019 #include <linux/netdevice.h>
0020 #include <linux/net_tstamp.h>
0021 #include <linux/pci.h>
0022 #include "liquidio_common.h"
0023 #include "octeon_droq.h"
0024 #include "octeon_iq.h"
0025 #include "response_manager.h"
0026 #include "octeon_device.h"
0027 #include "octeon_nic.h"
0028 #include "octeon_main.h"
0029 #include "octeon_network.h"
0030 #include "cn66xx_regs.h"
0031 #include "cn66xx_device.h"
0032 #include "cn23xx_pf_device.h"
0033 #include "cn23xx_vf_device.h"
0034 
0035 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
0036 
/* Firmware response layout for an interrupt-moderation query.
 * NOTE(review): field order mirrors the response format produced by the
 * firmware -- do not reorder fields.
 */
struct oct_intrmod_resp {
    u64     rh;                     /* response header from firmware */
    struct oct_intrmod_cfg intrmod; /* returned moderation settings */
    u64     status;                 /* completion status (0 on success) */
};
0042 
/* Firmware response layout for an MDIO clause-45 access request. */
struct oct_mdio_cmd_resp {
    u64 rh;                   /* response header from firmware */
    struct oct_mdio_cmd resp; /* echoed command, read value filled in */
    u64 status;               /* completion status (0 on success) */
};

/* Response buffer size used with octeon_alloc_soft_command() */
#define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
0050 
/* Octeon's interface mode of operation.
 * NOTE(review): these values are compared against linfo->link.s.if_mode,
 * which is reported by the firmware -- the ordering encodes the wire
 * values, so do not insert or reorder entries.
 */
enum {
    INTERFACE_MODE_DISABLED,
    INTERFACE_MODE_RGMII,
    INTERFACE_MODE_GMII,
    INTERFACE_MODE_SPI,
    INTERFACE_MODE_PCIE,
    INTERFACE_MODE_XAUI,
    INTERFACE_MODE_SGMII,
    INTERFACE_MODE_PICMG,
    INTERFACE_MODE_NPI,
    INTERFACE_MODE_LOOP,
    INTERFACE_MODE_SRIO,
    INTERFACE_MODE_ILK,
    INTERFACE_MODE_RXAUI,
    INTERFACE_MODE_QSGMII,
    INTERFACE_MODE_AGL,
    INTERFACE_MODE_XLAUI,
    INTERFACE_MODE_XFI,
    INTERFACE_MODE_10G_KR,
    INTERFACE_MODE_40G_KR4,
    INTERFACE_MODE_MIXED,
};
0074 
0075 #define OCT_ETHTOOL_REGDUMP_LEN  4096
0076 #define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
0077 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF  (4096 * 2)
0078 #define OCT_ETHTOOL_REGSVER  1
0079 
/* statistics of PF -- names reported to ethtool -S.
 * NOTE(review): the order here presumably must match the order in which
 * the stats-fill routine writes values; do not reorder without updating
 * that routine.
 */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
    "rx_packets",
    "tx_packets",
    "rx_bytes",
    "tx_bytes",
    "rx_errors",
    "tx_errors",
    "rx_dropped",
    "tx_dropped",

    "tx_total_sent",
    "tx_total_fwd",
    "tx_err_pko",
    "tx_err_pki",
    "tx_err_link",
    "tx_err_drop",

    "tx_tso",
    "tx_tso_packets",
    "tx_tso_err",
    "tx_vxlan",

    "tx_mcast",
    "tx_bcast",

    "mac_tx_total_pkts",
    "mac_tx_total_bytes",
    "mac_tx_mcast_pkts",
    "mac_tx_bcast_pkts",
    "mac_tx_ctl_packets",
    "mac_tx_total_collisions",
    "mac_tx_one_collision",
    "mac_tx_multi_collision",
    "mac_tx_max_collision_fail",
    "mac_tx_max_deferral_fail",
    "mac_tx_fifo_err",
    "mac_tx_runts",

    "rx_total_rcvd",
    "rx_total_fwd",
    "rx_mcast",
    "rx_bcast",
    "rx_jabber_err",
    "rx_l2_err",
    "rx_frame_err",
    "rx_err_pko",
    "rx_err_link",
    "rx_err_drop",

    "rx_vxlan",
    "rx_vxlan_err",

    "rx_lro_pkts",
    "rx_lro_bytes",
    "rx_total_lro",

    "rx_lro_aborts",
    "rx_lro_aborts_port",
    "rx_lro_aborts_seq",
    "rx_lro_aborts_tsval",
    "rx_lro_aborts_timer",
    "rx_fwd_rate",

    "mac_rx_total_rcvd",
    "mac_rx_bytes",
    "mac_rx_total_bcst",
    "mac_rx_total_mcst",
    "mac_rx_runts",
    "mac_rx_ctl_packets",
    "mac_rx_fifo_err",
    "mac_rx_dma_drop",
    "mac_rx_fcs_err",

    "link_state_changes",
};
0156 
/* statistics of VF -- names reported to ethtool -S.
 * NOTE(review): keep the order in sync with the VF stats-fill routine.
 */
static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
    "rx_packets",
    "tx_packets",
    "rx_bytes",
    "tx_bytes",
    "rx_errors",
    "tx_errors",
    "rx_dropped",
    "tx_dropped",
    "rx_mcast",
    "tx_mcast",
    "rx_bcast",
    "tx_bcast",
    "link_state_changes",
};
0173 
/* statistics of host tx queue (instruction queue), reported per-queue.
 * NOTE(review): keep the order in sync with the per-IQ stats-fill code.
 */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
    "packets",
    "bytes",
    "dropped",
    "iq_busy",
    "sgentry_sent",

    "fw_instr_posted",
    "fw_instr_processed",
    "fw_instr_dropped",
    "fw_bytes_sent",

    "tso",
    "vxlan",
    "txq_restart",
};
0191 
/* statistics of host rx queue (DROQ), reported per-queue.
 * NOTE(review): keep the order in sync with the per-DROQ stats-fill code.
 */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
    "packets",
    "bytes",
    "dropped",
    "dropped_nomem",
    "dropped_toomany",
    "fw_dropped",
    "fw_pkts_received",
    "fw_bytes_received",
    "fw_dropped_nodispatch",

    "vxlan",
    "buffer_alloc_failure",
};
0207 
/* LiquidIO driver private flags (none currently exposed) */
static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
};

/* Bit flags carried in OCTNET autoneg/PHY control command parameters */
#define OCTNIC_NCMD_AUTONEG_ON  0x1
#define OCTNIC_NCMD_PHY_ON      0x2
0214 
/* ethtool get_link_ksettings handler.
 *
 * Reports port type, supported/advertised link modes and current
 * speed/duplex based on the link info cached in lio->linfo (populated by
 * firmware link-status updates).  Autonegotiation is always reported as
 * disabled.  Returns 0.
 */
static int lio_get_link_ksettings(struct net_device *netdev,
                  struct ethtool_link_ksettings *ecmd)
{
    struct lio *lio = GET_LIO(netdev);
    struct octeon_device *oct = lio->oct_dev;
    struct oct_link_info *linfo;

    linfo = &lio->linfo;

    ethtool_link_ksettings_zero_link_mode(ecmd, supported);
    ethtool_link_ksettings_zero_link_mode(ecmd, advertising);

    switch (linfo->link.s.phy_type) {
    case LIO_PHY_PORT_TP:
        /* Copper port: fixed 10GBase-T, no autonegotiation */
        ecmd->base.port = PORT_TP;
        ecmd->base.autoneg = AUTONEG_DISABLE;
        ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
        ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
        ethtool_link_ksettings_add_link_mode(ecmd, supported,
                             10000baseT_Full);

        ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
        ethtool_link_ksettings_add_link_mode(ecmd, advertising,
                             10000baseT_Full);

        break;

    case LIO_PHY_PORT_FIBRE:
        /* External transceiver for the XAUI-family interface modes */
        if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
            linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
            linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
            linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
            dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
            ecmd->base.transceiver = XCVR_EXTERNAL;
        } else {
            dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
                linfo->link.s.if_mode);
        }

        ecmd->base.port = PORT_FIBRE;
        ecmd->base.autoneg = AUTONEG_DISABLE;
        ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);

        ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
        ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
        /* The CN2350/CN2360 25GB parts support 25G always and,
         * when speed setting is available, 10G as well.
         */
        if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
            oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
            if (OCTEON_CN23XX_PF(oct)) {
                ethtool_link_ksettings_add_link_mode
                    (ecmd, supported, 25000baseSR_Full);
                ethtool_link_ksettings_add_link_mode
                    (ecmd, supported, 25000baseKR_Full);
                ethtool_link_ksettings_add_link_mode
                    (ecmd, supported, 25000baseCR_Full);

                if (oct->no_speed_setting == 0)  {
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, supported,
                         10000baseSR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, supported,
                         10000baseKR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, supported,
                         10000baseCR_Full);
                }

                /* Query firmware for the programmed speed and
                 * FEC setting; otherwise assume the fixed 25G.
                 */
                if (oct->no_speed_setting == 0) {
                    liquidio_get_speed(lio);
                    liquidio_get_fec(lio);
                } else {
                    oct->speed_setting = 25;
                }

                if (oct->speed_setting == 10) {
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         10000baseSR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         10000baseKR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         10000baseCR_Full);
                }
                if (oct->speed_setting == 25) {
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         25000baseSR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         25000baseKR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         25000baseCR_Full);
                }

                /* FEC modes are only reported when speed
                 * setting is available.
                 */
                if (oct->no_speed_setting)
                    break;

                ethtool_link_ksettings_add_link_mode
                    (ecmd, supported, FEC_RS);
                ethtool_link_ksettings_add_link_mode
                    (ecmd, supported, FEC_NONE);
                    /*FEC_OFF*/
                if (oct->props[lio->ifidx].fec == 1) {
                    /* ETHTOOL_FEC_RS */
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising, FEC_RS);
                } else {
                    /* ETHTOOL_FEC_OFF */
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising, FEC_NONE);
                }
            } else { /* VF */
                /* VFs report only the speed the PF/firmware
                 * already negotiated.
                 */
                if (linfo->link.s.speed == 10000) {
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, supported,
                         10000baseSR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, supported,
                         10000baseKR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, supported,
                         10000baseCR_Full);

                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         10000baseSR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         10000baseKR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         10000baseCR_Full);
                }

                if (linfo->link.s.speed == 25000) {
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, supported,
                         25000baseSR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, supported,
                         25000baseKR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, supported,
                         25000baseCR_Full);

                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         25000baseSR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         25000baseKR_Full);
                    ethtool_link_ksettings_add_link_mode
                        (ecmd, advertising,
                         25000baseCR_Full);
                }
            }
        } else {
            /* All other fibre parts: fixed 10G */
            ethtool_link_ksettings_add_link_mode(ecmd, supported,
                                 10000baseT_Full);
            ethtool_link_ksettings_add_link_mode(ecmd, advertising,
                                 10000baseT_Full);
        }
        break;
    }

    /* Speed/duplex are only meaningful while the link is up */
    if (linfo->link.s.link_up) {
        ecmd->base.speed = linfo->link.s.speed;
        ecmd->base.duplex = linfo->link.s.duplex;
    } else {
        ecmd->base.speed = SPEED_UNKNOWN;
        ecmd->base.duplex = DUPLEX_UNKNOWN;
    }

    return 0;
}
0393 
/* ethtool set_link_ksettings handler.
 *
 * Only the CN2350/CN2360 25GB parts support changing the port speed
 * (10G <-> 25G); everything else returns -EOPNOTSUPP.  Duplex and
 * autoneg cannot be changed.
 */
static int lio_set_link_ksettings(struct net_device *netdev,
                  const struct ethtool_link_ksettings *ecmd)
{
    const int speed = ecmd->base.speed;
    struct lio *lio = GET_LIO(netdev);
    struct oct_link_info *linfo;
    struct octeon_device *oct;

    oct = lio->oct_dev;

    linfo = &lio->linfo;

    /* Speed selection exists only on the 25GB subsystem IDs */
    if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
          oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
        return -EOPNOTSUPP;

    if (oct->no_speed_setting) {
        dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
            __func__);
        return -EOPNOTSUPP;
    }

    /* Reject any change of duplex or autoneg, and any speed other than
     * 10G/25G.  NOTE(review): SPEED_UNKNOWN passes this check and is
     * then divided by 1000 below -- confirm that path is intended.
     */
    if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
         ecmd->base.duplex != linfo->link.s.duplex) ||
         ecmd->base.autoneg != AUTONEG_DISABLE ||
        (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
         ecmd->base.speed != SPEED_UNKNOWN))
        return -EOPNOTSUPP;

    /* Nothing to do when the requested speed is already programmed */
    if ((oct->speed_boot == speed / 1000) &&
        oct->speed_boot == oct->speed_setting)
        return 0;

    liquidio_set_speed(lio, speed / 1000);

    dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
        oct->speed_setting);

    return 0;
}
0434 
0435 static void
0436 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
0437 {
0438     struct lio *lio;
0439     struct octeon_device *oct;
0440 
0441     lio = GET_LIO(netdev);
0442     oct = lio->oct_dev;
0443 
0444     memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
0445     strcpy(drvinfo->driver, "liquidio");
0446     strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
0447         ETHTOOL_FWVERS_LEN);
0448     strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
0449 }
0450 
0451 static void
0452 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
0453 {
0454     struct octeon_device *oct;
0455     struct lio *lio;
0456 
0457     lio = GET_LIO(netdev);
0458     oct = lio->oct_dev;
0459 
0460     memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
0461     strcpy(drvinfo->driver, "liquidio_vf");
0462     strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
0463         ETHTOOL_FWVERS_LEN);
0464     strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
0465 }
0466 
0467 static int
0468 lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
0469 {
0470     struct lio *lio = GET_LIO(netdev);
0471     struct octeon_device *oct = lio->oct_dev;
0472     struct octnic_ctrl_pkt nctrl;
0473     int ret = 0;
0474 
0475     memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
0476 
0477     nctrl.ncmd.u64 = 0;
0478     nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
0479     nctrl.ncmd.s.param1 = num_queues;
0480     nctrl.ncmd.s.param2 = num_queues;
0481     nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
0482     nctrl.netpndev = (u64)netdev;
0483     nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
0484 
0485     ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
0486     if (ret) {
0487         dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
0488             ret);
0489         return -1;
0490     }
0491 
0492     return 0;
0493 }
0494 
0495 static void
0496 lio_ethtool_get_channels(struct net_device *dev,
0497              struct ethtool_channels *channel)
0498 {
0499     struct lio *lio = GET_LIO(dev);
0500     struct octeon_device *oct = lio->oct_dev;
0501     u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
0502     u32 combined_count = 0, max_combined = 0;
0503 
0504     if (OCTEON_CN6XXX(oct)) {
0505         struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
0506 
0507         max_rx = CFG_GET_OQ_MAX_Q(conf6x);
0508         max_tx = CFG_GET_IQ_MAX_Q(conf6x);
0509         rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
0510         tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
0511     } else if (OCTEON_CN23XX_PF(oct)) {
0512         if (oct->sriov_info.sriov_enabled) {
0513             max_combined = lio->linfo.num_txpciq;
0514         } else {
0515             struct octeon_config *conf23_pf =
0516                 CHIP_CONF(oct, cn23xx_pf);
0517 
0518             max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
0519         }
0520         combined_count = oct->num_iqs;
0521     } else if (OCTEON_CN23XX_VF(oct)) {
0522         u64 reg_val = 0ULL;
0523         u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
0524 
0525         reg_val = octeon_read_csr64(oct, ctrl);
0526         reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
0527         max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
0528         combined_count = oct->num_iqs;
0529     }
0530 
0531     channel->max_rx = max_rx;
0532     channel->max_tx = max_tx;
0533     channel->max_combined = max_combined;
0534     channel->rx_count = rx_count;
0535     channel->tx_count = tx_count;
0536     channel->combined_count = combined_count;
0537 }
0538 
/* Tear down all MSI-X vectors currently in use and re-allocate ioq
 * vectors and interrupts for the new queue count.
 *
 * Returns 0 on success (or immediately if MSI-X is not in use), -1 if
 * vector allocation or interrupt setup fails.
 */
static int
lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
{
    struct msix_entry *msix_entries;
    int num_msix_irqs = 0;
    int i;

    if (!oct->msix_on)
        return 0;

    /* Disable the input and output queues now. No more packets will
     * arrive from Octeon.
     */
    oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

    if (oct->msix_on) {
        /* On the PF the last MSI-X entry is reserved for the non-ioq
         * interrupt, so only num_msix_irqs - 1 entries are ioq vectors.
         */
        if (OCTEON_CN23XX_PF(oct))
            num_msix_irqs = oct->num_msix_irqs - 1;
        else if (OCTEON_CN23XX_VF(oct))
            num_msix_irqs = oct->num_msix_irqs;

        msix_entries = (struct msix_entry *)oct->msix_entries;
        for (i = 0; i < num_msix_irqs; i++) {
            if (oct->ioq_vector[i].vector) {
                /* clear the affinity_cpumask */
                irq_set_affinity_hint(msix_entries[i].vector,
                              NULL);
                free_irq(msix_entries[i].vector,
                     &oct->ioq_vector[i]);
                oct->ioq_vector[i].vector = 0;
            }
        }

        /* non-iov vector's argument is oct struct.  After the loop
         * i == num_msix_irqs, i.e. the reserved last entry on the PF.
         */
        if (OCTEON_CN23XX_PF(oct))
            free_irq(msix_entries[i].vector, oct);

        pci_disable_msix(oct->pci_dev);
        kfree(oct->msix_entries);
        oct->msix_entries = NULL;
    }

    kfree(oct->irq_name_storage);
    oct->irq_name_storage = NULL;

    if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
        dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
        return -1;
    }

    if (octeon_setup_interrupt(oct, num_ioqs)) {
        dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
        return -1;
    }

    /* Enable Octeon device interrupts */
    oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

    return 0;
}
0599 
/* ethtool set_channels handler.
 *
 * Validates the requested combined channel count against the chip/VF
 * limit, then stops the interface (if running), resets the queues to
 * the new count and restarts it.  Only combined channels may be set.
 */
static int
lio_ethtool_set_channels(struct net_device *dev,
             struct ethtool_channels *channel)
{
    u32 combined_count, max_combined;
    struct lio *lio = GET_LIO(dev);
    struct octeon_device *oct = lio->oct_dev;
    int stopped = 0;

    /* NOTE(review): strcmp() compares version strings lexicographically,
     * so e.g. "1.10.x" would compare less than "1.6.1" -- confirm the
     * firmware version string format makes this safe.
     */
    if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
        dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
        return -EINVAL;
    }

    if (!channel->combined_count || channel->other_count ||
        channel->rx_count || channel->tx_count)
        return -EINVAL;

    combined_count = channel->combined_count;

    if (OCTEON_CN23XX_PF(oct)) {
        if (oct->sriov_info.sriov_enabled) {
            max_combined = lio->linfo.num_txpciq;
        } else {
            struct octeon_config *conf23_pf =
                CHIP_CONF(oct,
                      cn23xx_pf);

            max_combined =
                CFG_GET_IQ_MAX_Q(conf23_pf);
        }
    } else if (OCTEON_CN23XX_VF(oct)) {
        /* Rings-per-VF limit comes from the IQ0 packet control CSR */
        u64 reg_val = 0ULL;
        u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

        reg_val = octeon_read_csr64(oct, ctrl);
        reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
        max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
    } else {
        return -EINVAL;
    }

    if (combined_count > max_combined || combined_count < 1)
        return -EINVAL;

    if (combined_count == oct->num_iqs)
        return 0;

    ifstate_set(lio, LIO_IFSTATE_RESETTING);

    if (netif_running(dev)) {
        dev->netdev_ops->ndo_stop(dev);
        stopped = 1;
    }

    /* NOTE(review): on failure the interface is left stopped and
     * LIO_IFSTATE_RESETTING remains set -- confirm this is the intended
     * "device unusable" behavior.
     */
    if (lio_reset_queues(dev, combined_count))
        return -EINVAL;

    if (stopped)
        dev->netdev_ops->ndo_open(dev);

    ifstate_reset(lio, LIO_IFSTATE_RESETTING);

    return 0;
}
0665 
0666 static int lio_get_eeprom_len(struct net_device *netdev)
0667 {
0668     u8 buf[192];
0669     struct lio *lio = GET_LIO(netdev);
0670     struct octeon_device *oct_dev = lio->oct_dev;
0671     struct octeon_board_info *board_info;
0672     int len;
0673 
0674     board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
0675     len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
0676               board_info->name, board_info->serial_number,
0677               board_info->major, board_info->minor);
0678 
0679     return len;
0680 }
0681 
/* ethtool get_eeprom handler: the "EEPROM" contents are the formatted
 * board-info string.  Only offset 0 is supported.
 *
 * NOTE(review): sprintf() assumes the caller's buffer was sized from
 * lio_get_eeprom_len(); since that length excludes the terminating NUL,
 * verify the NUL written here cannot overrun a caller-provided buffer
 * of exactly that length.
 */
static int
lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
           u8 *bytes)
{
    struct lio *lio = GET_LIO(netdev);
    struct octeon_device *oct_dev = lio->oct_dev;
    struct octeon_board_info *board_info;

    if (eeprom->offset)
        return -EINVAL;

    /* magic identifies the device vendor, per ethtool convention */
    eeprom->magic = oct_dev->pci_dev->vendor;
    board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
    sprintf((char *)bytes,
        "boardname:%s serialnum:%s maj:%lld min:%lld\n",
        board_info->name, board_info->serial_number,
        board_info->major, board_info->minor);

    return 0;
}
0702 
0703 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
0704 {
0705     struct lio *lio = GET_LIO(netdev);
0706     struct octeon_device *oct = lio->oct_dev;
0707     struct octnic_ctrl_pkt nctrl;
0708     int ret = 0;
0709 
0710     memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
0711 
0712     nctrl.ncmd.u64 = 0;
0713     nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
0714     nctrl.ncmd.s.param1 = addr;
0715     nctrl.ncmd.s.param2 = val;
0716     nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
0717     nctrl.netpndev = (u64)netdev;
0718     nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
0719 
0720     ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
0721     if (ret) {
0722         dev_err(&oct->pci_dev->dev,
0723             "Failed to configure gpio value, ret=%d\n", ret);
0724         return -EINVAL;
0725     }
0726 
0727     return 0;
0728 }
0729 
0730 static int octnet_id_active(struct net_device *netdev, int val)
0731 {
0732     struct lio *lio = GET_LIO(netdev);
0733     struct octeon_device *oct = lio->oct_dev;
0734     struct octnic_ctrl_pkt nctrl;
0735     int ret = 0;
0736 
0737     memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
0738 
0739     nctrl.ncmd.u64 = 0;
0740     nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
0741     nctrl.ncmd.s.param1 = val;
0742     nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
0743     nctrl.netpndev = (u64)netdev;
0744     nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
0745 
0746     ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
0747     if (ret) {
0748         dev_err(&oct->pci_dev->dev,
0749             "Failed to configure gpio value, ret=%d\n", ret);
0750         return -EINVAL;
0751     }
0752 
0753     return 0;
0754 }
0755 
/* This routine provides PHY access routines for
 * mdio clause45.
 *
 * @param lio    interface state
 * @param op     non-zero = write *value to @loc, zero = read @loc into
 *               *value
 * @param loc    MDIO register address
 * @param value  in/out value depending on @op
 *
 * Sends the request as a soft command and blocks until the firmware
 * responds.  Returns 0 on success, -ENOMEM/-EBUSY or a wait/response
 * error code on failure.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
    struct octeon_device *oct_dev = lio->oct_dev;
    struct octeon_soft_command *sc;
    struct oct_mdio_cmd_resp *mdio_cmd_rsp;
    struct oct_mdio_cmd *mdio_cmd;
    int retval = 0;

    sc = (struct octeon_soft_command *)
        octeon_alloc_soft_command(oct_dev,
                      sizeof(struct oct_mdio_cmd),
                      sizeof(struct oct_mdio_cmd_resp), 0);

    if (!sc)
        return -ENOMEM;

    mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
    mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

    mdio_cmd->op = op;
    mdio_cmd->mdio_addr = loc;
    if (op)
        mdio_cmd->value1 = *value;
    /* Byte-swap the command into the device's 8-byte word order */
    octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

    sc->iq_no = lio->linfo.txpciq[0].s.q_no;

    octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
                    0, 0, 0);

    init_completion(&sc->complete);
    sc->sc_status = OCTEON_REQUEST_PENDING;

    retval = octeon_send_soft_command(oct_dev, sc);
    if (retval == IQ_SEND_FAILED) {
        dev_err(&oct_dev->pci_dev->dev,
            "octnet_mdio45_access instruction failed status: %x\n",
            retval);
        /* Command never reached the device; safe to free here */
        octeon_free_soft_command(oct_dev, sc);
        return -EBUSY;
    } else {
        /* Sleep on a wait queue till the cond flag indicates that the
         * response arrived
         */
        retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
        if (retval)
            return retval;

        retval = mdio_cmd_rsp->status;
        if (retval) {
            dev_err(&oct_dev->pci_dev->dev,
                "octnet mdio45 access failed: %x\n", retval);
            /* Hand the sc back to the response handler for freeing */
            WRITE_ONCE(sc->caller_is_done, true);
            return -EBUSY;
        }

        /* Swap the response back into host word order */
        octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
                    sizeof(struct oct_mdio_cmd) / 8);

        if (!op)
            *value = mdio_cmd_rsp->resp.value1;

        WRITE_ONCE(sc->caller_is_done, true);
    }

    return retval;
}
0827 
/* ethtool set_phys_id handler: drive the port identification LED.
 *
 * Per-chip behavior:
 *  - CN66XX: toggled through PHY GPIO writes.
 *  - CN68XX: LED/beacon MDIO registers are saved on ACTIVE and restored
 *    on INACTIVE.
 *  - CN23XX PF: firmware OCTNET_CMD_ID_ACTIVE command; newer firmware
 *    (> 1.7.2) on copper ports lets the core drive ON/OFF blinking.
 *
 * NOTE(review): the positive return from ETHTOOL_ID_ACTIVE ("return 2")
 * is the blink-control value handed back to the ethtool core -- confirm
 * its meaning against the ethtool_ops::set_phys_id contract.
 */
static int lio_set_phys_id(struct net_device *netdev,
               enum ethtool_phys_id_state state)
{
    struct lio *lio = GET_LIO(netdev);
    struct octeon_device *oct = lio->oct_dev;
    struct oct_link_info *linfo;
    int value, ret;
    u32 cur_ver;

    linfo = &lio->linfo;
    cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
                 oct->fw_info.ver.min,
                 oct->fw_info.ver.rev);

    switch (state) {
    case ETHTOOL_ID_ACTIVE:
        if (oct->chip_id == OCTEON_CN66XX) {
            octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
                       VITESSE_PHY_GPIO_DRIVEON);
            return 2;

        } else if (oct->chip_id == OCTEON_CN68XX) {
            /* Save the current LED settings */
            ret = octnet_mdio45_access(lio, 0,
                           LIO68XX_LED_BEACON_ADDR,
                           &lio->phy_beacon_val);
            if (ret)
                return ret;

            ret = octnet_mdio45_access(lio, 0,
                           LIO68XX_LED_CTRL_ADDR,
                           &lio->led_ctrl_val);
            if (ret)
                return ret;

            /* Configure Beacon values */
            value = LIO68XX_LED_BEACON_CFGON;
            ret = octnet_mdio45_access(lio, 1,
                           LIO68XX_LED_BEACON_ADDR,
                           &value);
            if (ret)
                return ret;

            value = LIO68XX_LED_CTRL_CFGON;
            ret = octnet_mdio45_access(lio, 1,
                           LIO68XX_LED_CTRL_ADDR,
                           &value);
            if (ret)
                return ret;
        } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
            octnet_id_active(netdev, LED_IDENTIFICATION_ON);
            /* Newer firmware on copper ports supports ON/OFF
             * toggling driven by the core; older firmware blinks
             * by itself.
             */
            if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
                cur_ver > OCT_FW_VER(1, 7, 2))
                return 2;
            else
                return 0;
        } else {
            return -EINVAL;
        }
        break;

    case ETHTOOL_ID_ON:
        if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
            linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
            cur_ver > OCT_FW_VER(1, 7, 2))
            octnet_id_active(netdev, LED_IDENTIFICATION_ON);
        else if (oct->chip_id == OCTEON_CN66XX)
            octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
                       VITESSE_PHY_GPIO_HIGH);
        else
            return -EINVAL;

        break;

    case ETHTOOL_ID_OFF:
        if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
            linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
            cur_ver > OCT_FW_VER(1, 7, 2))
            octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
        else if (oct->chip_id == OCTEON_CN66XX)
            octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
                       VITESSE_PHY_GPIO_LOW);
        else
            return -EINVAL;

        break;

    case ETHTOOL_ID_INACTIVE:
        if (oct->chip_id == OCTEON_CN66XX) {
            octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
                       VITESSE_PHY_GPIO_DRIVEOFF);
        } else if (oct->chip_id == OCTEON_CN68XX) {
            /* Restore LED settings */
            ret = octnet_mdio45_access(lio, 1,
                           LIO68XX_LED_CTRL_ADDR,
                           &lio->led_ctrl_val);
            if (ret)
                return ret;

            ret = octnet_mdio45_access(lio, 1,
                           LIO68XX_LED_BEACON_ADDR,
                           &lio->phy_beacon_val);
            if (ret)
                return ret;
        } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
            octnet_id_active(netdev, LED_IDENTIFICATION_OFF);

            return 0;
        } else {
            return -EINVAL;
        }
        break;

    default:
        return -EINVAL;
    }

    return 0;
}
0947 
0948 static void
0949 lio_ethtool_get_ringparam(struct net_device *netdev,
0950               struct ethtool_ringparam *ering,
0951               struct kernel_ethtool_ringparam *kernel_ering,
0952               struct netlink_ext_ack *extack)
0953 {
0954     struct lio *lio = GET_LIO(netdev);
0955     struct octeon_device *oct = lio->oct_dev;
0956     u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
0957         rx_pending = 0;
0958 
0959     if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
0960         return;
0961 
0962     if (OCTEON_CN6XXX(oct)) {
0963         struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
0964 
0965         tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
0966         rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
0967         rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
0968         tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
0969     } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
0970         tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
0971         rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
0972         rx_pending = oct->droq[0]->max_count;
0973         tx_pending = oct->instr_queue[0]->max_count;
0974     }
0975 
0976     ering->tx_pending = tx_pending;
0977     ering->tx_max_pending = tx_max_pending;
0978     ering->rx_pending = rx_pending;
0979     ering->rx_max_pending = rx_max_pending;
0980     ering->rx_mini_pending = 0;
0981     ering->rx_jumbo_pending = 0;
0982     ering->rx_mini_max_pending = 0;
0983     ering->rx_jumbo_max_pending = 0;
0984 }
0985 
/* Inform CN23XX PF firmware of a new queue count by sending an
 * OPCODE_NIC_QCOUNT_UPDATE soft command, then refresh the driver's
 * cached interface configuration (queue masks, per-queue info, link
 * state, MAC) from the firmware's response.
 *
 * Ownership note: once octeon_send_soft_command() succeeds, the soft
 * command is released via WRITE_ONCE(sc->caller_is_done, true) on every
 * exit path except a wait failure (wait_for_sc_completion_timeout is
 * presumably responsible for the command in that case — TODO confirm).
 *
 * Returns 0 on success, -EIO if the command could not be queued,
 * the wait status if completion timed out, or -1 on other failures.
 */
static int lio_23xx_reconfigure_queue_count(struct lio *lio)
{
    struct octeon_device *oct = lio->oct_dev;
    u32 resp_size, data_size;
    struct liquidio_if_cfg_resp *resp;
    struct octeon_soft_command *sc;
    union oct_nic_if_cfg if_cfg;
    struct lio_version *vdata;
    u32 ifidx_or_pfnum;
    int retval;
    int j;

    resp_size = sizeof(struct liquidio_if_cfg_resp);
    data_size = sizeof(struct lio_version);
    sc = (struct octeon_soft_command *)
        octeon_alloc_soft_command(oct, data_size,
                      resp_size, 0);
    if (!sc) {
        dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
            __func__);
        return -1;
    }

    resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
    vdata = (struct lio_version *)sc->virtdptr;

    /* Driver version travels in the data buffer, big-endian on the wire */
    vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
    vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
    vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

    ifidx_or_pfnum = oct->pf_num;

    /* Request equal input/output queue counts (num_pf_rings each) */
    if_cfg.u64 = 0;
    if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
    if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
    if_cfg.s.base_queue = oct->sriov_info.pf_srn;
    if_cfg.s.gmx_port_id = oct->pf_num;

    sc->iq_no = 0;
    octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                    OPCODE_NIC_QCOUNT_UPDATE, 0,
                    if_cfg.u64, 0);

    init_completion(&sc->complete);
    sc->sc_status = OCTEON_REQUEST_PENDING;

    retval = octeon_send_soft_command(oct, sc);
    if (retval == IQ_SEND_FAILED) {
        dev_err(&oct->pci_dev->dev,
            "Sending iq/oq config failed status: %x\n",
            retval);
        /* never queued, so the caller still owns and must free sc */
        octeon_free_soft_command(oct, sc);
        return -EIO;
    }

    retval = wait_for_sc_completion_timeout(oct, sc, 0);
    if (retval)
        return retval;

    retval = resp->status;
    if (retval) {
        dev_err(&oct->pci_dev->dev,
            "iq/oq config failed: %x\n", retval);
        WRITE_ONCE(sc->caller_is_done, true);
        return -1;
    }

    /* Response payload arrives big-endian; swap 8-byte words in place */
    octeon_swap_8B_data((u64 *)(&resp->cfg_info),
                (sizeof(struct liquidio_if_cfg_info)) >> 3);

    lio->ifidx = ifidx_or_pfnum;
    /* NOTE(review): both rx and tx counts are derived from iqmask —
     * presumably valid because equal iq/oq counts were requested above;
     * verify whether oqmask should be used for num_rxpciq.
     */
    lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
    lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
    for (j = 0; j < lio->linfo.num_rxpciq; j++) {
        lio->linfo.rxpciq[j].u64 =
            resp->cfg_info.linfo.rxpciq[j].u64;
    }

    for (j = 0; j < lio->linfo.num_txpciq; j++) {
        lio->linfo.txpciq[j].u64 =
            resp->cfg_info.linfo.txpciq[j].u64;
    }

    /* Refresh cached link/MAC info and default queue numbers */
    lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
    lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
    lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
    lio->txq = lio->linfo.txpciq[0].s.q_no;
    lio->rxq = lio->linfo.rxpciq[0].s.q_no;

    dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
         lio->linfo.num_rxpciq);

    WRITE_ONCE(sc->caller_is_done, true);

    return 0;
}
1082 
/* Tear down and recreate this interface's I/O queues.
 *
 * Called from the ethtool reconfiguration paths.  Two modes:
 *   - @num_qs != oct->num_iqs: the queue COUNT is changing
 *     (queue_count_update = 1), which additionally requires recreating
 *     glists, the oom-poll worker, IRQs, the PF mailbox and (for a
 *     non-SRIOV PF) notifying firmware of the new count;
 *   - otherwise only the DESCRIPTOR counts change and just the queues
 *     themselves are deleted and rebuilt.
 *
 * The caller is expected to have quiesced the netdev.  Returns 0 on
 * success; on failure returns a negative value (or 1 from the oom-poll
 * setup path) and may leave the interface partially torn down.
 */
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
{
    struct lio *lio = GET_LIO(netdev);
    struct octeon_device *oct = lio->oct_dev;
    int i, queue_count_update = 0;
    struct napi_struct *napi, *n;
    int ret;

    /* Give in-flight work a moment to drain before checking */
    schedule_timeout_uninterruptible(msecs_to_jiffies(100));

    if (wait_for_pending_requests(oct))
        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

    if (lio_wait_for_instr_fetch(oct))
        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

    if (octeon_set_io_queues_off(oct)) {
        dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
        return -1;
    }

    /* Disable the input and output queues now. No more packets will
     * arrive from Octeon.
     */
    oct->fn_list.disable_io_queues(oct);
    /* Delete NAPI */
    list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
        netif_napi_del(napi);

    if (num_qs != oct->num_iqs) {
        ret = netif_set_real_num_rx_queues(netdev, num_qs);
        if (ret) {
            dev_err(&oct->pci_dev->dev,
                "Setting real number rx failed\n");
            return ret;
        }

        ret = netif_set_real_num_tx_queues(netdev, num_qs);
        if (ret) {
            dev_err(&oct->pci_dev->dev,
                "Setting real number tx failed\n");
            return ret;
        }

        /* The value of queue_count_update decides whether it is the
         * queue count or the descriptor count that is being
         * re-configured.
         */
        queue_count_update = 1;
    }

    /* Re-configuration of queues can happen in two scenarios, SRIOV enabled
     * and SRIOV disabled. Few things like recreating queue zero, resetting
     * glists and IRQs are required for both. For the latter, some more
     * steps like updating sriov_info for the octeon device need to be done.
     */
    if (queue_count_update) {
        cleanup_rx_oom_poll_fn(netdev);

        lio_delete_glists(lio);

        /* Delete mbox for PF which is SRIOV disabled because sriov_info
         * will be now changed.
         */
        if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
            oct->fn_list.free_mbox(oct);
    }

    /* Delete every currently active output (RX) queue */
    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
        if (!(oct->io_qmask.oq & BIT_ULL(i)))
            continue;
        octeon_delete_droq(oct, i);
    }

    /* Delete every currently active instruction (TX) queue */
    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
        if (!(oct->io_qmask.iq & BIT_ULL(i)))
            continue;
        octeon_delete_instr_queue(oct, i);
    }

    if (queue_count_update) {
        /* For PF re-configure sriov related information */
        if ((OCTEON_CN23XX_PF(oct)) &&
            !oct->sriov_info.sriov_enabled) {
            oct->sriov_info.num_pf_rings = num_qs;
            if (cn23xx_sriov_config(oct)) {
                dev_err(&oct->pci_dev->dev,
                    "Queue reset aborted: SRIOV config failed\n");
                return -1;
            }

            /* cn23xx_sriov_config() may adjust the ring count */
            num_qs = oct->sriov_info.num_pf_rings;
        }
    }

    if (oct->fn_list.setup_device_regs(oct)) {
        dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
        return -1;
    }

    /* The following are needed in case of queue count re-configuration and
     * not for descriptor count re-configuration.
     */
    if (queue_count_update) {
        if (octeon_setup_instr_queues(oct))
            return -1;

        if (octeon_setup_output_queues(oct))
            return -1;

        /* Recreating mbox for PF that is SRIOV disabled */
        if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
            if (oct->fn_list.setup_mbox(oct)) {
                dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
                return -1;
            }
        }

        /* Deleting and recreating IRQs whether the interface is SRIOV
         * enabled or disabled.
         */
        if (lio_irq_reallocate_irqs(oct, num_qs)) {
            dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
            return -1;
        }

        /* Enable the input and output queues for this Octeon device */
        if (oct->fn_list.enable_io_queues(oct)) {
            dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
            return -1;
        }

        /* Prime each output queue's credit register to its full depth */
        for (i = 0; i < oct->num_oqs; i++)
            writel(oct->droq[i]->max_count,
                   oct->droq[i]->pkts_credit_reg);

        /* Informing firmware about the new queue count. It is required
         * for firmware to allocate more number of queues than those at
         * load time.
         */
        if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
            if (lio_23xx_reconfigure_queue_count(lio))
                return -1;
        }
    }

    /* Once firmware is aware of the new value, queues can be recreated */
    if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
        dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
        return -1;
    }

    if (queue_count_update) {
        if (lio_setup_glists(oct, lio, num_qs)) {
            dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
            return -1;
        }

        if (setup_rx_oom_poll_fn(netdev)) {
            dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
            return 1;
        }

        /* Send firmware the information about new number of queues
         * if the interface is a VF or a PF that is SRIOV enabled.
         */
        if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
            if (lio_send_queue_count_update(netdev, num_qs))
                return -1;
    }

    return 0;
}
1256 
1257 static int
1258 lio_ethtool_set_ringparam(struct net_device *netdev,
1259               struct ethtool_ringparam *ering,
1260               struct kernel_ethtool_ringparam *kernel_ering,
1261               struct netlink_ext_ack *extack)
1262 {
1263     u32 rx_count, tx_count, rx_count_old, tx_count_old;
1264     struct lio *lio = GET_LIO(netdev);
1265     struct octeon_device *oct = lio->oct_dev;
1266     int stopped = 0;
1267 
1268     if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
1269         return -EINVAL;
1270 
1271     if (ering->rx_mini_pending || ering->rx_jumbo_pending)
1272         return -EINVAL;
1273 
1274     rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
1275                CN23XX_MAX_OQ_DESCRIPTORS);
1276     tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
1277                CN23XX_MAX_IQ_DESCRIPTORS);
1278 
1279     rx_count_old = oct->droq[0]->max_count;
1280     tx_count_old = oct->instr_queue[0]->max_count;
1281 
1282     if (rx_count == rx_count_old && tx_count == tx_count_old)
1283         return 0;
1284 
1285     ifstate_set(lio, LIO_IFSTATE_RESETTING);
1286 
1287     if (netif_running(netdev)) {
1288         netdev->netdev_ops->ndo_stop(netdev);
1289         stopped = 1;
1290     }
1291 
1292     /* Change RX/TX DESCS  count */
1293     if (tx_count != tx_count_old)
1294         CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1295                         tx_count);
1296     if (rx_count != rx_count_old)
1297         CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1298                         rx_count);
1299 
1300     if (lio_reset_queues(netdev, oct->num_iqs))
1301         goto err_lio_reset_queues;
1302 
1303     if (stopped)
1304         netdev->netdev_ops->ndo_open(netdev);
1305 
1306     ifstate_reset(lio, LIO_IFSTATE_RESETTING);
1307 
1308     return 0;
1309 
1310 err_lio_reset_queues:
1311     if (tx_count != tx_count_old)
1312         CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1313                         tx_count_old);
1314     if (rx_count != rx_count_old)
1315         CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1316                         rx_count_old);
1317     return -EINVAL;
1318 }
1319 
1320 static u32 lio_get_msglevel(struct net_device *netdev)
1321 {
1322     struct lio *lio = GET_LIO(netdev);
1323 
1324     return lio->msg_enable;
1325 }
1326 
1327 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
1328 {
1329     struct lio *lio = GET_LIO(netdev);
1330 
1331     if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
1332         if (msglvl & NETIF_MSG_HW)
1333             liquidio_set_feature(netdev,
1334                          OCTNET_CMD_VERBOSE_ENABLE, 0);
1335         else
1336             liquidio_set_feature(netdev,
1337                          OCTNET_CMD_VERBOSE_DISABLE, 0);
1338     }
1339 
1340     lio->msg_enable = msglvl;
1341 }
1342 
1343 static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
1344 {
1345     struct lio *lio = GET_LIO(netdev);
1346 
1347     lio->msg_enable = msglvl;
1348 }
1349 
1350 static void
1351 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1352 {
1353     /* Notes: Not supporting any auto negotiation in these
1354      * drivers. Just report pause frame support.
1355      */
1356     struct lio *lio = GET_LIO(netdev);
1357     struct octeon_device *oct = lio->oct_dev;
1358 
1359     pause->autoneg = 0;
1360 
1361     pause->tx_pause = oct->tx_pause;
1362     pause->rx_pause = oct->rx_pause;
1363 }
1364 
1365 static int
1366 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1367 {
1368     /* Notes: Not supporting any auto negotiation in these
1369      * drivers.
1370      */
1371     struct lio *lio = GET_LIO(netdev);
1372     struct octeon_device *oct = lio->oct_dev;
1373     struct octnic_ctrl_pkt nctrl;
1374     struct oct_link_info *linfo = &lio->linfo;
1375 
1376     int ret = 0;
1377 
1378     if (oct->chip_id != OCTEON_CN23XX_PF_VID)
1379         return -EINVAL;
1380 
1381     if (linfo->link.s.duplex == 0) {
1382         /*no flow control for half duplex*/
1383         if (pause->rx_pause || pause->tx_pause)
1384             return -EINVAL;
1385     }
1386 
1387     /*do not support autoneg of link flow control*/
1388     if (pause->autoneg == AUTONEG_ENABLE)
1389         return -EINVAL;
1390 
1391     memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1392 
1393     nctrl.ncmd.u64 = 0;
1394     nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
1395     nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1396     nctrl.netpndev = (u64)netdev;
1397     nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1398 
1399     if (pause->rx_pause) {
1400         /*enable rx pause*/
1401         nctrl.ncmd.s.param1 = 1;
1402     } else {
1403         /*disable rx pause*/
1404         nctrl.ncmd.s.param1 = 0;
1405     }
1406 
1407     if (pause->tx_pause) {
1408         /*enable tx pause*/
1409         nctrl.ncmd.s.param2 = 1;
1410     } else {
1411         /*disable tx pause*/
1412         nctrl.ncmd.s.param2 = 0;
1413     }
1414 
1415     ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1416     if (ret) {
1417         dev_err(&oct->pci_dev->dev,
1418             "Failed to set pause parameter, ret=%d\n", ret);
1419         return -EINVAL;
1420     }
1421 
1422     oct->rx_pause = pause->rx_pause;
1423     oct->tx_pause = pause->tx_pause;
1424 
1425     return 0;
1426 }
1427 
/* ethtool get_ethtool_stats callback (PF).
 *
 * Fills @data with, in order: host-level netdev counters (from
 * ndo_get_stats64, some augmented with firmware link error counts),
 * firmware TX stats, MAC TX stats, firmware RX stats, LRO stats,
 * MAC RX stats, then per-IQ and per-OQ counters for each active queue.
 *
 * NOTE(review): every data[i++] position must stay in sync with the
 * corresponding stats-strings table defined elsewhere in this file —
 * do not reorder entries.
 */
static void
lio_get_ethtool_stats(struct net_device *netdev,
              struct ethtool_stats *stats  __attribute__((unused)),
              u64 *data)
{
    struct lio *lio = GET_LIO(netdev);
    struct octeon_device *oct_dev = lio->oct_dev;
    struct rtnl_link_stats64 lstats;
    int i = 0, j;

    /* queues may be mid-rebuild; report nothing during a reset */
    if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
        return;

    netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
    /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
    data[i++] = lstats.rx_packets;
    /*sum of oct->instr_queue[iq_no]->stats.tx_done */
    data[i++] = lstats.tx_packets;
    /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
    data[i++] = lstats.rx_bytes;
    /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
    data[i++] = lstats.tx_bytes;
    /* host rx errors plus firmware-side wire error counters */
    data[i++] = lstats.rx_errors +
            oct_dev->link_stats.fromwire.fcs_err +
            oct_dev->link_stats.fromwire.jabber_err +
            oct_dev->link_stats.fromwire.l2_err +
            oct_dev->link_stats.fromwire.frame_err;
    data[i++] = lstats.tx_errors;
    /*sum of oct->droq[oq_no]->stats->rx_dropped +
     *oct->droq[oq_no]->stats->dropped_nodispatch +
     *oct->droq[oq_no]->stats->dropped_toomany +
     *oct->droq[oq_no]->stats->dropped_nomem
     */
    data[i++] = lstats.rx_dropped +
            oct_dev->link_stats.fromwire.fifo_err +
            oct_dev->link_stats.fromwire.dmac_drop +
            oct_dev->link_stats.fromwire.red_drops +
            oct_dev->link_stats.fromwire.fw_err_pko +
            oct_dev->link_stats.fromwire.fw_err_link +
            oct_dev->link_stats.fromwire.fw_err_drop;
    /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
    data[i++] = lstats.tx_dropped +
            oct_dev->link_stats.fromhost.max_collision_fail +
            oct_dev->link_stats.fromhost.max_deferral_fail +
            oct_dev->link_stats.fromhost.total_collisions +
            oct_dev->link_stats.fromhost.fw_err_pko +
            oct_dev->link_stats.fromhost.fw_err_link +
            oct_dev->link_stats.fromhost.fw_err_drop +
            oct_dev->link_stats.fromhost.fw_err_pki;

    /* firmware tx stats */
    /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
     *fromhost.fw_total_sent
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
    /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
    /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
    /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
    /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
    /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
     *fw_err_drop
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

    /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
    /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
     *fw_tso_fwd
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
    /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
     *fw_err_tso
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
    /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
     *fw_tx_vxlan
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

    /* Multicast packets sent by this port */
    data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
    data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;

    /* mac tx statistics */
    /*CVMX_BGXX_CMRX_TX_STAT5 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
    /*CVMX_BGXX_CMRX_TX_STAT4 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
    /*CVMX_BGXX_CMRX_TX_STAT15 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
    /*CVMX_BGXX_CMRX_TX_STAT14 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
    /*CVMX_BGXX_CMRX_TX_STAT17 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
    /*CVMX_BGXX_CMRX_TX_STAT0 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
    /*CVMX_BGXX_CMRX_TX_STAT3 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
    /*CVMX_BGXX_CMRX_TX_STAT2 */
    data[i++] =
        CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
    /*CVMX_BGXX_CMRX_TX_STAT0 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
    /*CVMX_BGXX_CMRX_TX_STAT1 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
    /*CVMX_BGXX_CMRX_TX_STAT16 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
    /*CVMX_BGXX_CMRX_TX_STAT6 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

    /* RX firmware stats */
    /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
     *fw_total_rcvd
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
    /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
     *fw_total_fwd
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
    /* Multicast packets received on this port */
    data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
    data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
    /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
    /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
    /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
    /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
     *fw_err_pko
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
    /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
    /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
     *fromwire.fw_err_drop
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

    /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
     *fromwire.fw_rx_vxlan
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
    /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
     *fromwire.fw_rx_vxlan_err
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

    /* LRO */
    /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
     *fw_lro_pkts
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
    /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
     *fw_lro_octs
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
    /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
    /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
    /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
     *fw_lro_aborts_port
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
    /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
     *fw_lro_aborts_seq
     */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
    /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
     *fw_lro_aborts_tsval
     */
    data[i++] =
        CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
    /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
     *fw_lro_aborts_timer
     */
    data[i++] =
        CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
    /* intrmod: packet forward rate */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

    /* mac: link-level stats */
    /*CVMX_BGXX_CMRX_RX_STAT0 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
    /*CVMX_BGXX_CMRX_RX_STAT1 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
    /*CVMX_PKI_STATX_STAT5 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
    /*CVMX_PKI_STATX_STAT5 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
    /*wqe->word2.err_code or wqe->word2.err_level */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
    /*CVMX_BGXX_CMRX_RX_STAT2 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
    /*CVMX_BGXX_CMRX_RX_STAT6 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
    /*CVMX_BGXX_CMRX_RX_STAT4 */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
    /*wqe->word2.err_code or wqe->word2.err_level */
    data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
    /*lio->link_changes*/
    data[i++] = CVM_CAST64(lio->link_changes);

    /* per-instruction-queue (TX) counters, active queues only */
    for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
        if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
            continue;
        /*packets to network port*/
        /*# of packets tx to network */
        data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
        /*# of bytes tx to network */
        data[i++] =
            CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
        /*# of packets dropped */
        data[i++] =
            CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
        /*# of tx fails due to queue full */
        data[i++] =
            CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
        /*XXX gather entries sent */
        data[i++] =
            CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

        /*instruction to firmware: data and control */
        /*# of instructions to the queue */
        data[i++] =
            CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
        /*# of instructions processed */
        data[i++] = CVM_CAST64(
                oct_dev->instr_queue[j]->stats.instr_processed);
        /*# of instructions could not be processed */
        data[i++] = CVM_CAST64(
                oct_dev->instr_queue[j]->stats.instr_dropped);
        /*bytes sent through the queue */
        data[i++] =
            CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

        /*tso request*/
        data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
        /*vxlan request*/
        data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
        /*txq restart*/
        data[i++] =
            CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
    }

    /* RX: per-output-queue counters, active queues only */
    for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
        if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
            continue;

        /*packets send to TCP/IP network stack */
        /*# of packets to network stack */
        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
        /*# of bytes to network stack */
        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
        /*# of packets dropped */
        data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
                       oct_dev->droq[j]->stats.dropped_toomany +
                       oct_dev->droq[j]->stats.rx_dropped);
        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

        /*control and data path*/
        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
        data[i++] =
            CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
    }
}
1716 
/* lio_vf_get_ethtool_stats - fill the ethtool statistics array for a VF.
 * @netdev: interface being queried
 * @stats: ethtool stats header (unused)
 * @data: output array of u64 counters
 *
 * The values must be written in exactly the same order as the names emitted
 * by lio_vf_get_strings(): the oct_vf_stats_strings entries first, then the
 * per-IQ counters for each active tx queue, then the per-OQ counters for
 * each active rx queue.
 */
static void lio_vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats
				     __attribute__((unused)),
				     u64 *data)
{
	struct rtnl_link_stats64 lstats;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j, vj;

	/* Queues may be mid-teardown during a reset; leave the caller's
	 * buffer untouched rather than read freed queue state.
	 */
	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = lstats.rx_packets;
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = lstats.tx_packets;
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = lstats.rx_bytes;
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = lstats.tx_bytes;
	data[i++] = lstats.rx_errors;
	data[i++] = lstats.tx_errors;
	 /* sum of oct->droq[oq_no]->stats->rx_dropped +
	  * oct->droq[oq_no]->stats->dropped_nodispatch +
	  * oct->droq[oq_no]->stats->dropped_toomany +
	  * oct->droq[oq_no]->stats->dropped_nomem
	  */
	data[i++] = lstats.rx_dropped;
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = lstats.tx_dropped +
		oct_dev->link_stats.fromhost.fw_err_drop;

	/* firmware-maintained multicast/broadcast counters */
	data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
	data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
	data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;

	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	/* TX: vj indexes this VF's queue list, j is the device-absolute
	 * instruction-queue number.
	 */
	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;

		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		 /* # of bytes tx to network */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions could not be processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.bytes_sent);
		/* tso request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan request */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restart */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX: vj indexes this VF's queue list, j is the device-absolute
	 * output-queue (DROQ) number.
	 */
	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;

		/* packets send to TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_bytes_received);
		/* aggregate drop count followed by its three components */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
1829 
1830 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
1831 {
1832     struct octeon_device *oct_dev = lio->oct_dev;
1833     int i;
1834 
1835     switch (oct_dev->chip_id) {
1836     case OCTEON_CN23XX_PF_VID:
1837     case OCTEON_CN23XX_VF_VID:
1838         for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1839             sprintf(data, "%s", oct_priv_flags_strings[i]);
1840             data += ETH_GSTRING_LEN;
1841         }
1842         break;
1843     case OCTEON_CN68XX:
1844     case OCTEON_CN66XX:
1845         break;
1846     default:
1847         netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1848         break;
1849     }
1850 }
1851 
1852 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1853 {
1854     struct lio *lio = GET_LIO(netdev);
1855     struct octeon_device *oct_dev = lio->oct_dev;
1856     int num_iq_stats, num_oq_stats, i, j;
1857     int num_stats;
1858 
1859     switch (stringset) {
1860     case ETH_SS_STATS:
1861         num_stats = ARRAY_SIZE(oct_stats_strings);
1862         for (j = 0; j < num_stats; j++) {
1863             sprintf(data, "%s", oct_stats_strings[j]);
1864             data += ETH_GSTRING_LEN;
1865         }
1866 
1867         num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1868         for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1869             if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1870                 continue;
1871             for (j = 0; j < num_iq_stats; j++) {
1872                 sprintf(data, "tx-%d-%s", i,
1873                     oct_iq_stats_strings[j]);
1874                 data += ETH_GSTRING_LEN;
1875             }
1876         }
1877 
1878         num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1879         for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1880             if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1881                 continue;
1882             for (j = 0; j < num_oq_stats; j++) {
1883                 sprintf(data, "rx-%d-%s", i,
1884                     oct_droq_stats_strings[j]);
1885                 data += ETH_GSTRING_LEN;
1886             }
1887         }
1888         break;
1889 
1890     case ETH_SS_PRIV_FLAGS:
1891         lio_get_priv_flags_strings(lio, data);
1892         break;
1893     default:
1894         netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1895         break;
1896     }
1897 }
1898 
1899 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
1900                    u8 *data)
1901 {
1902     int num_iq_stats, num_oq_stats, i, j;
1903     struct lio *lio = GET_LIO(netdev);
1904     struct octeon_device *oct_dev = lio->oct_dev;
1905     int num_stats;
1906 
1907     switch (stringset) {
1908     case ETH_SS_STATS:
1909         num_stats = ARRAY_SIZE(oct_vf_stats_strings);
1910         for (j = 0; j < num_stats; j++) {
1911             sprintf(data, "%s", oct_vf_stats_strings[j]);
1912             data += ETH_GSTRING_LEN;
1913         }
1914 
1915         num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1916         for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1917             if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1918                 continue;
1919             for (j = 0; j < num_iq_stats; j++) {
1920                 sprintf(data, "tx-%d-%s", i,
1921                     oct_iq_stats_strings[j]);
1922                 data += ETH_GSTRING_LEN;
1923             }
1924         }
1925 
1926         num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1927         for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1928             if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1929                 continue;
1930             for (j = 0; j < num_oq_stats; j++) {
1931                 sprintf(data, "rx-%d-%s", i,
1932                     oct_droq_stats_strings[j]);
1933                 data += ETH_GSTRING_LEN;
1934             }
1935         }
1936         break;
1937 
1938     case ETH_SS_PRIV_FLAGS:
1939         lio_get_priv_flags_strings(lio, data);
1940         break;
1941     default:
1942         netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1943         break;
1944     }
1945 }
1946 
1947 static int lio_get_priv_flags_ss_count(struct lio *lio)
1948 {
1949     struct octeon_device *oct_dev = lio->oct_dev;
1950 
1951     switch (oct_dev->chip_id) {
1952     case OCTEON_CN23XX_PF_VID:
1953     case OCTEON_CN23XX_VF_VID:
1954         return ARRAY_SIZE(oct_priv_flags_strings);
1955     case OCTEON_CN68XX:
1956     case OCTEON_CN66XX:
1957         return -EOPNOTSUPP;
1958     default:
1959         netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1960         return -EOPNOTSUPP;
1961     }
1962 }
1963 
1964 static int lio_get_sset_count(struct net_device *netdev, int sset)
1965 {
1966     struct lio *lio = GET_LIO(netdev);
1967     struct octeon_device *oct_dev = lio->oct_dev;
1968 
1969     switch (sset) {
1970     case ETH_SS_STATS:
1971         return (ARRAY_SIZE(oct_stats_strings) +
1972             ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1973             ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1974     case ETH_SS_PRIV_FLAGS:
1975         return lio_get_priv_flags_ss_count(lio);
1976     default:
1977         return -EOPNOTSUPP;
1978     }
1979 }
1980 
1981 static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
1982 {
1983     struct lio *lio = GET_LIO(netdev);
1984     struct octeon_device *oct_dev = lio->oct_dev;
1985 
1986     switch (sset) {
1987     case ETH_SS_STATS:
1988         return (ARRAY_SIZE(oct_vf_stats_strings) +
1989             ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1990             ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1991     case ETH_SS_PRIV_FLAGS:
1992         return lio_get_priv_flags_ss_count(lio);
1993     default:
1994         return -EOPNOTSUPP;
1995     }
1996 }
1997 
/* octnet_get_intrmod_cfg - read interrupt-moderation settings from firmware.
 * @lio: per-interface private state
 * @intr_cfg: filled in with the firmware's current oct_intrmod_cfg on success
 *
 * Sends an OPCODE_NIC_INTRMOD_PARAMS soft command and blocks until the
 * response arrives or times out.  Returns 0 on success, -ENOMEM if the soft
 * command cannot be allocated, -EINVAL if it cannot be queued, and -ENODEV
 * on timeout or a firmware-reported failure.
 */
static int octnet_get_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_resp *resp;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_intrmod_resp), 0);

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_intrmod_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_intrmod_resp));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
	if (retval)
		/* NOTE(review): sc is not freed and caller_is_done is not set
		 * on this path -- presumably the soft-command completion
		 * machinery reclaims it after a timeout; confirm.
		 */
		return -ENODEV;

	if (resp->status) {
		dev_err(&oct_dev->pci_dev->dev,
			"Get interrupt moderation parameters failed\n");
		WRITE_ONCE(sc->caller_is_done, true);
		return -ENODEV;
	}

	/* byte-swap the response's 64-bit words before copying it out */
	octeon_swap_8B_data((u64 *)&resp->intrmod,
			    (sizeof(struct oct_intrmod_cfg)) / 8);
	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
	WRITE_ONCE(sc->caller_is_done, true);

	return 0;
}
2054 
/* octnet_set_intrmod_cfg - push interrupt-moderation settings to firmware.
 * @lio: per-interface private state
 * @intr_cfg: settings to apply (copied into the command; not modified)
 *
 * Sends an OPCODE_NIC_INTRMOD_CFG soft command carrying @intr_cfg and blocks
 * until the firmware responds or the request times out.  Returns 0 on
 * success, -ENOMEM/-EINVAL on allocation/queueing failure, the wait error
 * from wait_for_sc_completion_timeout(), or -ENODEV when the firmware
 * reports a non-zero status.
 */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  16, 0);

	if (!sc)
		return -ENOMEM;

	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	/* copy the caller's settings and byte-swap the payload's 64-bit
	 * words for the firmware
	 */
	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
	if (retval)
		/* NOTE(review): sc is not released here -- presumably the
		 * completion machinery reclaims it on timeout; confirm.
		 */
		return retval;

	retval = sc->sc_status;
	if (retval == 0) {
		dev_info(&oct_dev->pci_dev->dev,
			 "Rx-Adaptive Interrupt moderation %s\n",
			 (intr_cfg->rx_enable) ?
			 "enabled" : "disabled");
		WRITE_ONCE(sc->caller_is_done, true);
		return 0;
	}

	dev_err(&oct_dev->pci_dev->dev,
		"intrmod config failed. Status: %x\n", retval);
	WRITE_ONCE(sc->caller_is_done, true);
	return -ENODEV;
}
2114 
/* lio_get_intr_coalesce - ethtool get_coalesce handler.
 * @netdev: interface being queried
 * @intr_coal: filled in with the current coalescing parameters
 * @kernel_coal: extended kernel coalesce parameters (unused here)
 * @extack: netlink extended ack (unused here)
 *
 * Reads the firmware's interrupt-moderation state and maps it onto
 * ethtool_coalesce fields.  Static (non-adaptive) values come from cached
 * device fields on CN23XX, or from the 6XXX config and the first IQ's fill
 * threshold on CN66XX/CN68XX.
 */
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg intrmod_cfg;

	/* ask firmware for the current moderation configuration */
	if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
		return -ENODEV;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID: {
		/* static values are only reported when the corresponding
		 * adaptive mode is off
		 */
		if (!intrmod_cfg.rx_enable) {
			intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
			intr_coal->rx_max_coalesced_frames =
				oct->rx_max_coalesced_frames;
		}
		if (!intrmod_cfg.tx_enable)
			intr_coal->tx_max_coalesced_frames =
				oct->tx_max_coalesced_frames;
		break;
	}
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intrmod_cfg.rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}
		/* tx threshold is the first tx queue's doorbell fill level */
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	if (intrmod_cfg.rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg.rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg.check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg.maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg.minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg.rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg.rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg.rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg.rx_mincnt_trigger;
	}
	/* adaptive tx moderation is only reported on CN23XX parts */
	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
	    (intrmod_cfg.tx_enable)) {
		intr_coal->use_adaptive_tx_coalesce =
			intrmod_cfg.tx_enable;
		intr_coal->tx_max_coalesced_frames_high =
			intrmod_cfg.tx_maxcnt_trigger;
		intr_coal->tx_max_coalesced_frames_low =
			intrmod_cfg.tx_mincnt_trigger;
	}
	return 0;
}
2189 
2190 /* Enable/Disable auto interrupt Moderation */
2191 static int oct_cfg_adaptive_intr(struct lio *lio,
2192                  struct oct_intrmod_cfg *intrmod_cfg,
2193                  struct ethtool_coalesce *intr_coal)
2194 {
2195     int ret = 0;
2196 
2197     if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
2198         intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
2199         intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
2200         intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
2201     }
2202     if (intrmod_cfg->rx_enable) {
2203         intrmod_cfg->rx_maxcnt_trigger =
2204             intr_coal->rx_max_coalesced_frames_high;
2205         intrmod_cfg->rx_maxtmr_trigger =
2206             intr_coal->rx_coalesce_usecs_high;
2207         intrmod_cfg->rx_mintmr_trigger =
2208             intr_coal->rx_coalesce_usecs_low;
2209         intrmod_cfg->rx_mincnt_trigger =
2210             intr_coal->rx_max_coalesced_frames_low;
2211     }
2212     if (intrmod_cfg->tx_enable) {
2213         intrmod_cfg->tx_maxcnt_trigger =
2214             intr_coal->tx_max_coalesced_frames_high;
2215         intrmod_cfg->tx_mincnt_trigger =
2216             intr_coal->tx_max_coalesced_frames_low;
2217     }
2218 
2219     ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
2220 
2221     return ret;
2222 }
2223 
2224 static int
2225 oct_cfg_rx_intrcnt(struct lio *lio,
2226            struct oct_intrmod_cfg *intrmod,
2227            struct ethtool_coalesce *intr_coal)
2228 {
2229     struct octeon_device *oct = lio->oct_dev;
2230     u32 rx_max_coalesced_frames;
2231 
2232     /* Config Cnt based interrupt values */
2233     switch (oct->chip_id) {
2234     case OCTEON_CN68XX:
2235     case OCTEON_CN66XX: {
2236         struct octeon_cn6xxx *cn6xxx =
2237             (struct octeon_cn6xxx *)oct->chip;
2238 
2239         if (!intr_coal->rx_max_coalesced_frames)
2240             rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
2241         else
2242             rx_max_coalesced_frames =
2243                 intr_coal->rx_max_coalesced_frames;
2244         octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
2245                  rx_max_coalesced_frames);
2246         CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
2247         break;
2248     }
2249     case OCTEON_CN23XX_PF_VID: {
2250         int q_no;
2251 
2252         if (!intr_coal->rx_max_coalesced_frames)
2253             rx_max_coalesced_frames = intrmod->rx_frames;
2254         else
2255             rx_max_coalesced_frames =
2256                 intr_coal->rx_max_coalesced_frames;
2257         for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2258             q_no += oct->sriov_info.pf_srn;
2259             octeon_write_csr64(
2260                 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2261                 (octeon_read_csr64(
2262                  oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2263                  (0x3fffff00000000UL)) |
2264                 (rx_max_coalesced_frames - 1));
2265             /*consider setting resend bit*/
2266         }
2267         intrmod->rx_frames = rx_max_coalesced_frames;
2268         oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2269         break;
2270     }
2271     case OCTEON_CN23XX_VF_VID: {
2272         int q_no;
2273 
2274         if (!intr_coal->rx_max_coalesced_frames)
2275             rx_max_coalesced_frames = intrmod->rx_frames;
2276         else
2277             rx_max_coalesced_frames =
2278                 intr_coal->rx_max_coalesced_frames;
2279         for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2280             octeon_write_csr64(
2281                 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2282                 (octeon_read_csr64(
2283                  oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2284                  (0x3fffff00000000UL)) |
2285                 (rx_max_coalesced_frames - 1));
2286             /*consider writing to resend bit here*/
2287         }
2288         intrmod->rx_frames = rx_max_coalesced_frames;
2289         oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2290         break;
2291     }
2292     default:
2293         return -EINVAL;
2294     }
2295     return 0;
2296 }
2297 
2298 static int oct_cfg_rx_intrtime(struct lio *lio,
2299                    struct oct_intrmod_cfg *intrmod,
2300                    struct ethtool_coalesce *intr_coal)
2301 {
2302     struct octeon_device *oct = lio->oct_dev;
2303     u32 time_threshold, rx_coalesce_usecs;
2304 
2305     /* Config Time based interrupt values */
2306     switch (oct->chip_id) {
2307     case OCTEON_CN68XX:
2308     case OCTEON_CN66XX: {
2309         struct octeon_cn6xxx *cn6xxx =
2310             (struct octeon_cn6xxx *)oct->chip;
2311         if (!intr_coal->rx_coalesce_usecs)
2312             rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
2313         else
2314             rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2315 
2316         time_threshold = lio_cn6xxx_get_oq_ticks(oct,
2317                              rx_coalesce_usecs);
2318         octeon_write_csr(oct,
2319                  CN6XXX_SLI_OQ_INT_LEVEL_TIME,
2320                  time_threshold);
2321 
2322         CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
2323         break;
2324     }
2325     case OCTEON_CN23XX_PF_VID: {
2326         u64 time_threshold;
2327         int q_no;
2328 
2329         if (!intr_coal->rx_coalesce_usecs)
2330             rx_coalesce_usecs = intrmod->rx_usecs;
2331         else
2332             rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2333         time_threshold =
2334             cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2335         for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2336             q_no += oct->sriov_info.pf_srn;
2337             octeon_write_csr64(oct,
2338                        CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2339                        (intrmod->rx_frames |
2340                         ((u64)time_threshold << 32)));
2341             /*consider writing to resend bit here*/
2342         }
2343         intrmod->rx_usecs = rx_coalesce_usecs;
2344         oct->rx_coalesce_usecs = rx_coalesce_usecs;
2345         break;
2346     }
2347     case OCTEON_CN23XX_VF_VID: {
2348         u64 time_threshold;
2349         int q_no;
2350 
2351         if (!intr_coal->rx_coalesce_usecs)
2352             rx_coalesce_usecs = intrmod->rx_usecs;
2353         else
2354             rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2355 
2356         time_threshold =
2357             cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2358         for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2359             octeon_write_csr64(
2360                 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2361                 (intrmod->rx_frames |
2362                  ((u64)time_threshold << 32)));
2363             /*consider setting resend bit*/
2364         }
2365         intrmod->rx_usecs = rx_coalesce_usecs;
2366         oct->rx_coalesce_usecs = rx_coalesce_usecs;
2367         break;
2368     }
2369     default:
2370         return -EINVAL;
2371     }
2372 
2373     return 0;
2374 }
2375 
/* oct_cfg_tx_intrcnt - program the tx (instruction-queue) packet-count
 * interrupt watermark.
 * @lio: per-interface private state
 * @intrmod: moderation state; tx_frames is updated with the applied value
 * @intr_coal: user-requested ethtool coalesce settings (0 = use default)
 *
 * 6XXX parts have no per-IQ count threshold and are a no-op here.
 * Returns 0 on success, -EINVAL for unknown chips.
 */
static int
oct_cfg_tx_intrcnt(struct lio *lio,
		   struct oct_intrmod_cfg *intrmod,
		   struct ethtool_coalesce *intr_coal)
{
	struct octeon_device *oct = lio->oct_dev;
	u32 iq_intr_pkt;
	void __iomem *inst_cnt_reg;
	u64 val;

	/* Config Cnt based interrupt values */
	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	case OCTEON_CN23XX_VF_VID:
	case OCTEON_CN23XX_PF_VID: {
		int q_no;

		/* mask the requested count down to the watermark field width */
		if (!intr_coal->tx_max_coalesced_frames)
			iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		else
			iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
				      CN23XX_PKT_IN_DONE_WMARK_MASK;
		for (q_no = 0; q_no < oct->num_iqs; q_no++) {
			inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
			val = readq(inst_cnt_reg);
			/*clear wmark and count.dont want to write count back*/
			val = (val & 0xFFFF000000000000ULL) |
			      ((u64)(iq_intr_pkt - 1)
			       << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
			writeq(val, inst_cnt_reg);
			/*consider setting resend bit*/
		}
		intrmod->tx_frames = iq_intr_pkt;
		oct->tx_max_coalesced_frames = iq_intr_pkt;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2420 
2421 static int lio_set_intr_coalesce(struct net_device *netdev,
2422                  struct ethtool_coalesce *intr_coal,
2423                  struct kernel_ethtool_coalesce *kernel_coal,
2424                  struct netlink_ext_ack *extack)
2425 {
2426     struct lio *lio = GET_LIO(netdev);
2427     int ret;
2428     struct octeon_device *oct = lio->oct_dev;
2429     struct oct_intrmod_cfg intrmod = {0};
2430     u32 j, q_no;
2431     int db_max, db_min;
2432 
2433     switch (oct->chip_id) {
2434     case OCTEON_CN68XX:
2435     case OCTEON_CN66XX:
2436         db_min = CN6XXX_DB_MIN;
2437         db_max = CN6XXX_DB_MAX;
2438         if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
2439             (intr_coal->tx_max_coalesced_frames <= db_max)) {
2440             for (j = 0; j < lio->linfo.num_txpciq; j++) {
2441                 q_no = lio->linfo.txpciq[j].s.q_no;
2442                 oct->instr_queue[q_no]->fill_threshold =
2443                     intr_coal->tx_max_coalesced_frames;
2444             }
2445         } else {
2446             dev_err(&oct->pci_dev->dev,
2447                 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
2448                 intr_coal->tx_max_coalesced_frames,
2449                 db_min, db_max);
2450             return -EINVAL;
2451         }
2452         break;
2453     case OCTEON_CN23XX_PF_VID:
2454     case OCTEON_CN23XX_VF_VID:
2455         break;
2456     default:
2457         return -EINVAL;
2458     }
2459 
2460     intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
2461     intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
2462     intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2463     intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2464     intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2465 
2466     ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
2467 
2468     if (!intr_coal->use_adaptive_rx_coalesce) {
2469         ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
2470         if (ret)
2471             goto ret_intrmod;
2472 
2473         ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
2474         if (ret)
2475             goto ret_intrmod;
2476     } else {
2477         oct->rx_coalesce_usecs =
2478             CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2479         oct->rx_max_coalesced_frames =
2480             CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2481     }
2482 
2483     if (!intr_coal->use_adaptive_tx_coalesce) {
2484         ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
2485         if (ret)
2486             goto ret_intrmod;
2487     } else {
2488         oct->tx_max_coalesced_frames =
2489             CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2490     }
2491 
2492     return 0;
2493 ret_intrmod:
2494     return ret;
2495 }
2496 
/* lio_get_ts_info - report timestamping capabilities to ethtool.
 * @netdev: interface being queried
 * @info: filled in with supported timestamping modes and the PHC index
 *
 * Hardware timestamping modes (and rx filters / tx types) are advertised
 * only when the driver is built with PTP_HARDWARE_TIMESTAMPING.
 */
static int lio_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	struct lio *lio = GET_LIO(netdev);

	info->so_timestamping =
#ifdef PTP_HARDWARE_TIMESTAMPING
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
#endif
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;

	/* report the PTP clock index, or -1 when no PHC is registered */
	if (lio->ptp_clock)
		info->phc_index = ptp_clock_index(lio->ptp_clock);
	else
		info->phc_index = -1;

#ifdef PTP_HARDWARE_TIMESTAMPING
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
#endif

	return 0;
}
2528 
2529 /* Return register dump len. */
2530 static int lio_get_regs_len(struct net_device *dev)
2531 {
2532     struct lio *lio = GET_LIO(dev);
2533     struct octeon_device *oct = lio->oct_dev;
2534 
2535     switch (oct->chip_id) {
2536     case OCTEON_CN23XX_PF_VID:
2537         return OCT_ETHTOOL_REGDUMP_LEN_23XX;
2538     case OCTEON_CN23XX_VF_VID:
2539         return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
2540     default:
2541         return OCT_ETHTOOL_REGDUMP_LEN;
2542     }
2543 }
2544 
/* Dump CN23XX PF CSR registers of interest into the caller-supplied string
 * buffer @s (sized OCT_ETHTOOL_REGDUMP_LEN_23XX by lio_get_regs).
 * Returns the number of bytes written.
 */
static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
{
    u32 reg;
    u8 pf_num = oct->pf_num;
    int len = 0;
    int i;

    /* PCI  Window Registers */

    len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

    /*0x29030 or 0x29040*/
    reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
    len += sprintf(s + len,
               "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
               reg, oct->pcie_port, oct->pf_num,
               (u64)octeon_read_csr64(oct, reg));

    /*0x27080 or 0x27090*/
    reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
    len +=
        sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
            reg, oct->pcie_port, oct->pf_num,
            (u64)octeon_read_csr64(oct, reg));

    /*0x27000 or 0x27010*/
    reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
    len +=
        sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
            reg, oct->pcie_port, oct->pf_num,
            (u64)octeon_read_csr64(oct, reg));

    /*0x29120*/
    reg = 0x29120;
    len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
               (u64)octeon_read_csr64(oct, reg));

    /*0x27300*/
    reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
          (oct->pf_num) * CN23XX_PF_INT_OFFSET;
    len += sprintf(
        s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
        oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));

    /*0x27200*/
    reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
          (oct->pf_num) * CN23XX_PF_INT_OFFSET;
    len += sprintf(s + len,
               "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
               reg, oct->pcie_port, oct->pf_num,
               (u64)octeon_read_csr64(oct, reg));

    /*29130*/
    reg = CN23XX_SLI_PKT_CNT_INT;
    len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
               (u64)octeon_read_csr64(oct, reg));

    /*0x29140*/
    reg = CN23XX_SLI_PKT_TIME_INT;
    len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
               (u64)octeon_read_csr64(oct, reg));

    /*0x29160*/
    reg = 0x29160;
    len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
               (u64)octeon_read_csr64(oct, reg));

    /*0x29180*/
    reg = CN23XX_SLI_OQ_WMARK;
    len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
               reg, (u64)octeon_read_csr64(oct, reg));

    /*0x291E0*/
    reg = CN23XX_SLI_PKT_IOQ_RING_RST;
    len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
               (u64)octeon_read_csr64(oct, reg));

    /*0x29210*/
    reg = CN23XX_SLI_GBL_CONTROL;
    len += sprintf(s + len,
               "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
               (u64)octeon_read_csr64(oct, reg));

    /*0x29220*/
    reg = 0x29220;
    len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
               reg, (u64)octeon_read_csr64(oct, reg));

    /*PF only*/
    if (pf_num == 0) {
        /*0x29260*/
        reg = CN23XX_SLI_OUT_BP_EN_W1S;
        len += sprintf(s + len,
                   "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
                   reg, (u64)octeon_read_csr64(oct, reg));
    } else if (pf_num == 1) {
        /*0x29270*/
        reg = CN23XX_SLI_OUT_BP_EN2_W1S;
        len += sprintf(s + len,
                   "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
                   reg, (u64)octeon_read_csr64(oct, reg));
    }

    for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
        reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
        len +=
            sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
                reg, i, (u64)octeon_read_csr64(oct, reg));
    }

    /*0x10040*/
    for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
        reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
        len += sprintf(s + len,
                   "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
                   reg, i, (u64)octeon_read_csr64(oct, reg));
    }

    /*0x10080*/
    for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
        reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
        len += sprintf(s + len,
                   "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
                   reg, i, (u64)octeon_read_csr64(oct, reg));
    }

    /*0x10090*/
    for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
        reg = CN23XX_SLI_OQ_SIZE(i);
        len += sprintf(
            s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
            reg, i, (u64)octeon_read_csr64(oct, reg));
    }

    /*0x10050*/
    for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
        reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
        len += sprintf(
            s + len,
            "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
            reg, i, (u64)octeon_read_csr64(oct, reg));
    }

    /*0x10070*/
    for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
        reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
        len += sprintf(s + len,
                   "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
                   reg, i, (u64)octeon_read_csr64(oct, reg));
    }

    /*0x100a0*/
    for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
        reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
        len += sprintf(s + len,
                   "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
                   reg, i, (u64)octeon_read_csr64(oct, reg));
    }

    /*0x100b0*/
    for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
        reg = CN23XX_SLI_OQ_PKTS_SENT(i);
        len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
                   reg, i, (u64)octeon_read_csr64(oct, reg));
    }

    /*0x100c0*/
    /* NOTE(review): the input-queue loops below are nested inside this
     * output-queue loop and reuse the same index `i`.  The inner loops
     * leave i == CN23XX_MAX_INPUT_QUEUES, so this outer loop terminates
     * after a single pass and SLI_PKT%d_ERROR_INFO is only printed for
     * ring 0.  This looks unintentional (compare the flat loops above),
     * but un-nesting would add many output lines — confirm against the
     * OCT_ETHTOOL_REGDUMP_LEN_23XX buffer budget before changing.
     */
    for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
        reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
        len += sprintf(s + len,
                   "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
                   reg, i, (u64)octeon_read_csr64(oct, reg));

        /*0x10000*/
        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
            reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
            len += sprintf(
                s + len,
                "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
                reg, i, (u64)octeon_read_csr64(oct, reg));
        }

        /*0x10010*/
        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
            reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
            len += sprintf(
                s + len,
                "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
                i, (u64)octeon_read_csr64(oct, reg));
        }

        /*0x10020*/
        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
            reg = CN23XX_SLI_IQ_DOORBELL(i);
            len += sprintf(
                s + len,
                "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
                reg, i, (u64)octeon_read_csr64(oct, reg));
        }

        /*0x10030*/
        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
            reg = CN23XX_SLI_IQ_SIZE(i);
            len += sprintf(
                s + len,
                "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
                reg, i, (u64)octeon_read_csr64(oct, reg));
        }

        /*0x10040*/
        /* NOTE(review): this loop has no braces, so only the `reg`
         * assignment iterates; the sprintf runs once afterwards and
         * prints only the last ring's IN_DONE_CNTS.  Likely missing
         * braces — verify intent (same buffer-budget caveat as above).
         */
        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
            reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
        len += sprintf(s + len,
                   "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
                   reg, i, (u64)octeon_read_csr64(oct, reg));
    }

    return len;
}
2764 
2765 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2766 {
2767     int len = 0;
2768     u32 reg;
2769     int i;
2770 
2771     /* PCI  Window Registers */
2772 
2773     len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2774 
2775     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2776         reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2777         len += sprintf(s + len,
2778                    "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2779                    reg, i, (u64)octeon_read_csr64(oct, reg));
2780     }
2781 
2782     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2783         reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2784         len += sprintf(s + len,
2785                    "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2786                    reg, i, (u64)octeon_read_csr64(oct, reg));
2787     }
2788 
2789     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2790         reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2791         len += sprintf(s + len,
2792                    "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2793                    reg, i, (u64)octeon_read_csr64(oct, reg));
2794     }
2795 
2796     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2797         reg = CN23XX_VF_SLI_OQ_SIZE(i);
2798         len += sprintf(s + len,
2799                    "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2800                    reg, i, (u64)octeon_read_csr64(oct, reg));
2801     }
2802 
2803     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2804         reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2805         len += sprintf(s + len,
2806                    "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2807                    reg, i, (u64)octeon_read_csr64(oct, reg));
2808     }
2809 
2810     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2811         reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2812         len += sprintf(s + len,
2813                    "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2814                    reg, i, (u64)octeon_read_csr64(oct, reg));
2815     }
2816 
2817     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2818         reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2819         len += sprintf(s + len,
2820                    "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2821                    reg, i, (u64)octeon_read_csr64(oct, reg));
2822     }
2823 
2824     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2825         reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2826         len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2827                    reg, i, (u64)octeon_read_csr64(oct, reg));
2828     }
2829 
2830     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2831         reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2832         len += sprintf(s + len,
2833                    "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2834                    reg, i, (u64)octeon_read_csr64(oct, reg));
2835     }
2836 
2837     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2838         reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2839         len += sprintf(s + len,
2840                    "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2841                    reg, i, (u64)octeon_read_csr64(oct, reg));
2842     }
2843 
2844     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2845         reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2846         len += sprintf(s + len,
2847                    "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2848                    reg, i, (u64)octeon_read_csr64(oct, reg));
2849     }
2850 
2851     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2852         reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2853         len += sprintf(s + len,
2854                    "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2855                    reg, i, (u64)octeon_read_csr64(oct, reg));
2856     }
2857 
2858     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2859         reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2860         len += sprintf(s + len,
2861                    "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2862                    reg, i, (u64)octeon_read_csr64(oct, reg));
2863     }
2864 
2865     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2866         reg = CN23XX_VF_SLI_IQ_SIZE(i);
2867         len += sprintf(s + len,
2868                    "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2869                    reg, i, (u64)octeon_read_csr64(oct, reg));
2870     }
2871 
2872     for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2873         reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2874         len += sprintf(s + len,
2875                    "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2876                    reg, i, (u64)octeon_read_csr64(oct, reg));
2877     }
2878 
2879     return len;
2880 }
2881 
2882 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2883 {
2884     u32 reg;
2885     int i, len = 0;
2886 
2887     /* PCI  Window Registers */
2888 
2889     len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2890     reg = CN6XXX_WIN_WR_ADDR_LO;
2891     len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2892                CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2893     reg = CN6XXX_WIN_WR_ADDR_HI;
2894     len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2895                CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2896     reg = CN6XXX_WIN_RD_ADDR_LO;
2897     len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2898                CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2899     reg = CN6XXX_WIN_RD_ADDR_HI;
2900     len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2901                CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2902     reg = CN6XXX_WIN_WR_DATA_LO;
2903     len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2904                CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2905     reg = CN6XXX_WIN_WR_DATA_HI;
2906     len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2907                CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2908     len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2909                CN6XXX_WIN_WR_MASK_REG,
2910                octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2911 
2912     /* PCI  Interrupt Register */
2913     len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2914                CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2915                         CN6XXX_SLI_INT_ENB64_PORT0));
2916     len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2917                CN6XXX_SLI_INT_ENB64_PORT1,
2918                octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2919     len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2920                octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2921 
2922     /* PCI  Output queue registers */
2923     for (i = 0; i < oct->num_oqs; i++) {
2924         reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2925         len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2926                    reg, i, octeon_read_csr(oct, reg));
2927         reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2928         len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2929                    reg, i, octeon_read_csr(oct, reg));
2930     }
2931     reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2932     len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2933                reg, octeon_read_csr(oct, reg));
2934     reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2935     len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2936                reg, octeon_read_csr(oct, reg));
2937 
2938     /* PCI  Input queue registers */
2939     for (i = 0; i <= 3; i++) {
2940         u32 reg;
2941 
2942         reg = CN6XXX_SLI_IQ_DOORBELL(i);
2943         len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2944                    reg, i, octeon_read_csr(oct, reg));
2945         reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2946         len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2947                    reg, i, octeon_read_csr(oct, reg));
2948     }
2949 
2950     /* PCI  DMA registers */
2951 
2952     len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2953                CN6XXX_DMA_CNT(0),
2954                octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2955     reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2956     len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2957                CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2958     reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2959     len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2960                CN6XXX_DMA_TIME_INT_LEVEL(0),
2961                octeon_read_csr(oct, reg));
2962 
2963     len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2964                CN6XXX_DMA_CNT(1),
2965                octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2966     reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2967     len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2968                CN6XXX_DMA_PKT_INT_LEVEL(1),
2969                octeon_read_csr(oct, reg));
2970     reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2971     len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2972                CN6XXX_DMA_TIME_INT_LEVEL(1),
2973                octeon_read_csr(oct, reg));
2974 
2975     /* PCI  Index registers */
2976 
2977     len += sprintf(s + len, "\n");
2978 
2979     for (i = 0; i < 16; i++) {
2980         reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2981         len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2982                    CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2983     }
2984 
2985     return len;
2986 }
2987 
2988 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2989 {
2990     u32 val;
2991     int i, len = 0;
2992 
2993     /* PCI CONFIG Registers */
2994 
2995     len += sprintf(s + len,
2996                "\n\t Octeon Config space Registers\n\n");
2997 
2998     for (i = 0; i <= 13; i++) {
2999         pci_read_config_dword(oct->pci_dev, (i * 4), &val);
3000         len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
3001                    (i * 4), i, val);
3002     }
3003 
3004     for (i = 30; i <= 34; i++) {
3005         pci_read_config_dword(oct->pci_dev, (i * 4), &val);
3006         len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
3007                    (i * 4), i, val);
3008     }
3009 
3010     return len;
3011 }
3012 
3013 /*  Return register dump user app.  */
3014 static void lio_get_regs(struct net_device *dev,
3015              struct ethtool_regs *regs, void *regbuf)
3016 {
3017     struct lio *lio = GET_LIO(dev);
3018     int len = 0;
3019     struct octeon_device *oct = lio->oct_dev;
3020 
3021     regs->version = OCT_ETHTOOL_REGSVER;
3022 
3023     switch (oct->chip_id) {
3024     case OCTEON_CN23XX_PF_VID:
3025         memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
3026         len += cn23xx_read_csr_reg(regbuf + len, oct);
3027         break;
3028     case OCTEON_CN23XX_VF_VID:
3029         memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
3030         len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
3031         break;
3032     case OCTEON_CN68XX:
3033     case OCTEON_CN66XX:
3034         memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
3035         len += cn6xxx_read_csr_reg(regbuf + len, oct);
3036         len += cn6xxx_read_config_reg(regbuf + len, oct);
3037         break;
3038     default:
3039         dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
3040             __func__, oct->chip_id);
3041     }
3042 }
3043 
3044 static u32 lio_get_priv_flags(struct net_device *netdev)
3045 {
3046     struct lio *lio = GET_LIO(netdev);
3047 
3048     return lio->oct_dev->priv_flags;
3049 }
3050 
3051 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
3052 {
3053     struct lio *lio = GET_LIO(netdev);
3054     bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
3055 
3056     lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
3057               intr_by_tx_bytes);
3058     return 0;
3059 }
3060 
3061 static int lio_get_fecparam(struct net_device *netdev,
3062                 struct ethtool_fecparam *fec)
3063 {
3064     struct lio *lio = GET_LIO(netdev);
3065     struct octeon_device *oct = lio->oct_dev;
3066 
3067     fec->active_fec = ETHTOOL_FEC_NONE;
3068     fec->fec = ETHTOOL_FEC_NONE;
3069 
3070     if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
3071         oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
3072         if (oct->no_speed_setting == 1)
3073             return 0;
3074 
3075         liquidio_get_fec(lio);
3076         fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF);
3077         if (oct->props[lio->ifidx].fec == 1)
3078             fec->active_fec = ETHTOOL_FEC_RS;
3079         else
3080             fec->active_fec = ETHTOOL_FEC_OFF;
3081     }
3082 
3083     return 0;
3084 }
3085 
3086 static int lio_set_fecparam(struct net_device *netdev,
3087                 struct ethtool_fecparam *fec)
3088 {
3089     struct lio *lio = GET_LIO(netdev);
3090     struct octeon_device *oct = lio->oct_dev;
3091 
3092     if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
3093         oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
3094         if (oct->no_speed_setting == 1)
3095             return -EOPNOTSUPP;
3096 
3097         if (fec->fec & ETHTOOL_FEC_OFF)
3098             liquidio_set_fec(lio, 0);
3099         else if (fec->fec & ETHTOOL_FEC_RS)
3100             liquidio_set_fec(lio, 1);
3101         else
3102             return -EOPNOTSUPP;
3103     } else {
3104         return -EOPNOTSUPP;
3105     }
3106 
3107     return 0;
3108 }
3109 
/* Coalesce parameter categories this driver handles; advertised through
 * .supported_coalesce_params in both ethtool_ops tables below.
 */
#define LIO_ETHTOOL_COALESCE    (ETHTOOL_COALESCE_RX_USECS |        \
                 ETHTOOL_COALESCE_MAX_FRAMES |      \
                 ETHTOOL_COALESCE_USE_ADAPTIVE |    \
                 ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW |   \
                 ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW |   \
                 ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH |  \
                 ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH |  \
                 ETHTOOL_COALESCE_PKT_RATE_RX_USECS)
3118 
/* ethtool operations for PF netdevs (includes link/FEC/pause/LED control
 * that the VF table below does not expose).
 */
static const struct ethtool_ops lio_ethtool_ops = {
    .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
    .get_link_ksettings = lio_get_link_ksettings,
    .set_link_ksettings = lio_set_link_ksettings,
    .get_fecparam       = lio_get_fecparam,
    .set_fecparam       = lio_set_fecparam,
    .get_link       = ethtool_op_get_link,
    .get_drvinfo        = lio_get_drvinfo,
    .get_ringparam      = lio_ethtool_get_ringparam,
    .set_ringparam      = lio_ethtool_set_ringparam,
    .get_channels       = lio_ethtool_get_channels,
    .set_channels       = lio_ethtool_set_channels,
    .set_phys_id        = lio_set_phys_id,
    .get_eeprom_len     = lio_get_eeprom_len,
    .get_eeprom     = lio_get_eeprom,
    .get_strings        = lio_get_strings,
    .get_ethtool_stats  = lio_get_ethtool_stats,
    .get_pauseparam     = lio_get_pauseparam,
    .set_pauseparam     = lio_set_pauseparam,
    .get_regs_len       = lio_get_regs_len,
    .get_regs       = lio_get_regs,
    .get_msglevel       = lio_get_msglevel,
    .set_msglevel       = lio_set_msglevel,
    .get_sset_count     = lio_get_sset_count,
    .get_coalesce       = lio_get_intr_coalesce,
    .set_coalesce       = lio_set_intr_coalesce,
    .get_priv_flags     = lio_get_priv_flags,
    .set_priv_flags     = lio_set_priv_flags,
    .get_ts_info        = lio_get_ts_info,
};
3149 
/* ethtool operations for VF netdevs — a subset of the PF table using
 * VF-specific drvinfo/strings/stats/msglevel handlers.
 */
static const struct ethtool_ops lio_vf_ethtool_ops = {
    .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
    .get_link_ksettings = lio_get_link_ksettings,
    .get_link       = ethtool_op_get_link,
    .get_drvinfo        = lio_get_vf_drvinfo,
    .get_ringparam      = lio_ethtool_get_ringparam,
    .set_ringparam          = lio_ethtool_set_ringparam,
    .get_channels       = lio_ethtool_get_channels,
    .set_channels       = lio_ethtool_set_channels,
    .get_strings        = lio_vf_get_strings,
    .get_ethtool_stats  = lio_vf_get_ethtool_stats,
    .get_regs_len       = lio_get_regs_len,
    .get_regs       = lio_get_regs,
    .get_msglevel       = lio_get_msglevel,
    .set_msglevel       = lio_vf_set_msglevel,
    .get_sset_count     = lio_vf_get_sset_count,
    .get_coalesce       = lio_get_intr_coalesce,
    .set_coalesce       = lio_set_intr_coalesce,
    .get_priv_flags     = lio_get_priv_flags,
    .set_priv_flags     = lio_set_priv_flags,
    .get_ts_info        = lio_get_ts_info,
};
3172 
3173 void liquidio_set_ethtool_ops(struct net_device *netdev)
3174 {
3175     struct lio *lio = GET_LIO(netdev);
3176     struct octeon_device *oct = lio->oct_dev;
3177 
3178     if (OCTEON_CN23XX_VF(oct))
3179         netdev->ethtool_ops = &lio_vf_ethtool_ops;
3180     else
3181         netdev->ethtool_ops = &lio_ethtool_ops;
3182 }