0001 /* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
0002  *
0003  * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
0004  *
0005  * This program is dual-licensed; you may select either version 2 of
0006  * the GNU General Public License ("GPL") or BSD license ("BSD").
0007  *
0008  * This Synopsys DWC XLGMAC software driver and associated documentation
0009  * (hereinafter the "Software") is an unsupported proprietary work of
0010  * Synopsys, Inc. unless otherwise expressly agreed to in writing between
0011  * Synopsys and you. The Software IS NOT an item of Licensed Software or a
0012  * Licensed Product under any End User Software License Agreement or
0013  * Agreement for Licensed Products with Synopsys or any supplement thereto.
0014  * Synopsys is a registered trademark of Synopsys, Inc. Other names included
0015  * in the SOFTWARE may be the trademarks of their respective owners.
0016  */
0017 
0018 #include <linux/phy.h>
0019 #include <linux/mdio.h>
0020 #include <linux/clk.h>
0021 #include <linux/bitrev.h>
0022 #include <linux/crc32.h>
0023 #include <linux/crc32poly.h>
0024 #include <linux/dcbnl.h>
0025 
0026 #include "dwc-xlgmac.h"
0027 #include "dwc-xlgmac-reg.h"
0028 
0029 static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc)
0030 {
0031     return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
0032                 TX_NORMAL_DESC3_OWN_POS,
0033                 TX_NORMAL_DESC3_OWN_LEN);
0034 }
0035 
0036 static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata)
0037 {
0038     u32 regval;
0039 
0040     regval = readl(pdata->mac_regs + MAC_RCR);
0041     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
0042                      MAC_RCR_IPC_LEN, 0);
0043     writel(regval, pdata->mac_regs + MAC_RCR);
0044 
0045     return 0;
0046 }
0047 
0048 static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
0049 {
0050     u32 regval;
0051 
0052     regval = readl(pdata->mac_regs + MAC_RCR);
0053     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS,
0054                      MAC_RCR_IPC_LEN, 1);
0055     writel(regval, pdata->mac_regs + MAC_RCR);
0056 
0057     return 0;
0058 }
0059 
0060 static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, const u8 *addr)
0061 {
0062     unsigned int mac_addr_hi, mac_addr_lo;
0063 
0064     mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
0065     mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
0066               (addr[1] <<  8) | (addr[0] <<  0);
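    /* For example, with addr = 00:11:22:33:44:55 (addr[0] = 0x00 ... addr[5] = 0x55):
     *   mac_addr_hi = (0x55 << 8) | 0x44 = 0x5544
     *   mac_addr_lo = (0x33 << 24) | (0x22 << 16) | (0x11 << 8) | 0x00 = 0x33221100
     * so the address bytes land little-endian across the two registers.
     */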
0067 
0068     writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR);
0069     writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR);
0070 
0071     return 0;
0072 }
0073 
0074 static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata,
0075                    struct netdev_hw_addr *ha,
0076                    unsigned int *mac_reg)
0077 {
0078     unsigned int mac_addr_hi, mac_addr_lo;
0079     u8 *mac_addr;
0080 
0081     mac_addr_lo = 0;
0082     mac_addr_hi = 0;
0083 
0084     if (ha) {
0085         mac_addr = (u8 *)&mac_addr_lo;
0086         mac_addr[0] = ha->addr[0];
0087         mac_addr[1] = ha->addr[1];
0088         mac_addr[2] = ha->addr[2];
0089         mac_addr[3] = ha->addr[3];
0090         mac_addr = (u8 *)&mac_addr_hi;
0091         mac_addr[0] = ha->addr[4];
0092         mac_addr[1] = ha->addr[5];
0093 
0094         netif_dbg(pdata, drv, pdata->netdev,
0095               "adding mac address %pM at %#x\n",
0096               ha->addr, *mac_reg);
0097 
0098         mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi,
0099                           MAC_MACA1HR_AE_POS,
0100                         MAC_MACA1HR_AE_LEN,
0101                         1);
0102     }
0103 
0104     writel(mac_addr_hi, pdata->mac_regs + *mac_reg);
0105     *mac_reg += MAC_MACA_INC;
0106     writel(mac_addr_lo, pdata->mac_regs + *mac_reg);
0107     *mac_reg += MAC_MACA_INC;
0108 }
0109 
0110 static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
0111 {
0112     u32 regval;
0113 
0114     regval = readl(pdata->mac_regs + MAC_VLANTR);
0115     /* Put the VLAN tag in the Rx descriptor */
0116     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
0117                      MAC_VLANTR_EVLRXS_LEN, 1);
0118     /* Don't check the VLAN type */
0119     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
0120                      MAC_VLANTR_DOVLTC_LEN, 1);
0121     /* Check only C-TAG (0x8100) packets */
0122     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
0123                      MAC_VLANTR_ERSVLM_LEN, 0);
0124     /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
0125     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
0126                      MAC_VLANTR_ESVL_LEN, 0);
0127     /* Enable VLAN tag stripping */
0128     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
0129                      MAC_VLANTR_EVLS_LEN, 0x3);
0130     writel(regval, pdata->mac_regs + MAC_VLANTR);
0131 
0132     return 0;
0133 }
0134 
0135 static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata)
0136 {
0137     u32 regval;
0138 
0139     regval = readl(pdata->mac_regs + MAC_VLANTR);
0140     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
0141                      MAC_VLANTR_EVLS_LEN, 0);
0142     writel(regval, pdata->mac_regs + MAC_VLANTR);
0143 
0144     return 0;
0145 }
0146 
0147 static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
0148 {
0149     u32 regval;
0150 
0151     regval = readl(pdata->mac_regs + MAC_PFR);
0152     /* Enable VLAN filtering */
0153     regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
0154                      MAC_PFR_VTFE_LEN, 1);
0155     writel(regval, pdata->mac_regs + MAC_PFR);
0156 
0157     regval = readl(pdata->mac_regs + MAC_VLANTR);
0158     /* Enable VLAN Hash Table filtering */
0159     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS,
0160                      MAC_VLANTR_VTHM_LEN, 1);
0161     /* Disable VLAN tag inverse matching */
0162     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS,
0163                      MAC_VLANTR_VTIM_LEN, 0);
0164     /* Only filter on the lower 12-bits of the VLAN tag */
0165     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS,
0166                      MAC_VLANTR_ETV_LEN, 1);
0167     /* In order for the VLAN Hash Table filtering to be effective,
0168      * the VLAN tag identifier in the VLAN Tag Register must not
0169      * be zero.  Set the VLAN tag identifier to "1" to enable the
0170      * VLAN Hash Table filtering.  This implies that a VLAN tag of
0171      * 1 will always pass filtering.
0172      */
0173     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS,
0174                      MAC_VLANTR_VL_LEN, 1);
0175     writel(regval, pdata->mac_regs + MAC_VLANTR);
0176 
0177     return 0;
0178 }
0179 
0180 static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata)
0181 {
0182     u32 regval;
0183 
0184     regval = readl(pdata->mac_regs + MAC_PFR);
0185     /* Disable VLAN filtering */
0186     regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS,
0187                      MAC_PFR_VTFE_LEN, 0);
0188     writel(regval, pdata->mac_regs + MAC_PFR);
0189 
0190     return 0;
0191 }
0192 
0193 static u32 xlgmac_vid_crc32_le(__le16 vid_le)
0194 {
0195     unsigned char *data = (unsigned char *)&vid_le;
0196     unsigned char data_byte = 0;
0197     u32 crc = ~0;
0198     u32 temp = 0;
0199     int i, bits;
0200 
0201     bits = get_bitmask_order(VLAN_VID_MASK);
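    /* get_bitmask_order(VLAN_VID_MASK) evaluates to 12, so only the 12 VID
     * bits are fed, least-significant bit first, through this bit-serial
     * little-endian CRC-32 (polynomial CRC32_POLY_LE, 0xEDB88320).
     */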
0202     for (i = 0; i < bits; i++) {
0203         if ((i % 8) == 0)
0204             data_byte = data[i / 8];
0205 
0206         temp = ((crc & 1) ^ data_byte) & 1;
0207         crc >>= 1;
0208         data_byte >>= 1;
0209 
0210         if (temp)
0211             crc ^= CRC32_POLY_LE;
0212     }
0213 
0214     return crc;
0215 }
0216 
0217 static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata)
0218 {
0219     u16 vlan_hash_table = 0;
0220     __le16 vid_le;
0221     u32 regval;
0222     u32 crc;
0223     u16 vid;
0224 
0225     /* Generate the VLAN Hash Table value */
0226     for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
0227         /* Get the CRC32 value of the VLAN ID */
0228         vid_le = cpu_to_le16(vid);
0229         crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28;
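        /* bitrev32() reflects the CRC and ">> 28" keeps its top four bits,
         * giving a bucket index of 0..15 into the 16-bit VLAN hash table.
         */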
0230 
0231         vlan_hash_table |= (1 << crc);
0232     }
0233 
0234     regval = readl(pdata->mac_regs + MAC_VLANHTR);
0235     /* Set the VLAN Hash Table filtering register */
0236     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS,
0237                      MAC_VLANHTR_VLHT_LEN, vlan_hash_table);
0238     writel(regval, pdata->mac_regs + MAC_VLANHTR);
0239 
0240     return 0;
0241 }
0242 
0243 static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata,
0244                        unsigned int enable)
0245 {
0246     unsigned int val = enable ? 1 : 0;
0247     u32 regval;
0248 
0249     regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
0250                      MAC_PFR_PR_POS, MAC_PFR_PR_LEN);
0251     if (regval == val)
0252         return 0;
0253 
0254     netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
0255           enable ? "entering" : "leaving");
0256 
0257     regval = readl(pdata->mac_regs + MAC_PFR);
0258     regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS,
0259                      MAC_PFR_PR_LEN, val);
0260     writel(regval, pdata->mac_regs + MAC_PFR);
0261 
0262     /* Hardware will still perform VLAN filtering in promiscuous mode */
0263     if (enable) {
0264         xlgmac_disable_rx_vlan_filtering(pdata);
0265     } else {
0266         if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
0267             xlgmac_enable_rx_vlan_filtering(pdata);
0268     }
0269 
0270     return 0;
0271 }
0272 
0273 static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata,
0274                      unsigned int enable)
0275 {
0276     unsigned int val = enable ? 1 : 0;
0277     u32 regval;
0278 
0279     regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR),
0280                      MAC_PFR_PM_POS, MAC_PFR_PM_LEN);
0281     if (regval == val)
0282         return 0;
0283 
0284     netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
0285           enable ? "entering" : "leaving");
0286 
0287     regval = readl(pdata->mac_regs + MAC_PFR);
0288     regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS,
0289                      MAC_PFR_PM_LEN, val);
0290     writel(regval, pdata->mac_regs + MAC_PFR);
0291 
0292     return 0;
0293 }
0294 
0295 static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata)
0296 {
0297     struct net_device *netdev = pdata->netdev;
0298     struct netdev_hw_addr *ha;
0299     unsigned int addn_macs;
0300     unsigned int mac_reg;
0301 
0302     mac_reg = MAC_MACA1HR;
0303     addn_macs = pdata->hw_feat.addn_mac;
0304 
0305     if (netdev_uc_count(netdev) > addn_macs) {
0306         xlgmac_set_promiscuous_mode(pdata, 1);
0307     } else {
0308         netdev_for_each_uc_addr(ha, netdev) {
0309             xlgmac_set_mac_reg(pdata, ha, &mac_reg);
0310             addn_macs--;
0311         }
0312 
0313         if (netdev_mc_count(netdev) > addn_macs) {
0314             xlgmac_set_all_multicast_mode(pdata, 1);
0315         } else {
0316             netdev_for_each_mc_addr(ha, netdev) {
0317                 xlgmac_set_mac_reg(pdata, ha, &mac_reg);
0318                 addn_macs--;
0319             }
0320         }
0321     }
0322 
0323     /* Clear remaining additional MAC address entries */
0324     while (addn_macs--)
0325         xlgmac_set_mac_reg(pdata, NULL, &mac_reg);
0326 }
0327 
0328 static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata)
0329 {
0330     unsigned int hash_table_shift, hash_table_count;
0331     u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE];
0332     struct net_device *netdev = pdata->netdev;
0333     struct netdev_hw_addr *ha;
0334     unsigned int hash_reg;
0335     unsigned int i;
0336     u32 crc;
0337 
0338     hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
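    /* A hash_table_size of 64, 128 or 256 bits yields a shift of 26, 25 or
     * 24, so the top 6, 7 or 8 bits of the bit-reversed CRC-32 select one
     * bit in the table; below, "crc >> 5" picks the 32-bit MAC_HTRn register
     * and "crc & 0x1f" the bit within it.
     */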
0339     hash_table_count = pdata->hw_feat.hash_table_size / 32;
0340     memset(hash_table, 0, sizeof(hash_table));
0341 
0342     /* Build the MAC Hash Table register values */
0343     netdev_for_each_uc_addr(ha, netdev) {
0344         crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
0345         crc >>= hash_table_shift;
0346         hash_table[crc >> 5] |= (1 << (crc & 0x1f));
0347     }
0348 
0349     netdev_for_each_mc_addr(ha, netdev) {
0350         crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
0351         crc >>= hash_table_shift;
0352         hash_table[crc >> 5] |= (1 << (crc & 0x1f));
0353     }
0354 
0355     /* Set the MAC Hash Table registers */
0356     hash_reg = MAC_HTR0;
0357     for (i = 0; i < hash_table_count; i++) {
0358         writel(hash_table[i], pdata->mac_regs + hash_reg);
0359         hash_reg += MAC_HTR_INC;
0360     }
0361 }
0362 
0363 static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata)
0364 {
0365     if (pdata->hw_feat.hash_table_size)
0366         xlgmac_set_mac_hash_table(pdata);
0367     else
0368         xlgmac_set_mac_addn_addrs(pdata);
0369 
0370     return 0;
0371 }
0372 
0373 static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata)
0374 {
0375     u32 regval;
0376 
0377     xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr);
0378 
0379     /* Filtering is done using perfect filtering and hash filtering */
0380     if (pdata->hw_feat.hash_table_size) {
0381         regval = readl(pdata->mac_regs + MAC_PFR);
0382         regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS,
0383                          MAC_PFR_HPF_LEN, 1);
0384         regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS,
0385                          MAC_PFR_HUC_LEN, 1);
0386         regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS,
0387                          MAC_PFR_HMC_LEN, 1);
0388         writel(regval, pdata->mac_regs + MAC_PFR);
0389     }
0390 }
0391 
0392 static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata)
0393 {
0394     unsigned int val;
0395     u32 regval;
0396 
0397     val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 1 : 0;
0398 
0399     regval = readl(pdata->mac_regs + MAC_RCR);
0400     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS,
0401                      MAC_RCR_JE_LEN, val);
0402     writel(regval, pdata->mac_regs + MAC_RCR);
0403 }
0404 
0405 static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata)
0406 {
0407     if (pdata->netdev->features & NETIF_F_RXCSUM)
0408         xlgmac_enable_rx_csum(pdata);
0409     else
0410         xlgmac_disable_rx_csum(pdata);
0411 }
0412 
0413 static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata)
0414 {
0415     u32 regval;
0416 
0417     regval = readl(pdata->mac_regs + MAC_VLANIR);
0418     /* Indicate that VLAN Tx CTAGs come from context descriptors */
0419     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
0420                      MAC_VLANIR_CSVL_LEN, 0);
0421     regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
0422                      MAC_VLANIR_VLTI_LEN, 1);
0423     writel(regval, pdata->mac_regs + MAC_VLANIR);
0424 
0425     /* Set the current VLAN Hash Table register value */
0426     xlgmac_update_vlan_hash_table(pdata);
0427 
0428     if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
0429         xlgmac_enable_rx_vlan_filtering(pdata);
0430     else
0431         xlgmac_disable_rx_vlan_filtering(pdata);
0432 
0433     if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
0434         xlgmac_enable_rx_vlan_stripping(pdata);
0435     else
0436         xlgmac_disable_rx_vlan_stripping(pdata);
0437 }
0438 
0439 static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata)
0440 {
0441     struct net_device *netdev = pdata->netdev;
0442     unsigned int pr_mode, am_mode;
0443 
0444     pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
0445     am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
0446 
0447     xlgmac_set_promiscuous_mode(pdata, pr_mode);
0448     xlgmac_set_all_multicast_mode(pdata, am_mode);
0449 
0450     xlgmac_add_mac_addresses(pdata);
0451 
0452     return 0;
0453 }
0454 
0455 static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata,
0456                    struct xlgmac_channel *channel)
0457 {
0458     unsigned int tx_dsr, tx_pos, tx_qidx;
0459     unsigned long tx_timeout;
0460     unsigned int tx_status;
0461 
0462     /* Calculate the status register to read and the position within it */
0463     if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
0464         tx_dsr = DMA_DSR0;
0465         tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) +
0466              DMA_DSR0_TPS_START;
0467     } else {
0468         tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
0469 
0470         tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
0471         tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) +
0472              DMA_DSRX_TPS_START;
0473     }
0474 
0475     /* The Tx engine cannot be stopped if it is actively processing
0476      * descriptors. Wait for the Tx engine to enter the stopped or
0477      * suspended state.  Don't wait forever though...
0478      */
0479     tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
0480     while (time_before(jiffies, tx_timeout)) {
0481         tx_status = readl(pdata->mac_regs + tx_dsr);
0482         tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos,
0483                         DMA_DSR_TPS_LEN);
0484         if ((tx_status == DMA_TPS_STOPPED) ||
0485             (tx_status == DMA_TPS_SUSPENDED))
0486             break;
0487 
0488         usleep_range(500, 1000);
0489     }
0490 
0491     if (!time_before(jiffies, tx_timeout))
0492         netdev_info(pdata->netdev,
0493                 "timed out waiting for Tx DMA channel %u to stop\n",
0494                 channel->queue_index);
0495 }
0496 
0497 static void xlgmac_enable_tx(struct xlgmac_pdata *pdata)
0498 {
0499     struct xlgmac_channel *channel;
0500     unsigned int i;
0501     u32 regval;
0502 
0503     /* Enable each Tx DMA channel */
0504     channel = pdata->channel_head;
0505     for (i = 0; i < pdata->channel_count; i++, channel++) {
0506         if (!channel->tx_ring)
0507             break;
0508 
0509         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
0510         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
0511                          DMA_CH_TCR_ST_LEN, 1);
0512         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
0513     }
0514 
0515     /* Enable each Tx queue */
0516     for (i = 0; i < pdata->tx_q_count; i++) {
0517         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
0518         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
0519                          MTL_Q_TQOMR_TXQEN_LEN,
0520                     MTL_Q_ENABLED);
0521         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
0522     }
0523 
0524     /* Enable MAC Tx */
0525     regval = readl(pdata->mac_regs + MAC_TCR);
0526     regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
0527                      MAC_TCR_TE_LEN, 1);
0528     writel(regval, pdata->mac_regs + MAC_TCR);
0529 }
0530 
0531 static void xlgmac_disable_tx(struct xlgmac_pdata *pdata)
0532 {
0533     struct xlgmac_channel *channel;
0534     unsigned int i;
0535     u32 regval;
0536 
0537     /* Prepare for Tx DMA channel stop */
0538     channel = pdata->channel_head;
0539     for (i = 0; i < pdata->channel_count; i++, channel++) {
0540         if (!channel->tx_ring)
0541             break;
0542 
0543         xlgmac_prepare_tx_stop(pdata, channel);
0544     }
0545 
0546     /* Disable MAC Tx */
0547     regval = readl(pdata->mac_regs + MAC_TCR);
0548     regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS,
0549                      MAC_TCR_TE_LEN, 0);
0550     writel(regval, pdata->mac_regs + MAC_TCR);
0551 
0552     /* Disable each Tx queue */
0553     for (i = 0; i < pdata->tx_q_count; i++) {
0554         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
0555         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS,
0556                          MTL_Q_TQOMR_TXQEN_LEN, 0);
0557         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
0558     }
0559 
0560     /* Disable each Tx DMA channel */
0561     channel = pdata->channel_head;
0562     for (i = 0; i < pdata->channel_count; i++, channel++) {
0563         if (!channel->tx_ring)
0564             break;
0565 
0566         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
0567         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
0568                          DMA_CH_TCR_ST_LEN, 0);
0569         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
0570     }
0571 }
0572 
0573 static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata,
0574                    unsigned int queue)
0575 {
0576     unsigned int rx_status, prxq, rxqsts;
0577     unsigned long rx_timeout;
0578 
0579     /* The Rx engine cannot be stopped if it is actively processing
0580      * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
0581      * wait forever though...
0582      */
0583     rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ);
0584     while (time_before(jiffies, rx_timeout)) {
0585         rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
0586         prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS,
0587                        MTL_Q_RQDR_PRXQ_LEN);
0588         rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS,
0589                          MTL_Q_RQDR_RXQSTS_LEN);
0590         if ((prxq == 0) && (rxqsts == 0))
0591             break;
0592 
0593         usleep_range(500, 1000);
0594     }
0595 
0596     if (!time_before(jiffies, rx_timeout))
0597         netdev_info(pdata->netdev,
0598                 "timed out waiting for Rx queue %u to empty\n",
0599                 queue);
0600 }
0601 
0602 static void xlgmac_enable_rx(struct xlgmac_pdata *pdata)
0603 {
0604     struct xlgmac_channel *channel;
0605     unsigned int regval, i;
0606 
0607     /* Enable each Rx DMA channel */
0608     channel = pdata->channel_head;
0609     for (i = 0; i < pdata->channel_count; i++, channel++) {
0610         if (!channel->rx_ring)
0611             break;
0612 
0613         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
0614         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
0615                          DMA_CH_RCR_SR_LEN, 1);
0616         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
0617     }
0618 
0619     /* Enable each Rx queue */
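    /* Each Rx queue has a two-bit RXQEN field in MAC_RQC0R at bits
     * [2i+1:2i]; the value 0x2 written below enables the queue for
     * generic (DCB) traffic routing.
     */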
0620     regval = 0;
0621     for (i = 0; i < pdata->rx_q_count; i++)
0622         regval |= (0x02 << (i << 1));
0623     writel(regval, pdata->mac_regs + MAC_RQC0R);
0624 
0625     /* Enable MAC Rx */
0626     regval = readl(pdata->mac_regs + MAC_RCR);
0627     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
0628                      MAC_RCR_DCRCC_LEN, 1);
0629     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
0630                      MAC_RCR_CST_LEN, 1);
0631     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
0632                      MAC_RCR_ACS_LEN, 1);
0633     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
0634                      MAC_RCR_RE_LEN, 1);
0635     writel(regval, pdata->mac_regs + MAC_RCR);
0636 }
0637 
0638 static void xlgmac_disable_rx(struct xlgmac_pdata *pdata)
0639 {
0640     struct xlgmac_channel *channel;
0641     unsigned int i;
0642     u32 regval;
0643 
0644     /* Disable MAC Rx */
0645     regval = readl(pdata->mac_regs + MAC_RCR);
0646     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS,
0647                      MAC_RCR_DCRCC_LEN, 0);
0648     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS,
0649                      MAC_RCR_CST_LEN, 0);
0650     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS,
0651                      MAC_RCR_ACS_LEN, 0);
0652     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS,
0653                      MAC_RCR_RE_LEN, 0);
0654     writel(regval, pdata->mac_regs + MAC_RCR);
0655 
0656     /* Prepare for Rx DMA channel stop */
0657     for (i = 0; i < pdata->rx_q_count; i++)
0658         xlgmac_prepare_rx_stop(pdata, i);
0659 
0660     /* Disable each Rx queue */
0661     writel(0, pdata->mac_regs + MAC_RQC0R);
0662 
0663     /* Disable each Rx DMA channel */
0664     channel = pdata->channel_head;
0665     for (i = 0; i < pdata->channel_count; i++, channel++) {
0666         if (!channel->rx_ring)
0667             break;
0668 
0669         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
0670         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
0671                          DMA_CH_RCR_SR_LEN, 0);
0672         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
0673     }
0674 }
0675 
0676 static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel,
0677                  struct xlgmac_ring *ring)
0678 {
0679     struct xlgmac_pdata *pdata = channel->pdata;
0680     struct xlgmac_desc_data *desc_data;
0681 
0682     /* Make sure everything is written before the register write */
0683     wmb();
0684 
0685     /* Issue a poll command to Tx DMA by writing address
0686      * of next immediate free descriptor
0687      */
0688     desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
0689     writel(lower_32_bits(desc_data->dma_desc_addr),
0690            XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO));
0691 
0692     /* Start the Tx timer */
0693     if (pdata->tx_usecs && !channel->tx_timer_active) {
0694         channel->tx_timer_active = 1;
0695         mod_timer(&channel->tx_timer,
0696               jiffies + usecs_to_jiffies(pdata->tx_usecs));
0697     }
0698 
0699     ring->tx.xmit_more = 0;
0700 }
0701 
0702 static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
0703 {
0704     struct xlgmac_pdata *pdata = channel->pdata;
0705     struct xlgmac_ring *ring = channel->tx_ring;
0706     unsigned int tso_context, vlan_context;
0707     struct xlgmac_desc_data *desc_data;
0708     struct xlgmac_dma_desc *dma_desc;
0709     struct xlgmac_pkt_info *pkt_info;
0710     unsigned int csum, tso, vlan;
0711     int start_index = ring->cur;
0712     int cur_index = ring->cur;
0713     unsigned int tx_set_ic;
0714     int i;
0715 
0716     pkt_info = &ring->pkt_info;
0717     csum = XLGMAC_GET_REG_BITS(pkt_info->attributes,
0718                    TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
0719                 TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN);
0720     tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
0721                   TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
0722                 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
0723     vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
0724                    TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
0725                 TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);
0726 
0727     if (tso && (pkt_info->mss != ring->tx.cur_mss))
0728         tso_context = 1;
0729     else
0730         tso_context = 0;
0731 
0732     if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))
0733         vlan_context = 1;
0734     else
0735         vlan_context = 0;
0736 
0737     /* Determine if an interrupt should be generated for this Tx:
0738      *   Interrupt:
0739      *     - Tx frame count exceeds the frame count setting
0740      *     - Addition of Tx frame count to the frame count since the
0741      *       last interrupt was set exceeds the frame count setting
0742      *   No interrupt:
0743      *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
0744      *     - Addition of Tx frame count to the frame count since the
0745      *       last interrupt was set does not exceed the frame count setting
0746      */
0747     ring->coalesce_count += pkt_info->tx_packets;
0748     if (!pdata->tx_frames)
0749         tx_set_ic = 0;
0750     else if (pkt_info->tx_packets > pdata->tx_frames)
0751         tx_set_ic = 1;
0752     else if ((ring->coalesce_count % pdata->tx_frames) <
0753          pkt_info->tx_packets)
0754         tx_set_ic = 1;
0755     else
0756         tx_set_ic = 0;
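    /* For example, assuming tx_frames = 16: a ring whose coalesce_count
     * goes from 30 to 33 while queuing 3 packets has crossed a multiple
     * of 16 (33 % 16 = 1 < 3), so tx_set_ic is set and this descriptor
     * will raise a completion interrupt.
     */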
0757 
0758     desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
0759     dma_desc = desc_data->dma_desc;
0760 
0761     /* Create a context descriptor if this is a TSO or VLAN packet */
0762     if (tso_context || vlan_context) {
0763         if (tso_context) {
0764             netif_dbg(pdata, tx_queued, pdata->netdev,
0765                   "TSO context descriptor, mss=%u\n",
0766                   pkt_info->mss);
0767 
0768             /* Set the MSS size */
0769             dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
0770                         dma_desc->desc2,
0771                         TX_CONTEXT_DESC2_MSS_POS,
0772                         TX_CONTEXT_DESC2_MSS_LEN,
0773                         pkt_info->mss);
0774 
0775             /* Mark it as a CONTEXT descriptor */
0776             dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0777                         dma_desc->desc3,
0778                         TX_CONTEXT_DESC3_CTXT_POS,
0779                         TX_CONTEXT_DESC3_CTXT_LEN,
0780                         1);
0781 
0782             /* Indicate this descriptor contains the MSS */
0783             dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0784                         dma_desc->desc3,
0785                         TX_CONTEXT_DESC3_TCMSSV_POS,
0786                         TX_CONTEXT_DESC3_TCMSSV_LEN,
0787                         1);
0788 
0789             ring->tx.cur_mss = pkt_info->mss;
0790         }
0791 
0792         if (vlan_context) {
0793             netif_dbg(pdata, tx_queued, pdata->netdev,
0794                   "VLAN context descriptor, ctag=%u\n",
0795                   pkt_info->vlan_ctag);
0796 
0797             /* Mark it as a CONTEXT descriptor */
0798             dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0799                         dma_desc->desc3,
0800                         TX_CONTEXT_DESC3_CTXT_POS,
0801                         TX_CONTEXT_DESC3_CTXT_LEN,
0802                         1);
0803 
0804             /* Set the VLAN tag */
0805             dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0806                         dma_desc->desc3,
0807                         TX_CONTEXT_DESC3_VT_POS,
0808                         TX_CONTEXT_DESC3_VT_LEN,
0809                         pkt_info->vlan_ctag);
0810 
0811             /* Indicate this descriptor contains the VLAN tag */
0812             dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0813                         dma_desc->desc3,
0814                         TX_CONTEXT_DESC3_VLTV_POS,
0815                         TX_CONTEXT_DESC3_VLTV_LEN,
0816                         1);
0817 
0818             ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag;
0819         }
0820 
0821         cur_index++;
0822         desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
0823         dma_desc = desc_data->dma_desc;
0824     }
0825 
0826     /* Update buffer address (for TSO this is the header) */
0827     dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma));
0828     dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma));
0829 
0830     /* Update the buffer length */
0831     dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
0832                 dma_desc->desc2,
0833                 TX_NORMAL_DESC2_HL_B1L_POS,
0834                 TX_NORMAL_DESC2_HL_B1L_LEN,
0835                 desc_data->skb_dma_len);
0836 
0837     /* VLAN tag insertion check */
0838     if (vlan) {
0839         dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
0840                     dma_desc->desc2,
0841                     TX_NORMAL_DESC2_VTIR_POS,
0842                     TX_NORMAL_DESC2_VTIR_LEN,
0843                     TX_NORMAL_DESC2_VLAN_INSERT);
0844         pdata->stats.tx_vlan_packets++;
0845     }
0846 
0847     /* Timestamp enablement check */
0848     if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
0849                 TX_PACKET_ATTRIBUTES_PTP_POS,
0850                 TX_PACKET_ATTRIBUTES_PTP_LEN))
0851         dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
0852                     dma_desc->desc2,
0853                     TX_NORMAL_DESC2_TTSE_POS,
0854                     TX_NORMAL_DESC2_TTSE_LEN,
0855                     1);
0856 
0857     /* Mark it as First Descriptor */
0858     dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0859                 dma_desc->desc3,
0860                 TX_NORMAL_DESC3_FD_POS,
0861                 TX_NORMAL_DESC3_FD_LEN,
0862                 1);
0863 
0864     /* Mark it as a NORMAL descriptor */
0865     dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0866                 dma_desc->desc3,
0867                 TX_NORMAL_DESC3_CTXT_POS,
0868                 TX_NORMAL_DESC3_CTXT_LEN,
0869                 0);
0870 
0871     /* Set OWN bit if not the first descriptor */
0872     if (cur_index != start_index)
0873         dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0874                     dma_desc->desc3,
0875                     TX_NORMAL_DESC3_OWN_POS,
0876                     TX_NORMAL_DESC3_OWN_LEN,
0877                     1);
0878 
0879     if (tso) {
0880         /* Enable TSO */
0881         dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0882                     dma_desc->desc3,
0883                     TX_NORMAL_DESC3_TSE_POS,
0884                     TX_NORMAL_DESC3_TSE_LEN, 1);
0885         dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0886                     dma_desc->desc3,
0887                     TX_NORMAL_DESC3_TCPPL_POS,
0888                     TX_NORMAL_DESC3_TCPPL_LEN,
0889                     pkt_info->tcp_payload_len);
0890         dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0891                     dma_desc->desc3,
0892                     TX_NORMAL_DESC3_TCPHDRLEN_POS,
0893                     TX_NORMAL_DESC3_TCPHDRLEN_LEN,
0894                     pkt_info->tcp_header_len / 4);
0895 
0896         pdata->stats.tx_tso_packets++;
0897     } else {
0898         /* Enable CRC and Pad Insertion */
0899         dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0900                     dma_desc->desc3,
0901                     TX_NORMAL_DESC3_CPC_POS,
0902                     TX_NORMAL_DESC3_CPC_LEN, 0);
0903 
0904         /* Enable HW CSUM */
0905         if (csum)
0906             dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0907                         dma_desc->desc3,
0908                         TX_NORMAL_DESC3_CIC_POS,
0909                         TX_NORMAL_DESC3_CIC_LEN,
0910                         0x3);
0911 
0912         /* Set the total length to be transmitted */
0913         dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0914                     dma_desc->desc3,
0915                     TX_NORMAL_DESC3_FL_POS,
0916                     TX_NORMAL_DESC3_FL_LEN,
0917                     pkt_info->length);
0918     }
0919 
0920     for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) {
0921         cur_index++;
0922         desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
0923         dma_desc = desc_data->dma_desc;
0924 
0925         /* Update buffer address */
0926         dma_desc->desc0 =
0927             cpu_to_le32(lower_32_bits(desc_data->skb_dma));
0928         dma_desc->desc1 =
0929             cpu_to_le32(upper_32_bits(desc_data->skb_dma));
0930 
0931         /* Update the buffer length */
0932         dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
0933                     dma_desc->desc2,
0934                     TX_NORMAL_DESC2_HL_B1L_POS,
0935                     TX_NORMAL_DESC2_HL_B1L_LEN,
0936                     desc_data->skb_dma_len);
0937 
0938         /* Set OWN bit */
0939         dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0940                     dma_desc->desc3,
0941                     TX_NORMAL_DESC3_OWN_POS,
0942                     TX_NORMAL_DESC3_OWN_LEN, 1);
0943 
0944         /* Mark it as NORMAL descriptor */
0945         dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0946                     dma_desc->desc3,
0947                     TX_NORMAL_DESC3_CTXT_POS,
0948                     TX_NORMAL_DESC3_CTXT_LEN, 0);
0949 
0950         /* Enable HW CSUM */
0951         if (csum)
0952             dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0953                         dma_desc->desc3,
0954                         TX_NORMAL_DESC3_CIC_POS,
0955                         TX_NORMAL_DESC3_CIC_LEN,
0956                         0x3);
0957     }
0958 
0959     /* Set LAST bit for the last descriptor */
0960     dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0961                 dma_desc->desc3,
0962                 TX_NORMAL_DESC3_LD_POS,
0963                 TX_NORMAL_DESC3_LD_LEN, 1);
0964 
0965     /* Set IC bit based on Tx coalescing settings */
0966     if (tx_set_ic)
0967         dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
0968                     dma_desc->desc2,
0969                     TX_NORMAL_DESC2_IC_POS,
0970                     TX_NORMAL_DESC2_IC_LEN, 1);
0971 
0972     /* Save the Tx info to report back during cleanup */
0973     desc_data->tx.packets = pkt_info->tx_packets;
0974     desc_data->tx.bytes = pkt_info->tx_bytes;
0975 
0976     /* In case the Tx DMA engine is running, make sure everything
0977      * is written to the descriptor(s) before setting the OWN bit
0978      * for the first descriptor
0979      */
0980     dma_wmb();
0981 
0982     /* Set OWN bit for the first descriptor */
0983     desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
0984     dma_desc = desc_data->dma_desc;
0985     dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
0986                 dma_desc->desc3,
0987                 TX_NORMAL_DESC3_OWN_POS,
0988                 TX_NORMAL_DESC3_OWN_LEN, 1);
0989 
0990     if (netif_msg_tx_queued(pdata))
0991         xlgmac_dump_tx_desc(pdata, ring, start_index,
0992                     pkt_info->desc_count, 1);
0993 
0994     /* Make sure ownership is written to the descriptor */
0995     smp_wmb();
0996 
0997     ring->cur = cur_index + 1;
0998     if (!netdev_xmit_more() ||
0999         netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
1000                            channel->queue_index)))
1001         xlgmac_tx_start_xmit(channel, ring);
1002     else
1003         ring->tx.xmit_more = 1;
1004 
1005     XLGMAC_PR("%s: descriptors %u to %u written\n",
1006           channel->name, start_index & (ring->dma_desc_count - 1),
1007           (ring->cur - 1) & (ring->dma_desc_count - 1));
1008 }
1009 
1010 static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info,
1011                  struct xlgmac_dma_desc *dma_desc)
1012 {
1013     u32 tsa, tsd;
1014     u64 nsec;
1015 
1016     tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
1017                      RX_CONTEXT_DESC3_TSA_POS,
1018                 RX_CONTEXT_DESC3_TSA_LEN);
1019     tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
1020                      RX_CONTEXT_DESC3_TSD_POS,
1021                 RX_CONTEXT_DESC3_TSD_LEN);
1022     if (tsa && !tsd) {
1023         nsec = le32_to_cpu(dma_desc->desc1);
1024         nsec <<= 32;
1025         nsec |= le32_to_cpu(dma_desc->desc0);
1026         if (nsec != 0xffffffffffffffffULL) {
1027             pkt_info->rx_tstamp = nsec;
1028             pkt_info->attributes = XLGMAC_SET_REG_BITS(
1029                     pkt_info->attributes,
1030                     RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
1031                     RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN,
1032                     1);
1033         }
1034     }
1035 }
1036 
1037 static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
1038 {
1039     struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
1040 
1041     /* Reset the Tx descriptor
1042      *   Set buffer 1 (lo) address to zero
1043      *   Set buffer 1 (hi) address to zero
1044      *   Reset desc2 control bits (IC, TTSE, B2L & B1L)
1045      *   Reset desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
1046      */
1047     dma_desc->desc0 = 0;
1048     dma_desc->desc1 = 0;
1049     dma_desc->desc2 = 0;
1050     dma_desc->desc3 = 0;
1051 
1052     /* Make sure ownership is written to the descriptor */
1053     dma_wmb();
1054 }
1055 
1056 static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
1057 {
1058     struct xlgmac_ring *ring = channel->tx_ring;
1059     struct xlgmac_desc_data *desc_data;
1060     int start_index = ring->cur;
1061     int i;
1062 
1063     /* Initialize all descriptors */
1064     for (i = 0; i < ring->dma_desc_count; i++) {
1065         desc_data = XLGMAC_GET_DESC_DATA(ring, i);
1066 
1067         /* Initialize Tx descriptor */
1068         xlgmac_tx_desc_reset(desc_data);
1069     }
1070 
1071     /* Update the total number of Tx descriptors */
1072     writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));
1073 
1074     /* Update the starting address of descriptor ring */
1075     desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
1076     writel(upper_32_bits(desc_data->dma_desc_addr),
1077            XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
1078     writel(lower_32_bits(desc_data->dma_desc_addr),
1079            XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
1080 }
1081 
1082 static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
1083                  struct xlgmac_desc_data *desc_data,
1084                  unsigned int index)
1085 {
1086     struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
1087     unsigned int rx_frames = pdata->rx_frames;
1088     unsigned int rx_usecs = pdata->rx_usecs;
1089     dma_addr_t hdr_dma, buf_dma;
1090     unsigned int inte;
1091 
1092     if (!rx_usecs && !rx_frames) {
1093         /* No coalescing, interrupt for every descriptor */
1094         inte = 1;
1095     } else {
1096         /* Set interrupt based on Rx frame coalescing setting */
1097         if (rx_frames && !((index + 1) % rx_frames))
1098             inte = 1;
1099         else
1100             inte = 0;
1101     }
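    /* For example, with rx_frames = 8 the INTE bit is set only on every
     * eighth descriptor (index 7, 15, ...), so Rx completion interrupts
     * are coalesced to at most one per eight received frames; the
     * rx_usecs case is handled by the per-channel RIWT watchdog instead.
     */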
1102 
1103     /* Reset the Rx descriptor
1104      *   Set buffer 1 (lo) address to header dma address (lo)
1105      *   Set buffer 1 (hi) address to header dma address (hi)
1106      *   Set buffer 2 (lo) address to buffer dma address (lo)
1107      *   Set buffer 2 (hi) address to buffer dma address (hi) and
1108      *     set control bits OWN and INTE
1109      */
1110     hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off;
1111     buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off;
1112     dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
1113     dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
1114     dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
1115     dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
1116 
1117     dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
1118                 dma_desc->desc3,
1119                 RX_NORMAL_DESC3_INTE_POS,
1120                 RX_NORMAL_DESC3_INTE_LEN,
1121                 inte);
1122 
1123     /* Since the Rx DMA engine is likely running, make sure everything
1124      * is written to the descriptor(s) before setting the OWN bit
1125      * for the descriptor
1126      */
1127     dma_wmb();
1128 
1129     dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
1130                 dma_desc->desc3,
1131                 RX_NORMAL_DESC3_OWN_POS,
1132                 RX_NORMAL_DESC3_OWN_LEN,
1133                 1);
1134 
1135     /* Make sure ownership is written to the descriptor */
1136     dma_wmb();
1137 }
1138 
1139 static void xlgmac_rx_desc_init(struct xlgmac_channel *channel)
1140 {
1141     struct xlgmac_pdata *pdata = channel->pdata;
1142     struct xlgmac_ring *ring = channel->rx_ring;
1143     unsigned int start_index = ring->cur;
1144     struct xlgmac_desc_data *desc_data;
1145     unsigned int i;
1146 
1147     /* Initialize all descriptors */
1148     for (i = 0; i < ring->dma_desc_count; i++) {
1149         desc_data = XLGMAC_GET_DESC_DATA(ring, i);
1150 
1151         /* Initialize Rx descriptor */
1152         xlgmac_rx_desc_reset(pdata, desc_data, i);
1153     }
1154 
1155     /* Update the total number of Rx descriptors */
1156     writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));
1157 
1158     /* Update the starting address of descriptor ring */
1159     desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
1160     writel(upper_32_bits(desc_data->dma_desc_addr),
1161            XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI));
1162     writel(lower_32_bits(desc_data->dma_desc_addr),
1163            XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO));
1164 
1165     /* Update the Rx Descriptor Tail Pointer */
1166     desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
1167                       ring->dma_desc_count - 1);
1168     writel(lower_32_bits(desc_data->dma_desc_addr),
1169            XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
1170 }
1171 
1172 static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc)
1173 {
1174     /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1175     return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
1176                 TX_NORMAL_DESC3_CTXT_POS,
1177                 TX_NORMAL_DESC3_CTXT_LEN);
1178 }
1179 
1180 static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc)
1181 {
1182     /* Rx and Tx share LD bit, so check TDES3.LD bit */
1183     return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
1184                 TX_NORMAL_DESC3_LD_POS,
1185                 TX_NORMAL_DESC3_LD_LEN);
1186 }
1187 
1188 static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata)
1189 {
1190     unsigned int max_q_count, q_count;
1191     unsigned int reg, regval;
1192     unsigned int i;
1193 
1194     /* Clear MTL flow control */
1195     for (i = 0; i < pdata->rx_q_count; i++) {
1196         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1197         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
1198                          MTL_Q_RQOMR_EHFC_LEN, 0);
1199         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1200     }
1201 
1202     /* Clear MAC flow control */
1203     max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
1204     q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
1205     reg = MAC_Q0TFCR;
1206     for (i = 0; i < q_count; i++) {
1207         regval = readl(pdata->mac_regs + reg);
1208         regval = XLGMAC_SET_REG_BITS(regval,
1209                          MAC_Q0TFCR_TFE_POS,
1210                     MAC_Q0TFCR_TFE_LEN,
1211                     0);
1212         writel(regval, pdata->mac_regs + reg);
1213 
1214         reg += MAC_QTFCR_INC;
1215     }
1216 
1217     return 0;
1218 }
1219 
1220 static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata)
1221 {
1222     unsigned int max_q_count, q_count;
1223     unsigned int reg, regval;
1224     unsigned int i;
1225 
1226     /* Set MTL flow control */
1227     for (i = 0; i < pdata->rx_q_count; i++) {
1228         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1229         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS,
1230                          MTL_Q_RQOMR_EHFC_LEN, 1);
1231         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1232     }
1233 
1234     /* Set MAC flow control */
1235     max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES;
1236     q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
1237     reg = MAC_Q0TFCR;
1238     for (i = 0; i < q_count; i++) {
1239         regval = readl(pdata->mac_regs + reg);
1240 
1241         /* Enable transmit flow control */
1242         regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS,
1243                          MAC_Q0TFCR_TFE_LEN, 1);
1244         /* Set pause time */
1245         regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS,
1246                          MAC_Q0TFCR_PT_LEN, 0xffff);
1247 
1248         writel(regval, pdata->mac_regs + reg);
1249 
1250         reg += MAC_QTFCR_INC;
1251     }
1252 
1253     return 0;
1254 }
1255 
1256 static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata)
1257 {
1258     u32 regval;
1259 
1260     regval = readl(pdata->mac_regs + MAC_RFCR);
1261     regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
1262                      MAC_RFCR_RFE_LEN, 0);
1263     writel(regval, pdata->mac_regs + MAC_RFCR);
1264 
1265     return 0;
1266 }
1267 
1268 static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata)
1269 {
1270     u32 regval;
1271 
1272     regval = readl(pdata->mac_regs + MAC_RFCR);
1273     regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS,
1274                      MAC_RFCR_RFE_LEN, 1);
1275     writel(regval, pdata->mac_regs + MAC_RFCR);
1276 
1277     return 0;
1278 }
1279 
1280 static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata)
1281 {
1282     if (pdata->tx_pause)
1283         xlgmac_enable_tx_flow_control(pdata);
1284     else
1285         xlgmac_disable_tx_flow_control(pdata);
1286 
1287     return 0;
1288 }
1289 
1290 static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata)
1291 {
1292     if (pdata->rx_pause)
1293         xlgmac_enable_rx_flow_control(pdata);
1294     else
1295         xlgmac_disable_rx_flow_control(pdata);
1296 
1297     return 0;
1298 }
1299 
1300 static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata)
1301 {
1302     struct xlgmac_channel *channel;
1303     unsigned int i;
1304     u32 regval;
1305 
1306     channel = pdata->channel_head;
1307     for (i = 0; i < pdata->channel_count; i++, channel++) {
1308         if (!channel->rx_ring)
1309             break;
1310 
1311         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
1312         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS,
1313                          DMA_CH_RIWT_RWT_LEN,
1314                          pdata->rx_riwt);
1315         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT));
1316     }
1317 
1318     return 0;
1319 }
1320 
1321 static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata)
1322 {
1323     xlgmac_config_tx_flow_control(pdata);
1324     xlgmac_config_rx_flow_control(pdata);
1325 }
1326 
1327 static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata)
1328 {
1329     unsigned int i;
1330     u32 regval;
1331 
1332     for (i = 0; i < pdata->rx_q_count; i++) {
1333         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1334         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS,
1335                          MTL_Q_RQOMR_FEP_LEN, 1);
1336         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1337     }
1338 }
1339 
1340 static void xlgmac_config_rx_fup_enable(struct xlgmac_pdata *pdata)
1341 {
1342     unsigned int i;
1343     u32 regval;
1344 
1345     for (i = 0; i < pdata->rx_q_count; i++) {
1346         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1347         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
1348                          MTL_Q_RQOMR_FUP_LEN, 1);
1349         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1350     }
1351 }
1352 
1353 static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
1354 {
1355     return 0;
1356 }
1357 
1358 static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
1359 {
1360     struct xlgmac_channel *channel;
1361     unsigned int i;
1362     u32 regval;
1363 
1364     channel = pdata->channel_head;
1365     for (i = 0; i < pdata->channel_count; i++, channel++) {
1366         if (!channel->rx_ring)
1367             break;
1368 
1369         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
1370         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
1371                          DMA_CH_RCR_RBSZ_LEN,
1372                     pdata->rx_buf_size);
1373         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
1374     }
1375 }
1376 
1377 static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
1378 {
1379     struct xlgmac_channel *channel;
1380     unsigned int i;
1381     u32 regval;
1382 
1383     channel = pdata->channel_head;
1384     for (i = 0; i < pdata->channel_count; i++, channel++) {
1385         if (!channel->tx_ring)
1386             break;
1387 
1388         if (pdata->hw_feat.tso) {
1389             regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1390             regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
1391                              DMA_CH_TCR_TSE_LEN, 1);
1392             writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1393         }
1394     }
1395 }
1396 
1397 static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
1398 {
1399     struct xlgmac_channel *channel;
1400     unsigned int i;
1401     u32 regval;
1402 
1403     channel = pdata->channel_head;
1404     for (i = 0; i < pdata->channel_count; i++, channel++) {
1405         if (!channel->rx_ring)
1406             break;
1407 
1408         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
1409         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
1410                          DMA_CH_CR_SPH_LEN, 1);
1411         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
1412     }
1413 
1414     regval = readl(pdata->mac_regs + MAC_RCR);
1415     regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
1416                      MAC_RCR_HDSMS_LEN,
1417                 XLGMAC_SPH_HDSMS_SIZE);
1418     writel(regval, pdata->mac_regs + MAC_RCR);
1419 }
1420 
1421 static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata,
1422                     unsigned int usec)
1423 {
1424     unsigned long rate;
1425     unsigned int ret;
1426 
1427     rate = pdata->sysclk_rate;
1428 
1429     /* Convert the input usec value to the watchdog timer value. Each
1430      * watchdog timer value is equivalent to 256 clock cycles.
1431      * Calculate the required value as:
1432      *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
1433      */
1434     ret = (usec * (rate / 1000000)) / 256;
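    /* For example, a 500 MHz system clock (rate = 500000000) and
     * usec = 30 gives (30 * 500) / 256 = 58 watchdog units.
     */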
1435 
1436     return ret;
1437 }
1438 
1439 static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata,
1440                     unsigned int riwt)
1441 {
1442     unsigned long rate;
1443     unsigned int ret;
1444 
1445     rate = pdata->sysclk_rate;
1446 
1447     /* Convert the input watchdog timer value to the usec value. Each
1448      * watchdog timer value is equivalent to 256 clock cycles.
1449      * Calculate the required value as:
1450      *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
1451      */
1452     ret = (riwt * 256) / (rate / 1000000);
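    /* The inverse of the example above: riwt = 58 at 500 MHz gives
     * (58 * 256) / 500 = 29 usec; the round trip loses a microsecond
     * to integer truncation.
     */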
1453 
1454     return ret;
1455 }
1456 
1457 static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata,
1458                       unsigned int val)
1459 {
1460     unsigned int i;
1461     u32 regval;
1462 
1463     for (i = 0; i < pdata->rx_q_count; i++) {
1464         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1465         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS,
1466                          MTL_Q_RQOMR_RTC_LEN, val);
1467         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1468     }
1469 
1470     return 0;
1471 }
1472 
1473 static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata)
1474 {
1475     unsigned int i;
1476     u32 regval;
1477 
1478     /* Set Tx to weighted round robin scheduling algorithm */
1479     regval = readl(pdata->mac_regs + MTL_OMR);
1480     regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS,
1481                      MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR);
1482     writel(regval, pdata->mac_regs + MTL_OMR);
1483 
1484     /* Set Tx traffic classes to use WRR algorithm with equal weights */
1485     for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
1486         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
1487         regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS,
1488                          MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS);
1489         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR));
1490 
1491         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
1492         regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS,
1493                          MTL_TC_QWR_QW_LEN, 1);
1494         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR));
1495     }
1496 
1497     /* Set Rx to strict priority algorithm */
1498     regval = readl(pdata->mac_regs + MTL_OMR);
1499     regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS,
1500                      MTL_OMR_RAA_LEN, MTL_RAA_SP);
1501     writel(regval, pdata->mac_regs + MTL_OMR);
1502 }
1503 
1504 static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata)
1505 {
1506     unsigned int ppq, ppq_extra, prio, prio_queues;
1507     unsigned int qptc, qptc_extra, queue;
1508     unsigned int reg, regval;
1509     unsigned int mask;
1510     unsigned int i, j;
1511 
1512     /* Map the MTL Tx Queues to Traffic Classes
1513      *   Note: Tx Queues >= Traffic Classes
1514      */
1515     qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
1516     qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
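    /* Illustrative example with hypothetical counts: 12 Tx queues over
     * 8 traffic classes gives qptc = 1 and qptc_extra = 4, so TC0..TC3
     * each take two queues and TC4..TC7 take one.
     */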
1517 
1518     for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
1519         for (j = 0; j < qptc; j++) {
1520             netif_dbg(pdata, drv, pdata->netdev,
1521                   "TXq%u mapped to TC%u\n", queue, i);
1522             regval = readl(XLGMAC_MTL_REG(pdata, queue,
1523                               MTL_Q_TQOMR));
1524             regval = XLGMAC_SET_REG_BITS(regval,
1525                              MTL_Q_TQOMR_Q2TCMAP_POS,
1526                              MTL_Q_TQOMR_Q2TCMAP_LEN,
1527                              i);
1528             writel(regval, XLGMAC_MTL_REG(pdata, queue,
1529                               MTL_Q_TQOMR));
1530             queue++;
1531         }
1532 
1533         if (i < qptc_extra) {
1534             netif_dbg(pdata, drv, pdata->netdev,
1535                   "TXq%u mapped to TC%u\n", queue, i);
1536             regval = readl(XLGMAC_MTL_REG(pdata, queue,
1537                               MTL_Q_TQOMR));
1538             regval = XLGMAC_SET_REG_BITS(regval,
1539                              MTL_Q_TQOMR_Q2TCMAP_POS,
1540                              MTL_Q_TQOMR_Q2TCMAP_LEN,
1541                              i);
1542             writel(regval, XLGMAC_MTL_REG(pdata, queue,
1543                               MTL_Q_TQOMR));
1544             queue++;
1545         }
1546     }
1547 
1548     /* Map the 8 VLAN priority values to available MTL Rx queues */
1549     prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
1550                 pdata->rx_q_count);
1551     ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
1552     ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
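    /* Illustrative example: with 3 Rx queues, ppq = 2 and ppq_extra = 2,
     * so RXq0 and RXq1 each receive three priorities and RXq2 two.
     */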
1553 
1554     reg = MAC_RQC2R;
1555     regval = 0;
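    /* Each queue's priority mask occupies one byte of the register, and
     * the register is written out and advanced once every
     * MAC_RQC2_Q_PER_REG queues (or at the final queue).
     */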
1556     for (i = 0, prio = 0; i < prio_queues;) {
1557         mask = 0;
1558         for (j = 0; j < ppq; j++) {
1559             netif_dbg(pdata, drv, pdata->netdev,
1560                   "PRIO%u mapped to RXq%u\n", prio, i);
1561             mask |= (1 << prio);
1562             prio++;
1563         }
1564 
1565         if (i < ppq_extra) {
1566             netif_dbg(pdata, drv, pdata->netdev,
1567                   "PRIO%u mapped to RXq%u\n", prio, i);
1568             mask |= (1 << prio);
1569             prio++;
1570         }
1571 
1572         regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
1573 
1574         if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
1575             continue;
1576 
1577         writel(regval, pdata->mac_regs + reg);
1578         reg += MAC_RQC2_INC;
1579         regval = 0;
1580     }
1581 
1582     /* Configure one-to-one mapping of MTL Rx queues to DMA Rx channels,
1583      * i.e. Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11
1584      */
1585     reg = MTL_RQDCM0R;
1586     regval = readl(pdata->mac_regs + reg);
1587     regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH |
1588             MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH);
1589     writel(regval, pdata->mac_regs + reg);
1590 
1591     reg += MTL_RQDCM_INC;
1592     regval = readl(pdata->mac_regs + reg);
1593     regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH |
1594             MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH);
1595     writel(regval, pdata->mac_regs + reg);
1596 
1597     reg += MTL_RQDCM_INC;
1598     regval = readl(pdata->mac_regs + reg);
1599     regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH |
1600             MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH);
1601     writel(regval, pdata->mac_regs + reg);
1602 }
1603 
1604 static unsigned int xlgmac_calculate_per_queue_fifo(
1605                     unsigned int fifo_size,
1606                     unsigned int queue_count)
1607 {
1608     unsigned int q_fifo_size;
1609     unsigned int p_fifo;
1610 
1611     /* Calculate the configured fifo size */
1612     q_fifo_size = 1 << (fifo_size + 7);
1613 
1614     /* The configured value may not be the actual amount of fifo RAM */
1615     q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size);
1616 
1617     q_fifo_size = q_fifo_size / queue_count;
1618 
1619     /* Each increment in the queue fifo size represents 256 bytes of
1620      * fifo, with 0 representing 256 bytes. Distribute the fifo equally
1621      * between the queues.
1622      */
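    /* Illustrative example with hypothetical sizes: 65536 bytes of fifo
     * shared by 4 queues leaves q_fifo_size = 16384 here, so p_fifo
     * becomes 64 - 1 = 63, which the TQS/RQS encoding reads back as
     * (63 + 1) * 256 = 16384 bytes per queue.
     */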
1623     p_fifo = q_fifo_size / 256;
1624     if (p_fifo)
1625         p_fifo--;
1626 
1627     return p_fifo;
1628 }
1629 
1630 static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata)
1631 {
1632     unsigned int fifo_size;
1633     unsigned int i;
1634     u32 regval;
1635 
1636     fifo_size = xlgmac_calculate_per_queue_fifo(
1637                 pdata->hw_feat.tx_fifo_size,
1638                 pdata->tx_q_count);
1639 
1640     for (i = 0; i < pdata->tx_q_count; i++) {
1641         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1642         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS,
1643                          MTL_Q_TQOMR_TQS_LEN, fifo_size);
1644         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1645     }
1646 
1647     netif_info(pdata, drv, pdata->netdev,
1648            "%d Tx hardware queues, %d byte fifo per queue\n",
1649            pdata->tx_q_count, ((fifo_size + 1) * 256));
1650 }
1651 
1652 static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata)
1653 {
1654     unsigned int fifo_size;
1655     unsigned int i;
1656     u32 regval;
1657 
1658     fifo_size = xlgmac_calculate_per_queue_fifo(
1659                     pdata->hw_feat.rx_fifo_size,
1660                     pdata->rx_q_count);
1661 
1662     for (i = 0; i < pdata->rx_q_count; i++) {
1663         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1664         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS,
1665                          MTL_Q_RQOMR_RQS_LEN, fifo_size);
1666         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1667     }
1668 
1669     netif_info(pdata, drv, pdata->netdev,
1670            "%d Rx hardware queues, %d byte fifo per queue\n",
1671            pdata->rx_q_count, ((fifo_size + 1) * 256));
1672 }
1673 
1674 static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata)
1675 {
1676     unsigned int i;
1677     u32 regval;
1678 
1679     for (i = 0; i < pdata->rx_q_count; i++) {
1680         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
1681         /* Activate flow control when less than 4k left in fifo */
1682         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS,
1683                          MTL_Q_RQFCR_RFA_LEN, 2);
1684         /* De-activate flow control when more than 6k left in fifo */
1685         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS,
1686                          MTL_Q_RQFCR_RFD_LEN, 4);
1687         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR));
1688     }
1689 }
1690 
1691 static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata,
1692                       unsigned int val)
1693 {
1694     unsigned int i;
1695     u32 regval;
1696 
1697     for (i = 0; i < pdata->tx_q_count; i++) {
1698         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1699         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS,
1700                          MTL_Q_TQOMR_TTC_LEN, val);
1701         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1702     }
1703 
1704     return 0;
1705 }
1706 
1707 static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata,
1708                   unsigned int val)
1709 {
1710     unsigned int i;
1711     u32 regval;
1712 
1713     for (i = 0; i < pdata->rx_q_count; i++) {
1714         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1715         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS,
1716                          MTL_Q_RQOMR_RSF_LEN, val);
1717         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
1718     }
1719 
1720     return 0;
1721 }
1722 
1723 static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata,
1724                   unsigned int val)
1725 {
1726     unsigned int i;
1727     u32 regval;
1728 
1729     for (i = 0; i < pdata->tx_q_count; i++) {
1730         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1731         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS,
1732                          MTL_Q_TQOMR_TSF_LEN, val);
1733         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
1734     }
1735 
1736     return 0;
1737 }
1738 
1739 static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata)
1740 {
1741     struct xlgmac_channel *channel;
1742     unsigned int i;
1743     u32 regval;
1744 
1745     channel = pdata->channel_head;
1746     for (i = 0; i < pdata->channel_count; i++, channel++) {
1747         if (!channel->tx_ring)
1748             break;
1749 
1750         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1751         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS,
1752                          DMA_CH_TCR_OSP_LEN,
1753                     pdata->tx_osp_mode);
1754         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1755     }
1756 
1757     return 0;
1758 }
1759 
1760 static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata)
1761 {
1762     struct xlgmac_channel *channel;
1763     unsigned int i;
1764     u32 regval;
1765 
1766     channel = pdata->channel_head;
1767     for (i = 0; i < pdata->channel_count; i++, channel++) {
1768         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
1769         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS,
1770                          DMA_CH_CR_PBLX8_LEN,
1771                     pdata->pblx8);
1772         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
1773     }
1774 
1775     return 0;
1776 }
1777 
1778 static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata)
1779 {
1780     u32 regval;
1781 
1782     regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR));
1783     regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
1784                      DMA_CH_TCR_PBL_LEN);
1785     return regval;
1786 }
1787 
1788 static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata)
1789 {
1790     struct xlgmac_channel *channel;
1791     unsigned int i;
1792     u32 regval;
1793 
1794     channel = pdata->channel_head;
1795     for (i = 0; i < pdata->channel_count; i++, channel++) {
1796         if (!channel->tx_ring)
1797             break;
1798 
1799         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1800         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS,
1801                          DMA_CH_TCR_PBL_LEN,
1802                     pdata->tx_pbl);
1803         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
1804     }
1805 
1806     return 0;
1807 }
1808 
1809 static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata)
1810 {
1811     u32 regval;
1812 
1813     regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR));
1814     regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
1815                      DMA_CH_RCR_PBL_LEN);
1816     return regval;
1817 }
1818 
1819 static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata)
1820 {
1821     struct xlgmac_channel *channel;
1822     unsigned int i;
1823     u32 regval;
1824 
1825     channel = pdata->channel_head;
1826     for (i = 0; i < pdata->channel_count; i++, channel++) {
1827         if (!channel->rx_ring)
1828             break;
1829 
1830         regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
1831         regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS,
1832                          DMA_CH_RCR_PBL_LEN,
1833                     pdata->rx_pbl);
1834         writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
1835     }
1836 
1837     return 0;
1838 }
1839 
1840 static u64 xlgmac_mmc_read(struct xlgmac_pdata *pdata, unsigned int reg_lo)
1841 {
1842     bool read_hi;
1843     u64 val;
1844 
1845     switch (reg_lo) {
1846     /* These registers are always 64 bit */
1847     case MMC_TXOCTETCOUNT_GB_LO:
1848     case MMC_TXOCTETCOUNT_G_LO:
1849     case MMC_RXOCTETCOUNT_GB_LO:
1850     case MMC_RXOCTETCOUNT_G_LO:
1851         read_hi = true;
1852         break;
1853 
1854     default:
1855         read_hi = false;
1856     }
1857 
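    /* Read the low 32 bits first; for the 64-bit octet counters, fold
     * in the high word from the adjacent register as well.
     */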
1858     val = (u64)readl(pdata->mac_regs + reg_lo);
1859 
1860     if (read_hi)
1861         val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32);
1862 
1863     return val;
1864 }
1865 
1866 static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata)
1867 {
1868     unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR);
1869     struct xlgmac_stats *stats = &pdata->stats;
1870 
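    /* Accumulate every counter whose status bit is set; with ROR enabled
     * in xlgmac_config_mmc(), the read also clears the hardware counter.
     */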
1871     if (XLGMAC_GET_REG_BITS(mmc_isr,
1872                 MMC_TISR_TXOCTETCOUNT_GB_POS,
1873                 MMC_TISR_TXOCTETCOUNT_GB_LEN))
1874         stats->txoctetcount_gb +=
1875             xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
1876 
1877     if (XLGMAC_GET_REG_BITS(mmc_isr,
1878                 MMC_TISR_TXFRAMECOUNT_GB_POS,
1879                 MMC_TISR_TXFRAMECOUNT_GB_LEN))
1880         stats->txframecount_gb +=
1881             xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
1882 
1883     if (XLGMAC_GET_REG_BITS(mmc_isr,
1884                 MMC_TISR_TXBROADCASTFRAMES_G_POS,
1885                 MMC_TISR_TXBROADCASTFRAMES_G_LEN))
1886         stats->txbroadcastframes_g +=
1887             xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
1888 
1889     if (XLGMAC_GET_REG_BITS(mmc_isr,
1890                 MMC_TISR_TXMULTICASTFRAMES_G_POS,
1891                 MMC_TISR_TXMULTICASTFRAMES_G_LEN))
1892         stats->txmulticastframes_g +=
1893             xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
1894 
1895     if (XLGMAC_GET_REG_BITS(mmc_isr,
1896                 MMC_TISR_TX64OCTETS_GB_POS,
1897                 MMC_TISR_TX64OCTETS_GB_LEN))
1898         stats->tx64octets_gb +=
1899             xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
1900 
1901     if (XLGMAC_GET_REG_BITS(mmc_isr,
1902                 MMC_TISR_TX65TO127OCTETS_GB_POS,
1903                 MMC_TISR_TX65TO127OCTETS_GB_LEN))
1904         stats->tx65to127octets_gb +=
1905             xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
1906 
1907     if (XLGMAC_GET_REG_BITS(mmc_isr,
1908                 MMC_TISR_TX128TO255OCTETS_GB_POS,
1909                 MMC_TISR_TX128TO255OCTETS_GB_LEN))
1910         stats->tx128to255octets_gb +=
1911             xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
1912 
1913     if (XLGMAC_GET_REG_BITS(mmc_isr,
1914                 MMC_TISR_TX256TO511OCTETS_GB_POS,
1915                 MMC_TISR_TX256TO511OCTETS_GB_LEN))
1916         stats->tx256to511octets_gb +=
1917             xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
1918 
1919     if (XLGMAC_GET_REG_BITS(mmc_isr,
1920                 MMC_TISR_TX512TO1023OCTETS_GB_POS,
1921                 MMC_TISR_TX512TO1023OCTETS_GB_LEN))
1922         stats->tx512to1023octets_gb +=
1923             xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
1924 
1925     if (XLGMAC_GET_REG_BITS(mmc_isr,
1926                 MMC_TISR_TX1024TOMAXOCTETS_GB_POS,
1927                 MMC_TISR_TX1024TOMAXOCTETS_GB_LEN))
1928         stats->tx1024tomaxoctets_gb +=
1929             xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
1930 
1931     if (XLGMAC_GET_REG_BITS(mmc_isr,
1932                 MMC_TISR_TXUNICASTFRAMES_GB_POS,
1933                 MMC_TISR_TXUNICASTFRAMES_GB_LEN))
1934         stats->txunicastframes_gb +=
1935             xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
1936 
1937     if (XLGMAC_GET_REG_BITS(mmc_isr,
1938                 MMC_TISR_TXMULTICASTFRAMES_GB_POS,
1939                 MMC_TISR_TXMULTICASTFRAMES_GB_LEN))
1940         stats->txmulticastframes_gb +=
1941             xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
1942 
1943     if (XLGMAC_GET_REG_BITS(mmc_isr,
1944                 MMC_TISR_TXBROADCASTFRAMES_GB_POS,
1945                 MMC_TISR_TXBROADCASTFRAMES_GB_LEN))
1946         stats->txbroadcastframes_g +=
1947             xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
1948 
1949     if (XLGMAC_GET_REG_BITS(mmc_isr,
1950                 MMC_TISR_TXUNDERFLOWERROR_POS,
1951                 MMC_TISR_TXUNDERFLOWERROR_LEN))
1952         stats->txunderflowerror +=
1953             xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
1954 
1955     if (XLGMAC_GET_REG_BITS(mmc_isr,
1956                 MMC_TISR_TXOCTETCOUNT_G_POS,
1957                 MMC_TISR_TXOCTETCOUNT_G_LEN))
1958         stats->txoctetcount_g +=
1959             xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
1960 
1961     if (XLGMAC_GET_REG_BITS(mmc_isr,
1962                 MMC_TISR_TXFRAMECOUNT_G_POS,
1963                 MMC_TISR_TXFRAMECOUNT_G_LEN))
1964         stats->txframecount_g +=
1965             xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
1966 
1967     if (XLGMAC_GET_REG_BITS(mmc_isr,
1968                 MMC_TISR_TXPAUSEFRAMES_POS,
1969                 MMC_TISR_TXPAUSEFRAMES_LEN))
1970         stats->txpauseframes +=
1971             xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
1972 
1973     if (XLGMAC_GET_REG_BITS(mmc_isr,
1974                 MMC_TISR_TXVLANFRAMES_G_POS,
1975                 MMC_TISR_TXVLANFRAMES_G_LEN))
1976         stats->txvlanframes_g +=
1977             xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
1978 }
1979 
1980 static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata)
1981 {
1982     unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR);
1983     struct xlgmac_stats *stats = &pdata->stats;
1984 
1985     if (XLGMAC_GET_REG_BITS(mmc_isr,
1986                 MMC_RISR_RXFRAMECOUNT_GB_POS,
1987                 MMC_RISR_RXFRAMECOUNT_GB_LEN))
1988         stats->rxframecount_gb +=
1989             xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
1990 
1991     if (XLGMAC_GET_REG_BITS(mmc_isr,
1992                 MMC_RISR_RXOCTETCOUNT_GB_POS,
1993                 MMC_RISR_RXOCTETCOUNT_GB_LEN))
1994         stats->rxoctetcount_gb +=
1995             xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
1996 
1997     if (XLGMAC_GET_REG_BITS(mmc_isr,
1998                 MMC_RISR_RXOCTETCOUNT_G_POS,
1999                 MMC_RISR_RXOCTETCOUNT_G_LEN))
2000         stats->rxoctetcount_g +=
2001             xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2002 
2003     if (XLGMAC_GET_REG_BITS(mmc_isr,
2004                 MMC_RISR_RXBROADCASTFRAMES_G_POS,
2005                 MMC_RISR_RXBROADCASTFRAMES_G_LEN))
2006         stats->rxbroadcastframes_g +=
2007             xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2008 
2009     if (XLGMAC_GET_REG_BITS(mmc_isr,
2010                 MMC_RISR_RXMULTICASTFRAMES_G_POS,
2011                 MMC_RISR_RXMULTICASTFRAMES_G_LEN))
2012         stats->rxmulticastframes_g +=
2013             xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2014 
2015     if (XLGMAC_GET_REG_BITS(mmc_isr,
2016                 MMC_RISR_RXCRCERROR_POS,
2017                 MMC_RISR_RXCRCERROR_LEN))
2018         stats->rxcrcerror +=
2019             xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
2020 
2021     if (XLGMAC_GET_REG_BITS(mmc_isr,
2022                 MMC_RISR_RXRUNTERROR_POS,
2023                 MMC_RISR_RXRUNTERROR_LEN))
2024         stats->rxrunterror +=
2025             xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
2026 
2027     if (XLGMAC_GET_REG_BITS(mmc_isr,
2028                 MMC_RISR_RXJABBERERROR_POS,
2029                 MMC_RISR_RXJABBERERROR_LEN))
2030         stats->rxjabbererror +=
2031             xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
2032 
2033     if (XLGMAC_GET_REG_BITS(mmc_isr,
2034                 MMC_RISR_RXUNDERSIZE_G_POS,
2035                 MMC_RISR_RXUNDERSIZE_G_LEN))
2036         stats->rxundersize_g +=
2037             xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2038 
2039     if (XLGMAC_GET_REG_BITS(mmc_isr,
2040                 MMC_RISR_RXOVERSIZE_G_POS,
2041                 MMC_RISR_RXOVERSIZE_G_LEN))
2042         stats->rxoversize_g +=
2043             xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
2044 
2045     if (XLGMAC_GET_REG_BITS(mmc_isr,
2046                 MMC_RISR_RX64OCTETS_GB_POS,
2047                 MMC_RISR_RX64OCTETS_GB_LEN))
2048         stats->rx64octets_gb +=
2049             xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2050 
2051     if (XLGMAC_GET_REG_BITS(mmc_isr,
2052                 MMC_RISR_RX65TO127OCTETS_GB_POS,
2053                 MMC_RISR_RX65TO127OCTETS_GB_LEN))
2054         stats->rx65to127octets_gb +=
2055             xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2056 
2057     if (XLGMAC_GET_REG_BITS(mmc_isr,
2058                 MMC_RISR_RX128TO255OCTETS_GB_POS,
2059                 MMC_RISR_RX128TO255OCTETS_GB_LEN))
2060         stats->rx128to255octets_gb +=
2061             xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2062 
2063     if (XLGMAC_GET_REG_BITS(mmc_isr,
2064                 MMC_RISR_RX256TO511OCTETS_GB_POS,
2065                 MMC_RISR_RX256TO511OCTETS_GB_LEN))
2066         stats->rx256to511octets_gb +=
2067             xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2068 
2069     if (XLGMAC_GET_REG_BITS(mmc_isr,
2070                 MMC_RISR_RX512TO1023OCTETS_GB_POS,
2071                 MMC_RISR_RX512TO1023OCTETS_GB_LEN))
2072         stats->rx512to1023octets_gb +=
2073             xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2074 
2075     if (XLGMAC_GET_REG_BITS(mmc_isr,
2076                 MMC_RISR_RX1024TOMAXOCTETS_GB_POS,
2077                 MMC_RISR_RX1024TOMAXOCTETS_GB_LEN))
2078         stats->rx1024tomaxoctets_gb +=
2079             xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2080 
2081     if (XLGMAC_GET_REG_BITS(mmc_isr,
2082                 MMC_RISR_RXUNICASTFRAMES_G_POS,
2083                 MMC_RISR_RXUNICASTFRAMES_G_LEN))
2084         stats->rxunicastframes_g +=
2085             xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2086 
2087     if (XLGMAC_GET_REG_BITS(mmc_isr,
2088                 MMC_RISR_RXLENGTHERROR_POS,
2089                 MMC_RISR_RXLENGTHERROR_LEN))
2090         stats->rxlengtherror +=
2091             xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2092 
2093     if (XLGMAC_GET_REG_BITS(mmc_isr,
2094                 MMC_RISR_RXOUTOFRANGETYPE_POS,
2095                 MMC_RISR_RXOUTOFRANGETYPE_LEN))
2096         stats->rxoutofrangetype +=
2097             xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2098 
2099     if (XLGMAC_GET_REG_BITS(mmc_isr,
2100                 MMC_RISR_RXPAUSEFRAMES_POS,
2101                 MMC_RISR_RXPAUSEFRAMES_LEN))
2102         stats->rxpauseframes +=
2103             xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2104 
2105     if (XLGMAC_GET_REG_BITS(mmc_isr,
2106                 MMC_RISR_RXFIFOOVERFLOW_POS,
2107                 MMC_RISR_RXFIFOOVERFLOW_LEN))
2108         stats->rxfifooverflow +=
2109             xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2110 
2111     if (XLGMAC_GET_REG_BITS(mmc_isr,
2112                 MMC_RISR_RXVLANFRAMES_GB_POS,
2113                 MMC_RISR_RXVLANFRAMES_GB_LEN))
2114         stats->rxvlanframes_gb +=
2115             xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2116 
2117     if (XLGMAC_GET_REG_BITS(mmc_isr,
2118                 MMC_RISR_RXWATCHDOGERROR_POS,
2119                 MMC_RISR_RXWATCHDOGERROR_LEN))
2120         stats->rxwatchdogerror +=
2121             xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2122 }
2123 
2124 static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata)
2125 {
2126     struct xlgmac_stats *stats = &pdata->stats;
2127     u32 regval;
2128 
2129     /* Freeze counters */
2130     regval = readl(pdata->mac_regs + MMC_CR);
2131     regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
2132                      MMC_CR_MCF_LEN, 1);
2133     writel(regval, pdata->mac_regs + MMC_CR);
2134 
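    /* With updates frozen, snapshot every MMC counter into the
     * software statistics.
     */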
2135     stats->txoctetcount_gb +=
2136         xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
2137 
2138     stats->txframecount_gb +=
2139         xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
2140 
2141     stats->txbroadcastframes_g +=
2142         xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
2143 
2144     stats->txmulticastframes_g +=
2145         xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
2146 
2147     stats->tx64octets_gb +=
2148         xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
2149 
2150     stats->tx65to127octets_gb +=
2151         xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
2152 
2153     stats->tx128to255octets_gb +=
2154         xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
2155 
2156     stats->tx256to511octets_gb +=
2157         xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
2158 
2159     stats->tx512to1023octets_gb +=
2160         xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
2161 
2162     stats->tx1024tomaxoctets_gb +=
2163         xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
2164 
2165     stats->txunicastframes_gb +=
2166         xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
2167 
2168     stats->txmulticastframes_gb +=
2169         xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
2170 
2171     stats->txbroadcastframes_g +=
2172         xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
2173 
2174     stats->txunderflowerror +=
2175         xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
2176 
2177     stats->txoctetcount_g +=
2178         xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
2179 
2180     stats->txframecount_g +=
2181         xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
2182 
2183     stats->txpauseframes +=
2184         xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
2185 
2186     stats->txvlanframes_g +=
2187         xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
2188 
2189     stats->rxframecount_gb +=
2190         xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
2191 
2192     stats->rxoctetcount_gb +=
2193         xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
2194 
2195     stats->rxoctetcount_g +=
2196         xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2197 
2198     stats->rxbroadcastframes_g +=
2199         xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2200 
2201     stats->rxmulticastframes_g +=
2202         xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2203 
2204     stats->rxcrcerror +=
2205         xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO);
2206 
2207     stats->rxrunterror +=
2208         xlgmac_mmc_read(pdata, MMC_RXRUNTERROR);
2209 
2210     stats->rxjabbererror +=
2211         xlgmac_mmc_read(pdata, MMC_RXJABBERERROR);
2212 
2213     stats->rxundersize_g +=
2214         xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2215 
2216     stats->rxoversize_g +=
2217         xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G);
2218 
2219     stats->rx64octets_gb +=
2220         xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2221 
2222     stats->rx65to127octets_gb +=
2223         xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2224 
2225     stats->rx128to255octets_gb +=
2226         xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2227 
2228     stats->rx256to511octets_gb +=
2229         xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2230 
2231     stats->rx512to1023octets_gb +=
2232         xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2233 
2234     stats->rx1024tomaxoctets_gb +=
2235         xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2236 
2237     stats->rxunicastframes_g +=
2238         xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2239 
2240     stats->rxlengtherror +=
2241         xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2242 
2243     stats->rxoutofrangetype +=
2244         xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2245 
2246     stats->rxpauseframes +=
2247         xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2248 
2249     stats->rxfifooverflow +=
2250         xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2251 
2252     stats->rxvlanframes_gb +=
2253         xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2254 
2255     stats->rxwatchdogerror +=
2256         xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2257 
2258     /* Un-freeze counters */
2259     regval = readl(pdata->mac_regs + MMC_CR);
2260     regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS,
2261                      MMC_CR_MCF_LEN, 0);
2262     writel(regval, pdata->mac_regs + MMC_CR);
2263 }
2264 
2265 static void xlgmac_config_mmc(struct xlgmac_pdata *pdata)
2266 {
2267     u32 regval;
2268 
2269     regval = readl(pdata->mac_regs + MMC_CR);
2270     /* Set counters to reset on read */
2271     regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS,
2272                      MMC_CR_ROR_LEN, 1);
2273     /* Reset the counters */
2274     regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS,
2275                      MMC_CR_CR_LEN, 1);
2276     writel(regval, pdata->mac_regs + MMC_CR);
2277 }
2278 
2279 static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type,
2280                 unsigned int index, unsigned int val)
2281 {
2282     unsigned int wait;
2283     int ret = 0;
2284     u32 regval;
2285 
2286     mutex_lock(&pdata->rss_mutex);
2287 
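    /* Bail out if a previous indirect RSS access is still pending */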
2288     regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
2289                      MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN);
2290     if (regval) {
2291         ret = -EBUSY;
2292         goto unlock;
2293     }
2294 
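    /* Stage the value in the RSS data register, describe it in the
     * address register, then set the OB bit to start the write.
     */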
2295     writel(val, pdata->mac_regs + MAC_RSSDR);
2296 
2297     regval = readl(pdata->mac_regs + MAC_RSSAR);
2298     regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS,
2299                      MAC_RSSAR_RSSIA_LEN, index);
2300     regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS,
2301                      MAC_RSSAR_ADDRT_LEN, type);
2302     regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS,
2303                      MAC_RSSAR_CT_LEN, 0);
2304     regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS,
2305                      MAC_RSSAR_OB_LEN, 1);
2306     writel(regval, pdata->mac_regs + MAC_RSSAR);
2307 
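    /* Poll for the hardware to clear the OB bit; give up after
     * roughly one second.
     */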
2308     wait = 1000;
2309     while (wait--) {
2310         regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR),
2311                          MAC_RSSAR_OB_POS,
2312                          MAC_RSSAR_OB_LEN);
2313         if (!regval)
2314             goto unlock;
2315 
2316         usleep_range(1000, 1500);
2317     }
2318 
2319     ret = -EBUSY;
2320 
2321 unlock:
2322     mutex_unlock(&pdata->rss_mutex);
2323 
2324     return ret;
2325 }
2326 
2327 static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata)
2328 {
2329     unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
2330     unsigned int *key = (unsigned int *)&pdata->rss_key;
2331     int ret;
2332 
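    /* Program the hash key one 32-bit word at a time, from the highest
     * key register index down to 0.
     */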
2333     while (key_regs--) {
2334         ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE,
2335                        key_regs, *key++);
2336         if (ret)
2337             return ret;
2338     }
2339 
2340     return 0;
2341 }
2342 
2343 static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata)
2344 {
2345     unsigned int i;
2346     int ret;
2347 
2348     for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
2349         ret = xlgmac_write_rss_reg(pdata,
2350                        XLGMAC_RSS_LOOKUP_TABLE_TYPE, i,
2351                        pdata->rss_table[i]);
2352         if (ret)
2353             return ret;
2354     }
2355 
2356     return 0;
2357 }
2358 
2359 static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key)
2360 {
2361     memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
2362 
2363     return xlgmac_write_rss_hash_key(pdata);
2364 }
2365 
2366 static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata,
2367                        const u32 *table)
2368 {
2369     unsigned int i;
2370     u32 tval;
2371 
2372     for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
2373         tval = table[i];
2374         pdata->rss_table[i] = XLGMAC_SET_REG_BITS(
2375                         pdata->rss_table[i],
2376                         MAC_RSSDR_DMCH_POS,
2377                         MAC_RSSDR_DMCH_LEN,
2378                         tval);
2379     }
2380 
2381     return xlgmac_write_rss_lookup_table(pdata);
2382 }
2383 
2384 static int xlgmac_enable_rss(struct xlgmac_pdata *pdata)
2385 {
2386     u32 regval;
2387     int ret;
2388 
2389     if (!pdata->hw_feat.rss)
2390         return -EOPNOTSUPP;
2391 
2392     /* Program the hash key */
2393     ret = xlgmac_write_rss_hash_key(pdata);
2394     if (ret)
2395         return ret;
2396 
2397     /* Program the lookup table */
2398     ret = xlgmac_write_rss_lookup_table(pdata);
2399     if (ret)
2400         return ret;
2401 
2402     /* Set the RSS options */
2403     writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR);
2404 
2405     /* Enable RSS */
2406     regval = readl(pdata->mac_regs + MAC_RSSCR);
2407     regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
2408                      MAC_RSSCR_RSSE_LEN, 1);
2409     writel(regval, pdata->mac_regs + MAC_RSSCR);
2410 
2411     return 0;
2412 }
2413 
2414 static int xlgmac_disable_rss(struct xlgmac_pdata *pdata)
2415 {
2416     u32 regval;
2417 
2418     if (!pdata->hw_feat.rss)
2419         return -EOPNOTSUPP;
2420 
2421     regval = readl(pdata->mac_regs + MAC_RSSCR);
2422     regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS,
2423                      MAC_RSSCR_RSSE_LEN, 0);
2424     writel(regval, pdata->mac_regs + MAC_RSSCR);
2425 
2426     return 0;
2427 }
2428 
2429 static void xlgmac_config_rss(struct xlgmac_pdata *pdata)
2430 {
2431     int ret;
2432 
2433     if (!pdata->hw_feat.rss)
2434         return;
2435 
2436     if (pdata->netdev->features & NETIF_F_RXHASH)
2437         ret = xlgmac_enable_rss(pdata);
2438     else
2439         ret = xlgmac_disable_rss(pdata);
2440 
2441     if (ret)
2442         netdev_err(pdata->netdev,
2443                "error configuring RSS, RSS disabled\n");
2444 }
2445 
2446 static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata)
2447 {
2448     unsigned int dma_ch_isr, dma_ch_ier;
2449     struct xlgmac_channel *channel;
2450     unsigned int i;
2451 
2452     channel = pdata->channel_head;
2453     for (i = 0; i < pdata->channel_count; i++, channel++) {
2454         /* Clear all the interrupts which are set */
2455         dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
2456         writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
2457 
2458         /* Clear all interrupt enable bits */
2459         dma_ch_ier = 0;
2460 
2461         /* Enable the following interrupts
2462          *   NIE  - Normal Interrupt Summary Enable
2463          *   AIE  - Abnormal Interrupt Summary Enable
2464          *   FBEE - Fatal Bus Error Enable
2465          */
2466         dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
2467                          DMA_CH_IER_NIE_POS,
2468                     DMA_CH_IER_NIE_LEN, 1);
2469         dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
2470                          DMA_CH_IER_AIE_POS,
2471                     DMA_CH_IER_AIE_LEN, 1);
2472         dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier,
2473                          DMA_CH_IER_FBEE_POS,
2474                     DMA_CH_IER_FBEE_LEN, 1);
2475 
2476         if (channel->tx_ring) {
2477             /* Enable the following Tx interrupts
2478              *   TIE  - Transmit Interrupt Enable (unless using
2479              *          per channel interrupts)
2480              */
2481             if (!pdata->per_channel_irq)
2482                 dma_ch_ier = XLGMAC_SET_REG_BITS(
2483                         dma_ch_ier,
2484                         DMA_CH_IER_TIE_POS,
2485                         DMA_CH_IER_TIE_LEN,
2486                         1);
2487         }
2488         if (channel->rx_ring) {
2489             /* Enable the following Rx interrupts
2490              *   RBUE - Receive Buffer Unavailable Enable
2491              *   RIE  - Receive Interrupt Enable (unless using
2492              *          per channel interrupts)
2493              */
2494             dma_ch_ier = XLGMAC_SET_REG_BITS(
2495                     dma_ch_ier,
2496                     DMA_CH_IER_RBUE_POS,
2497                     DMA_CH_IER_RBUE_LEN,
2498                     1);
2499             if (!pdata->per_channel_irq)
2500                 dma_ch_ier = XLGMAC_SET_REG_BITS(
2501                         dma_ch_ier,
2502                         DMA_CH_IER_RIE_POS,
2503                         DMA_CH_IER_RIE_LEN,
2504                         1);
2505         }
2506 
2507         writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
2508     }
2509 }
2510 
2511 static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata)
2512 {
2513     unsigned int q_count, i;
2514     unsigned int mtl_q_isr;
2515 
2516     q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
2517     for (i = 0; i < q_count; i++) {
2518         /* Clear all the interrupts which are set */
2519         mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
2520         writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
2521 
2522         /* No MTL interrupts to be enabled */
2523         writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER));
2524     }
2525 }
2526 
2527 static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata)
2528 {
2529     unsigned int mac_ier = 0;
2530     u32 regval;
2531 
2532     /* Enable Timestamp interrupt */
2533     mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS,
2534                       MAC_IER_TSIE_LEN, 1);
2535 
2536     writel(mac_ier, pdata->mac_regs + MAC_IER);
2537 
2538     /* Enable all counter interrupts */
2539     regval = readl(pdata->mac_regs + MMC_RIER);
2540     regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS,
2541                      MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff);
2542     writel(regval, pdata->mac_regs + MMC_RIER);
2543     regval = readl(pdata->mac_regs + MMC_TIER);
2544     regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS,
2545                      MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff);
2546     writel(regval, pdata->mac_regs + MMC_TIER);
2547 }
2548 
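/* MAC_TCR.SS speed selections used by the helpers below:
 *   0x0 - 40 Gbps, 0x1 - 25 Gbps, 0x2 - 50 Gbps, 0x3 - 100 Gbps
 * Each helper is a no-op if the requested speed is already programmed.
 */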
2549 static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata)
2550 {
2551     u32 regval;
2552 
2553     regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
2554                      MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
2555     if (regval == 0x1)
2556         return 0;
2557 
2558     regval = readl(pdata->mac_regs + MAC_TCR);
2559     regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
2560                      MAC_TCR_SS_LEN, 0x1);
2561     writel(regval, pdata->mac_regs + MAC_TCR);
2562 
2563     return 0;
2564 }
2565 
2566 static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata)
2567 {
2568     u32 regval;
2569 
2570     regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
2571                      MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
2572     if (regval == 0)
2573         return 0;
2574 
2575     regval = readl(pdata->mac_regs + MAC_TCR);
2576     regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
2577                      MAC_TCR_SS_LEN, 0);
2578     writel(regval, pdata->mac_regs + MAC_TCR);
2579 
2580     return 0;
2581 }
2582 
2583 static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata)
2584 {
2585     u32 regval;
2586 
2587     regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
2588                      MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
2589     if (regval == 0x2)
2590         return 0;
2591 
2592     regval = readl(pdata->mac_regs + MAC_TCR);
2593     regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
2594                      MAC_TCR_SS_LEN, 0x2);
2595     writel(regval, pdata->mac_regs + MAC_TCR);
2596 
2597     return 0;
2598 }
2599 
2600 static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata)
2601 {
2602     u32 regval;
2603 
2604     regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
2605                      MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
2606     if (regval == 0x3)
2607         return 0;
2608 
2609     regval = readl(pdata->mac_regs + MAC_TCR);
2610     regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
2611                      MAC_TCR_SS_LEN, 0x3);
2612     writel(regval, pdata->mac_regs + MAC_TCR);
2613 
2614     return 0;
2615 }
2616 
2617 static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata)
2618 {
2619     switch (pdata->phy_speed) {
2620     case SPEED_100000:
2621         xlgmac_set_xlgmii_100000_speed(pdata);
2622         break;
2623 
2624     case SPEED_50000:
2625         xlgmac_set_xlgmii_50000_speed(pdata);
2626         break;
2627 
2628     case SPEED_40000:
2629         xlgmac_set_xlgmii_40000_speed(pdata);
2630         break;
2631 
2632     case SPEED_25000:
2633         xlgmac_set_xlgmii_25000_speed(pdata);
2634         break;
2635     }
2636 }
2637 
2638 static int xlgmac_dev_read(struct xlgmac_channel *channel)
2639 {
2640     struct xlgmac_pdata *pdata = channel->pdata;
2641     struct xlgmac_ring *ring = channel->rx_ring;
2642     struct net_device *netdev = pdata->netdev;
2643     struct xlgmac_desc_data *desc_data;
2644     struct xlgmac_dma_desc *dma_desc;
2645     struct xlgmac_pkt_info *pkt_info;
2646     unsigned int err, etlt, l34t;
2647 
2648     desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
2649     dma_desc = desc_data->dma_desc;
2650     pkt_info = &ring->pkt_info;
2651 
2652     /* Check for data availability */
2653     if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2654                    RX_NORMAL_DESC3_OWN_POS,
2655                    RX_NORMAL_DESC3_OWN_LEN))
2656         return 1;
2657 
2658     /* Make sure descriptor fields are read after reading the OWN bit */
2659     dma_rmb();
2660 
2661     if (netif_msg_rx_status(pdata))
2662         xlgmac_dump_rx_desc(pdata, ring, ring->cur);
2663 
2664     if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2665                    RX_NORMAL_DESC3_CTXT_POS,
2666                    RX_NORMAL_DESC3_CTXT_LEN)) {
2667         /* Timestamp Context Descriptor */
2668         xlgmac_get_rx_tstamp(pkt_info, dma_desc);
2669 
2670         pkt_info->attributes = XLGMAC_SET_REG_BITS(
2671                     pkt_info->attributes,
2672                     RX_PACKET_ATTRIBUTES_CONTEXT_POS,
2673                     RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
2674                     1);
2675         pkt_info->attributes = XLGMAC_SET_REG_BITS(
2676                 pkt_info->attributes,
2677                 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
2678                 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
2679                 0);
2680         return 0;
2681     }
2682 
2683     /* Normal Descriptor, be sure Context Descriptor bit is off */
2684     pkt_info->attributes = XLGMAC_SET_REG_BITS(
2685                 pkt_info->attributes,
2686                 RX_PACKET_ATTRIBUTES_CONTEXT_POS,
2687                 RX_PACKET_ATTRIBUTES_CONTEXT_LEN,
2688                 0);
2689 
2690     /* Indicate if a Context Descriptor is next */
2691     if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2692                    RX_NORMAL_DESC3_CDA_POS,
2693                    RX_NORMAL_DESC3_CDA_LEN))
2694         pkt_info->attributes = XLGMAC_SET_REG_BITS(
2695                 pkt_info->attributes,
2696                 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
2697                 RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN,
2698                 1);
2699 
2700     /* Get the header length */
2701     if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2702                    RX_NORMAL_DESC3_FD_POS,
2703                    RX_NORMAL_DESC3_FD_LEN)) {
2704         desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2,
2705                             RX_NORMAL_DESC2_HL_POS,
2706                             RX_NORMAL_DESC2_HL_LEN);
2707         if (desc_data->rx.hdr_len)
2708             pdata->stats.rx_split_header_packets++;
2709     }
2710 
2711     /* Get the RSS hash */
2712     if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2713                    RX_NORMAL_DESC3_RSV_POS,
2714                    RX_NORMAL_DESC3_RSV_LEN)) {
2715         pkt_info->attributes = XLGMAC_SET_REG_BITS(
2716                 pkt_info->attributes,
2717                 RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
2718                 RX_PACKET_ATTRIBUTES_RSS_HASH_LEN,
2719                 1);
2720 
2721         pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1);
2722 
2723         l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2724                           RX_NORMAL_DESC3_L34T_POS,
2725                       RX_NORMAL_DESC3_L34T_LEN);
2726         switch (l34t) {
2727         case RX_DESC3_L34T_IPV4_TCP:
2728         case RX_DESC3_L34T_IPV4_UDP:
2729         case RX_DESC3_L34T_IPV6_TCP:
2730         case RX_DESC3_L34T_IPV6_UDP:
2731             pkt_info->rss_hash_type = PKT_HASH_TYPE_L4;
2732             break;
2733         default:
2734             pkt_info->rss_hash_type = PKT_HASH_TYPE_L3;
2735         }
2736     }
2737 
2738     /* Get the pkt_info length */
2739     desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2740                     RX_NORMAL_DESC3_PL_POS,
2741                     RX_NORMAL_DESC3_PL_LEN);
2742 
2743     if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2744                     RX_NORMAL_DESC3_LD_POS,
2745                     RX_NORMAL_DESC3_LD_LEN)) {
2746         /* Not all the data has been transferred for this pkt_info */
2747         pkt_info->attributes = XLGMAC_SET_REG_BITS(
2748                 pkt_info->attributes,
2749                 RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
2750                 RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
2751                 1);
2752         return 0;
2753     }
2754 
2755     /* This is the last of the data for this pkt_info */
2756     pkt_info->attributes = XLGMAC_SET_REG_BITS(
2757             pkt_info->attributes,
2758             RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
2759             RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN,
2760             0);
2761 
2762     /* Set checksum done indicator as appropriate */
2763     if (netdev->features & NETIF_F_RXCSUM)
2764         pkt_info->attributes = XLGMAC_SET_REG_BITS(
2765                 pkt_info->attributes,
2766                 RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
2767                 RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
2768                 1);
2769 
2770     /* Check for errors (only valid in last descriptor) */
2771     err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2772                      RX_NORMAL_DESC3_ES_POS,
2773                      RX_NORMAL_DESC3_ES_LEN);
2774     etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
2775                       RX_NORMAL_DESC3_ETLT_POS,
2776                       RX_NORMAL_DESC3_ETLT_LEN);
2777     netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
2778 
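    /* The driver treats etlt 0x09 as a VLAN-tagged frame (outer tag in
     * desc0) and etlt 0x05/0x06 as checksum trouble, which only clears
     * CSUM_DONE; anything else with the error bit set counts as a
     * frame error.
     */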
2779     if (!err || !etlt) {
2780         /* No error if err is 0 or etlt is 0 */
2781         if ((etlt == 0x09) &&
2782             (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
2783             pkt_info->attributes = XLGMAC_SET_REG_BITS(
2784                     pkt_info->attributes,
2785                     RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
2786                     RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
2787                     1);
2788             pkt_info->vlan_ctag =
2789                 XLGMAC_GET_REG_BITS_LE(dma_desc->desc0,
2790                                RX_NORMAL_DESC0_OVT_POS,
2791                            RX_NORMAL_DESC0_OVT_LEN);
2792             netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
2793                   pkt_info->vlan_ctag);
2794         }
2795     } else {
2796         if ((etlt == 0x05) || (etlt == 0x06))
2797             pkt_info->attributes = XLGMAC_SET_REG_BITS(
2798                     pkt_info->attributes,
2799                     RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
2800                     RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN,
2801                     0);
2802         else
2803             pkt_info->errors = XLGMAC_SET_REG_BITS(
2804                     pkt_info->errors,
2805                     RX_PACKET_ERRORS_FRAME_POS,
2806                     RX_PACKET_ERRORS_FRAME_LEN,
2807                     1);
2808     }
2809 
2810     XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name,
2811           ring->cur & (ring->dma_desc_count - 1), ring->cur);
2812 
2813     return 0;
2814 }
2815 
2816 static int xlgmac_enable_int(struct xlgmac_channel *channel,
2817                  enum xlgmac_int int_id)
2818 {
2819     unsigned int dma_ch_ier;
2820 
2821     dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
2822 
2823     switch (int_id) {
2824     case XLGMAC_INT_DMA_CH_SR_TI:
2825         dma_ch_ier = XLGMAC_SET_REG_BITS(
2826                 dma_ch_ier, DMA_CH_IER_TIE_POS,
2827                 DMA_CH_IER_TIE_LEN, 1);
2828         break;
2829     case XLGMAC_INT_DMA_CH_SR_TPS:
2830         dma_ch_ier = XLGMAC_SET_REG_BITS(
2831                 dma_ch_ier, DMA_CH_IER_TXSE_POS,
2832                 DMA_CH_IER_TXSE_LEN, 1);
2833         break;
2834     case XLGMAC_INT_DMA_CH_SR_TBU:
2835         dma_ch_ier = XLGMAC_SET_REG_BITS(
2836                 dma_ch_ier, DMA_CH_IER_TBUE_POS,
2837                 DMA_CH_IER_TBUE_LEN, 1);
2838         break;
2839     case XLGMAC_INT_DMA_CH_SR_RI:
2840         dma_ch_ier = XLGMAC_SET_REG_BITS(
2841                 dma_ch_ier, DMA_CH_IER_RIE_POS,
2842                 DMA_CH_IER_RIE_LEN, 1);
2843         break;
2844     case XLGMAC_INT_DMA_CH_SR_RBU:
2845         dma_ch_ier = XLGMAC_SET_REG_BITS(
2846                 dma_ch_ier, DMA_CH_IER_RBUE_POS,
2847                 DMA_CH_IER_RBUE_LEN, 1);
2848         break;
2849     case XLGMAC_INT_DMA_CH_SR_RPS:
2850         dma_ch_ier = XLGMAC_SET_REG_BITS(
2851                 dma_ch_ier, DMA_CH_IER_RSE_POS,
2852                 DMA_CH_IER_RSE_LEN, 1);
2853         break;
2854     case XLGMAC_INT_DMA_CH_SR_TI_RI:
2855         dma_ch_ier = XLGMAC_SET_REG_BITS(
2856                 dma_ch_ier, DMA_CH_IER_TIE_POS,
2857                 DMA_CH_IER_TIE_LEN, 1);
2858         dma_ch_ier = XLGMAC_SET_REG_BITS(
2859                 dma_ch_ier, DMA_CH_IER_RIE_POS,
2860                 DMA_CH_IER_RIE_LEN, 1);
2861         break;
2862     case XLGMAC_INT_DMA_CH_SR_FBE:
2863         dma_ch_ier = XLGMAC_SET_REG_BITS(
2864                 dma_ch_ier, DMA_CH_IER_FBEE_POS,
2865                 DMA_CH_IER_FBEE_LEN, 1);
2866         break;
2867     case XLGMAC_INT_DMA_ALL:
2868         dma_ch_ier |= channel->saved_ier;
2869         break;
2870     default:
2871         return -1;
2872     }
2873 
2874     writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
2875 
2876     return 0;
2877 }
2878 
2879 static int xlgmac_disable_int(struct xlgmac_channel *channel,
2880                   enum xlgmac_int int_id)
2881 {
2882     unsigned int dma_ch_ier;
2883 
2884     dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER));
2885 
2886     switch (int_id) {
2887     case XLGMAC_INT_DMA_CH_SR_TI:
2888         dma_ch_ier = XLGMAC_SET_REG_BITS(
2889                 dma_ch_ier, DMA_CH_IER_TIE_POS,
2890                 DMA_CH_IER_TIE_LEN, 0);
2891         break;
2892     case XLGMAC_INT_DMA_CH_SR_TPS:
2893         dma_ch_ier = XLGMAC_SET_REG_BITS(
2894                 dma_ch_ier, DMA_CH_IER_TXSE_POS,
2895                 DMA_CH_IER_TXSE_LEN, 0);
2896         break;
2897     case XLGMAC_INT_DMA_CH_SR_TBU:
2898         dma_ch_ier = XLGMAC_SET_REG_BITS(
2899                 dma_ch_ier, DMA_CH_IER_TBUE_POS,
2900                 DMA_CH_IER_TBUE_LEN, 0);
2901         break;
2902     case XLGMAC_INT_DMA_CH_SR_RI:
2903         dma_ch_ier = XLGMAC_SET_REG_BITS(
2904                 dma_ch_ier, DMA_CH_IER_RIE_POS,
2905                 DMA_CH_IER_RIE_LEN, 0);
2906         break;
2907     case XLGMAC_INT_DMA_CH_SR_RBU:
2908         dma_ch_ier = XLGMAC_SET_REG_BITS(
2909                 dma_ch_ier, DMA_CH_IER_RBUE_POS,
2910                 DMA_CH_IER_RBUE_LEN, 0);
2911         break;
2912     case XLGMAC_INT_DMA_CH_SR_RPS:
2913         dma_ch_ier = XLGMAC_SET_REG_BITS(
2914                 dma_ch_ier, DMA_CH_IER_RSE_POS,
2915                 DMA_CH_IER_RSE_LEN, 0);
2916         break;
2917     case XLGMAC_INT_DMA_CH_SR_TI_RI:
2918         dma_ch_ier = XLGMAC_SET_REG_BITS(
2919                 dma_ch_ier, DMA_CH_IER_TIE_POS,
2920                 DMA_CH_IER_TIE_LEN, 0);
2921         dma_ch_ier = XLGMAC_SET_REG_BITS(
2922                 dma_ch_ier, DMA_CH_IER_RIE_POS,
2923                 DMA_CH_IER_RIE_LEN, 0);
2924         break;
2925     case XLGMAC_INT_DMA_CH_SR_FBE:
2926         dma_ch_ier = XLGMAC_SET_REG_BITS(
2927                 dma_ch_ier, DMA_CH_IER_FBEE_POS,
2928                 DMA_CH_IER_FBEE_LEN, 0);
2929         break;
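    /* Remember the currently enabled bits so that a later
     * xlgmac_enable_int(channel, XLGMAC_INT_DMA_ALL) can restore them.
     */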
2930     case XLGMAC_INT_DMA_ALL:
2931         channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK;
2932         dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK;
2933         break;
2934     default:
2935         return -1;
2936     }
2937 
2938     writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
2939 
2940     return 0;
2941 }
2942 
2943 static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata)
2944 {
2945     unsigned int i, count;
2946     u32 regval;
2947 
2948     for (i = 0; i < pdata->tx_q_count; i++) {
2949         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
2950         regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
2951                          MTL_Q_TQOMR_FTQ_LEN, 1);
2952         writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
2953     }
2954 
2955     /* Poll until the flush has completed on every Tx queue */
2956     for (i = 0; i < pdata->tx_q_count; i++) {
2957         count = 2000;
2958         regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
2959         regval = XLGMAC_GET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
2960                          MTL_Q_TQOMR_FTQ_LEN);
2961         while (--count && regval)
2962             usleep_range(500, 600);
2963 
2964         if (!count)
2965             return -EBUSY;
2966     }
2967 
2968     return 0;
2969 }
2970 
2971 static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata)
2972 {
2973     u32 regval;
2974 
2975     regval = readl(pdata->mac_regs + DMA_SBMR);
2976     /* Set enhanced addressing mode */
2977     regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS,
2978                      DMA_SBMR_EAME_LEN, 1);
2979     /* Set the System Bus mode */
2980     regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS,
2981                      DMA_SBMR_UNDEF_LEN, 1);
2982     regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS,
2983                      DMA_SBMR_BLEN_256_LEN, 1);
2984     writel(regval, pdata->mac_regs + DMA_SBMR);
2985 }
2986 
static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	int ret;

	/* Flush Tx queues */
	ret = xlgmac_flush_tx_queues(pdata);
	if (ret)
		return ret;

	/* Initialize DMA-related features */
	xlgmac_config_dma_bus(pdata);
	xlgmac_config_osp_mode(pdata);
	xlgmac_config_pblx8(pdata);
	xlgmac_config_tx_pbl_val(pdata);
	xlgmac_config_rx_pbl_val(pdata);
	xlgmac_config_rx_coalesce(pdata);
	xlgmac_config_tx_coalesce(pdata);
	xlgmac_config_rx_buffer_size(pdata);
	xlgmac_config_tso_mode(pdata);
	xlgmac_config_sph_mode(pdata);
	xlgmac_config_rss(pdata);
	desc_ops->tx_desc_init(pdata);
	desc_ops->rx_desc_init(pdata);
	xlgmac_enable_dma_interrupts(pdata);

	/* Initialize MTL-related features */
	xlgmac_config_mtl_mode(pdata);
	xlgmac_config_queue_mapping(pdata);
	xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
	xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
	xlgmac_config_tx_fifo_size(pdata);
	xlgmac_config_rx_fifo_size(pdata);
	xlgmac_config_flow_control_threshold(pdata);
	xlgmac_config_rx_fep_enable(pdata);
	xlgmac_config_rx_fup_enable(pdata);
	xlgmac_enable_mtl_interrupts(pdata);

	/* Initialize MAC-related features */
	xlgmac_config_mac_address(pdata);
	xlgmac_config_rx_mode(pdata);
	xlgmac_config_jumbo_enable(pdata);
	xlgmac_config_flow_control(pdata);
	xlgmac_config_mac_speed(pdata);
	xlgmac_config_checksum_offload(pdata);
	xlgmac_config_vlan_support(pdata);
	xlgmac_config_mmc(pdata);
	xlgmac_enable_mac_interrupts(pdata);

	return 0;
}

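/* Undo hw_init by issuing a DMA software reset and polling until the
 * hardware clears the SWR bit to signal that the reset has completed.
 */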
static int xlgmac_hw_exit(struct xlgmac_pdata *pdata)
{
	unsigned int count = 2000;
	u32 regval;

	/* Issue a software reset */
	regval = readl(pdata->mac_regs + DMA_MR);
	regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS,
				     DMA_MR_SWR_LEN, 1);
	writel(regval, pdata->mac_regs + DMA_MR);
	usleep_range(10, 15);

	/* Poll until the SWR bit self-clears, indicating that the software
	 * reset has completed
	 */
	while (--count &&
	       XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR),
				   DMA_MR_SWR_POS, DMA_MR_SWR_LEN))
		usleep_range(500, 600);

	if (!count)
		return -EBUSY;

	return 0;
}

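/* Populate the hw_ops callback table with the register-level helpers
 * implemented in this file, so the rest of the driver can program the
 * hardware through a single ops structure.
 */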
void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops)
{
	hw_ops->init = xlgmac_hw_init;
	hw_ops->exit = xlgmac_hw_exit;

	hw_ops->tx_complete = xlgmac_tx_complete;

	hw_ops->enable_tx = xlgmac_enable_tx;
	hw_ops->disable_tx = xlgmac_disable_tx;
	hw_ops->enable_rx = xlgmac_enable_rx;
	hw_ops->disable_rx = xlgmac_disable_rx;

	hw_ops->dev_xmit = xlgmac_dev_xmit;
	hw_ops->dev_read = xlgmac_dev_read;
	hw_ops->enable_int = xlgmac_enable_int;
	hw_ops->disable_int = xlgmac_disable_int;

	hw_ops->set_mac_address = xlgmac_set_mac_address;
	hw_ops->config_rx_mode = xlgmac_config_rx_mode;
	hw_ops->enable_rx_csum = xlgmac_enable_rx_csum;
	hw_ops->disable_rx_csum = xlgmac_disable_rx_csum;

	/* For XLGMII speed configuration */
	hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed;
	hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed;
	hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed;
	hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed;

	/* For descriptor-related operations */
	hw_ops->tx_desc_init = xlgmac_tx_desc_init;
	hw_ops->rx_desc_init = xlgmac_rx_desc_init;
	hw_ops->tx_desc_reset = xlgmac_tx_desc_reset;
	hw_ops->rx_desc_reset = xlgmac_rx_desc_reset;
	hw_ops->is_last_desc = xlgmac_is_last_desc;
	hw_ops->is_context_desc = xlgmac_is_context_desc;
	hw_ops->tx_start_xmit = xlgmac_tx_start_xmit;

	/* For Flow Control */
	hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control;
	hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control;

	/* For VLAN-related config */
	hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping;
	hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping;
	hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering;
	hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering;
	hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table;

	/* For RX and TX coalescing */
	hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce;
	hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce;
	hw_ops->usec_to_riwt = xlgmac_usec_to_riwt;
	hw_ops->riwt_to_usec = xlgmac_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_ops->config_rx_threshold = xlgmac_config_rx_threshold;
	hw_ops->config_tx_threshold = xlgmac_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_ops->config_rsf_mode = xlgmac_config_rsf_mode;
	hw_ops->config_tsf_mode = xlgmac_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_ops->config_osp_mode = xlgmac_config_osp_mode;

	/* For RX and TX PBL config */
	hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val;
	hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val;
	hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val;
	hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val;
	hw_ops->config_pblx8 = xlgmac_config_pblx8;

	/* For MMC statistics support */
	hw_ops->tx_mmc_int = xlgmac_tx_mmc_int;
	hw_ops->rx_mmc_int = xlgmac_rx_mmc_int;
	hw_ops->read_mmc_stats = xlgmac_read_mmc_stats;

	/* For Receive Side Scaling */
	hw_ops->enable_rss = xlgmac_enable_rss;
	hw_ops->disable_rss = xlgmac_disable_rss;
	hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key;
	hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table;
}
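
/* A minimal usage sketch, not part of this file: the surrounding driver is
 * assumed to keep a struct xlgmac_hw_ops instance in its private data and
 * to wire it up roughly as follows during probe (the pdata/hw_ops naming
 * and call sites here are illustrative, not a quote of that code):
 *
 *	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
 *	int ret;
 *
 *	xlgmac_init_hw_ops(hw_ops);
 *	ret = hw_ops->init(pdata);
 *	if (ret)
 *		return ret;
 *	...
 *	hw_ops->exit(pdata);
 */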