0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Xilinx Axi Ethernet device driver
0004  *
0005  * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
0006  * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
0007  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
0008  * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
0009  * Copyright (c) 2010 - 2011 PetaLogix
0010  * Copyright (c) 2019 - 2022 Calian Advanced Technologies
0011  * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
0012  *
0013  * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
0014  * and Spartan6.
0015  *
0016  * TODO:
0017  *  - Add Axi Fifo support.
0018  *  - Factor out Axi DMA code into separate driver.
0019  *  - Test and fix basic multicast filtering.
0020  *  - Add support for extended multicast filtering.
0021  *  - Test basic VLAN support.
0022  *  - Add support for extended VLAN support.
0023  */
0024 
0025 #include <linux/clk.h>
0026 #include <linux/delay.h>
0027 #include <linux/etherdevice.h>
0028 #include <linux/module.h>
0029 #include <linux/netdevice.h>
0030 #include <linux/of_mdio.h>
0031 #include <linux/of_net.h>
0032 #include <linux/of_platform.h>
0033 #include <linux/of_irq.h>
0034 #include <linux/of_address.h>
0035 #include <linux/skbuff.h>
0036 #include <linux/math64.h>
0037 #include <linux/phy.h>
0038 #include <linux/mii.h>
0039 #include <linux/ethtool.h>
0040 
0041 #include "xilinx_axienet.h"
0042 
0043 /* Descriptors defines for Tx and Rx DMA */
0044 #define TX_BD_NUM_DEFAULT       128
0045 #define RX_BD_NUM_DEFAULT       1024
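/* A single skb can need one Tx descriptor for its linear data plus one per
 * page fragment, so the Tx ring must hold at least MAX_SKB_FRAGS + 1 BDs.
 */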
0046 #define TX_BD_NUM_MIN           (MAX_SKB_FRAGS + 1)
0047 #define TX_BD_NUM_MAX           4096
0048 #define RX_BD_NUM_MAX           4096
0049 
0050 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
0051 #define DRIVER_NAME     "xaxienet"
0052 #define DRIVER_DESCRIPTION  "Xilinx Axi Ethernet driver"
0053 #define DRIVER_VERSION      "1.00a"
0054 
0055 #define AXIENET_REGS_N      40
0056 
0057 /* Match table for of_platform binding */
0058 static const struct of_device_id axienet_of_match[] = {
0059     { .compatible = "xlnx,axi-ethernet-1.00.a", },
0060     { .compatible = "xlnx,axi-ethernet-1.01.a", },
0061     { .compatible = "xlnx,axi-ethernet-2.01.a", },
0062     {},
0063 };
0064 
0065 MODULE_DEVICE_TABLE(of, axienet_of_match);
0066 
0067 /* Option table for setting up Axi Ethernet hardware options */
0068 static struct axienet_option axienet_options[] = {
0069     /* Turn on jumbo packet support for both Rx and Tx */
0070     {
0071         .opt = XAE_OPTION_JUMBO,
0072         .reg = XAE_TC_OFFSET,
0073         .m_or = XAE_TC_JUM_MASK,
0074     }, {
0075         .opt = XAE_OPTION_JUMBO,
0076         .reg = XAE_RCW1_OFFSET,
0077         .m_or = XAE_RCW1_JUM_MASK,
0078     }, { /* Turn on VLAN packet support for both Rx and Tx */
0079         .opt = XAE_OPTION_VLAN,
0080         .reg = XAE_TC_OFFSET,
0081         .m_or = XAE_TC_VLAN_MASK,
0082     }, {
0083         .opt = XAE_OPTION_VLAN,
0084         .reg = XAE_RCW1_OFFSET,
0085         .m_or = XAE_RCW1_VLAN_MASK,
0086     }, { /* Turn on FCS stripping on receive packets */
0087         .opt = XAE_OPTION_FCS_STRIP,
0088         .reg = XAE_RCW1_OFFSET,
0089         .m_or = XAE_RCW1_FCS_MASK,
0090     }, { /* Turn on FCS insertion on transmit packets */
0091         .opt = XAE_OPTION_FCS_INSERT,
0092         .reg = XAE_TC_OFFSET,
0093         .m_or = XAE_TC_FCS_MASK,
0094     }, { /* Turn off length/type field checking on receive packets */
0095         .opt = XAE_OPTION_LENTYPE_ERR,
0096         .reg = XAE_RCW1_OFFSET,
0097         .m_or = XAE_RCW1_LT_DIS_MASK,
0098     }, { /* Turn on Rx flow control */
0099         .opt = XAE_OPTION_FLOW_CONTROL,
0100         .reg = XAE_FCC_OFFSET,
0101         .m_or = XAE_FCC_FCRX_MASK,
0102     }, { /* Turn on Tx flow control */
0103         .opt = XAE_OPTION_FLOW_CONTROL,
0104         .reg = XAE_FCC_OFFSET,
0105         .m_or = XAE_FCC_FCTX_MASK,
0106     }, { /* Turn on promiscuous frame filtering */
0107         .opt = XAE_OPTION_PROMISC,
0108         .reg = XAE_FMI_OFFSET,
0109         .m_or = XAE_FMI_PM_MASK,
0110     }, { /* Enable transmitter */
0111         .opt = XAE_OPTION_TXEN,
0112         .reg = XAE_TC_OFFSET,
0113         .m_or = XAE_TC_TX_MASK,
0114     }, { /* Enable receiver */
0115         .opt = XAE_OPTION_RXEN,
0116         .reg = XAE_RCW1_OFFSET,
0117         .m_or = XAE_RCW1_RX_MASK,
0118     },
0119     {}
0120 };
0121 
0122 /**
0123  * axienet_dma_in32 - Memory mapped Axi DMA register read
0124  * @lp:     Pointer to axienet local structure
0125  * @reg:    Address offset from the base address of the Axi DMA core
0126  *
0127  * Return: The contents of the Axi DMA register
0128  *
0129  * This function returns the contents of the corresponding Axi DMA register.
0130  */
0131 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
0132 {
0133     return ioread32(lp->dma_regs + reg);
0134 }
0135 
0136 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
0137                    struct axidma_bd *desc)
0138 {
0139     desc->phys = lower_32_bits(addr);
0140     if (lp->features & XAE_FEATURE_DMA_64BIT)
0141         desc->phys_msb = upper_32_bits(addr);
0142 }
0143 
0144 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
0145                      struct axidma_bd *desc)
0146 {
0147     dma_addr_t ret = desc->phys;
0148 
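    /* The MSB half is combined with two 16-bit shifts so the expression
     * stays well-defined when dma_addr_t is only 32 bits wide (a single
     * shift by 32 would not be); in that case it simply shifts out to zero.
     */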
0149     if (lp->features & XAE_FEATURE_DMA_64BIT)
0150         ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
0151 
0152     return ret;
0153 }
0154 
0155 /**
0156  * axienet_dma_bd_release - Release buffer descriptor rings
0157  * @ndev:   Pointer to the net_device structure
0158  *
0159  * This function is used to release the descriptors allocated in
0160  * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
0161  * driver's stop routine is called.
0162  */
0163 static void axienet_dma_bd_release(struct net_device *ndev)
0164 {
0165     int i;
0166     struct axienet_local *lp = netdev_priv(ndev);
0167 
0168     /* If we end up here, tx_bd_v must have been DMA allocated. */
0169     dma_free_coherent(lp->dev,
0170               sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
0171               lp->tx_bd_v,
0172               lp->tx_bd_p);
0173 
0174     if (!lp->rx_bd_v)
0175         return;
0176 
0177     for (i = 0; i < lp->rx_bd_num; i++) {
0178         dma_addr_t phys;
0179 
0180         /* A NULL skb means this descriptor has not been initialised
0181          * at all.
0182          */
0183         if (!lp->rx_bd_v[i].skb)
0184             break;
0185 
0186         dev_kfree_skb(lp->rx_bd_v[i].skb);
0187 
0188         /* For each descriptor, we programmed cntrl with the (non-zero)
0189          * descriptor size, after it had been successfully allocated.
0190          * So a non-zero value in there means we need to unmap it.
0191          */
0192         if (lp->rx_bd_v[i].cntrl) {
0193             phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
0194             dma_unmap_single(lp->dev, phys,
0195                      lp->max_frm_size, DMA_FROM_DEVICE);
0196         }
0197     }
0198 
0199     dma_free_coherent(lp->dev,
0200               sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
0201               lp->rx_bd_v,
0202               lp->rx_bd_p);
0203 }
0204 
0205 /**
0206  * axienet_usec_to_timer - Calculate IRQ delay timer value
0207  * @lp:     Pointer to the axienet_local structure
0208  * @coalesce_usec: Microseconds to convert into timer value
0209  */
0210 static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
0211 {
0212     u32 result;
0213     u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
0214 
0215     if (lp->axi_clk)
0216         clk_rate = clk_get_rate(lp->axi_clk);
0217 
0218     /* 1 Timeout Interval = 125 * (clock period of SG clock) */
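    /* For example, with a 125 MHz SG clock one timer tick is 1 us, so the
     * requested microsecond value maps roughly one-to-one onto the timer
     * field, capped at its 8-bit maximum of 255.
     */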
0219     result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
0220                      (u64)125000000);
0221     if (result > 255)
0222         result = 255;
0223 
0224     return result;
0225 }
0226 
0227 /**
0228  * axienet_dma_start - Set up DMA registers and start DMA operation
0229  * @lp:     Pointer to the axienet_local structure
0230  */
0231 static void axienet_dma_start(struct axienet_local *lp)
0232 {
0233     /* Start updating the Rx channel control register */
0234     lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
0235             XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
0236     /* Only set interrupt delay timer if not generating an interrupt on
0237      * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
0238      */
0239     if (lp->coalesce_count_rx > 1)
0240         lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
0241                     << XAXIDMA_DELAY_SHIFT) |
0242                  XAXIDMA_IRQ_DELAY_MASK;
0243     axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
0244 
0245     /* Start updating the Tx channel control register */
0246     lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
0247             XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
0248     /* Only set interrupt delay timer if not generating an interrupt on
0249      * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
0250      */
0251     if (lp->coalesce_count_tx > 1)
0252         lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
0253                     << XAXIDMA_DELAY_SHIFT) |
0254                  XAXIDMA_IRQ_DELAY_MASK;
0255     axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
0256 
0257     /* Populate the tail pointer and bring the Rx Axi DMA engine out of
0258      * halted state. This will make the Rx side ready for reception.
0259      */
0260     axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
0261     lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
0262     axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
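    /* Advancing the tail pointer to the last descriptor hands the whole Rx
     * ring over to the DMA engine in one go.
     */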
0263     axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
0264                  (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
0265 
0266     /* Write to the RS (Run-stop) bit in the Tx channel control register.
0267      * Tx channel is now ready to run. But only after we write to the
0268      * tail pointer register that the Tx channel will start transmitting.
0269      */
0270     axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
0271     lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
0272     axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
0273 }
0274 
0275 /**
0276  * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
0277  * @ndev:   Pointer to the net_device structure
0278  *
0279  * Return: 0 on success, -ENOMEM on failure
0280  *
0281  * This function is called to initialize the Rx and Tx DMA descriptor
0282  * rings. This initializes the descriptors with required default values
0283  * and is called when Axi Ethernet driver reset is called.
0284  */
0285 static int axienet_dma_bd_init(struct net_device *ndev)
0286 {
0287     int i;
0288     struct sk_buff *skb;
0289     struct axienet_local *lp = netdev_priv(ndev);
0290 
0291     /* Reset the indexes which are used for accessing the BDs */
0292     lp->tx_bd_ci = 0;
0293     lp->tx_bd_tail = 0;
0294     lp->rx_bd_ci = 0;
0295 
0296     /* Allocate the Tx and Rx buffer descriptors. */
0297     lp->tx_bd_v = dma_alloc_coherent(lp->dev,
0298                      sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
0299                      &lp->tx_bd_p, GFP_KERNEL);
0300     if (!lp->tx_bd_v)
0301         return -ENOMEM;
0302 
0303     lp->rx_bd_v = dma_alloc_coherent(lp->dev,
0304                      sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
0305                      &lp->rx_bd_p, GFP_KERNEL);
0306     if (!lp->rx_bd_v)
0307         goto out;
0308 
0309     for (i = 0; i < lp->tx_bd_num; i++) {
0310         dma_addr_t addr = lp->tx_bd_p +
0311                   sizeof(*lp->tx_bd_v) *
0312                   ((i + 1) % lp->tx_bd_num);
0313 
0314         lp->tx_bd_v[i].next = lower_32_bits(addr);
0315         if (lp->features & XAE_FEATURE_DMA_64BIT)
0316             lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
0317     }
0318 
0319     for (i = 0; i < lp->rx_bd_num; i++) {
0320         dma_addr_t addr;
0321 
0322         addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
0323             ((i + 1) % lp->rx_bd_num);
0324         lp->rx_bd_v[i].next = lower_32_bits(addr);
0325         if (lp->features & XAE_FEATURE_DMA_64BIT)
0326             lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
0327 
0328         skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
0329         if (!skb)
0330             goto out;
0331 
0332         lp->rx_bd_v[i].skb = skb;
0333         addr = dma_map_single(lp->dev, skb->data,
0334                       lp->max_frm_size, DMA_FROM_DEVICE);
0335         if (dma_mapping_error(lp->dev, addr)) {
0336             netdev_err(ndev, "DMA mapping error\n");
0337             goto out;
0338         }
0339         desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
0340 
0341         lp->rx_bd_v[i].cntrl = lp->max_frm_size;
0342     }
0343 
0344     axienet_dma_start(lp);
0345 
0346     return 0;
0347 out:
0348     axienet_dma_bd_release(ndev);
0349     return -ENOMEM;
0350 }
0351 
0352 /**
0353  * axienet_set_mac_address - Write the MAC address
0354  * @ndev:   Pointer to the net_device structure
0355  * @address:    6-byte address to be written as the MAC address
0356  *
0357  * This function is called to initialize the MAC address of the Axi Ethernet
0358  * core. It writes to the UAW0 and UAW1 registers of the core.
0359  */
0360 static void axienet_set_mac_address(struct net_device *ndev,
0361                     const void *address)
0362 {
0363     struct axienet_local *lp = netdev_priv(ndev);
0364 
0365     if (address)
0366         eth_hw_addr_set(ndev, address);
0367     if (!is_valid_ether_addr(ndev->dev_addr))
0368         eth_hw_addr_random(ndev);
0369 
0370     /* Set up the unicast MAC address filter with this MAC address */
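    /* For an illustrative address such as 00:0a:35:01:02:03, the writes
     * below pack 0x01350a00 into UAW0 and 0x0302 into the low half of UAW1
     * (the upper bits of UAW1 are preserved by the mask).
     */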
0371     axienet_iow(lp, XAE_UAW0_OFFSET,
0372             (ndev->dev_addr[0]) |
0373             (ndev->dev_addr[1] << 8) |
0374             (ndev->dev_addr[2] << 16) |
0375             (ndev->dev_addr[3] << 24));
0376     axienet_iow(lp, XAE_UAW1_OFFSET,
0377             (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
0378               ~XAE_UAW1_UNICASTADDR_MASK) |
0379              (ndev->dev_addr[4] |
0380              (ndev->dev_addr[5] << 8))));
0381 }
0382 
0383 /**
0384  * netdev_set_mac_address - Write the MAC address (from outside the driver)
0385  * @ndev:   Pointer to the net_device structure
0386  * @p:      6-byte address to be written as the MAC address
0387  *
0388  * Return: 0 for all conditions. Presently, there is no failure case.
0389  *
0390  * This function is called to initialize the MAC address of the Axi Ethernet
0391  * core. It calls the core specific axienet_set_mac_address. This is the
0392  * function that goes into net_device_ops structure entry ndo_set_mac_address.
0393  */
0394 static int netdev_set_mac_address(struct net_device *ndev, void *p)
0395 {
0396     struct sockaddr *addr = p;
0397     axienet_set_mac_address(ndev, addr->sa_data);
0398     return 0;
0399 }
0400 
0401 /**
0402  * axienet_set_multicast_list - Prepare the multicast table
0403  * @ndev:   Pointer to the net_device structure
0404  *
0405  * This function is called to initialize the multicast table during
0406  * initialization. The Axi Ethernet basic multicast support has a four-entry
0407  * multicast table which is initialized here. Additionally this function
0408  * goes into the net_device_ops structure entry ndo_set_multicast_list. This
0409  * means whenever the multicast table entries need to be updated this
0410  * function gets called.
0411  */
0412 static void axienet_set_multicast_list(struct net_device *ndev)
0413 {
0414     int i;
0415     u32 reg, af0reg, af1reg;
0416     struct axienet_local *lp = netdev_priv(ndev);
0417 
0418     if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
0419         netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
0420         /* We must make the kernel realize we had to move into
0421          * promiscuous mode. If it was a promiscuous mode request
0422          * the flag is already set. If not we set it.
0423          */
0424         ndev->flags |= IFF_PROMISC;
0425         reg = axienet_ior(lp, XAE_FMI_OFFSET);
0426         reg |= XAE_FMI_PM_MASK;
0427         axienet_iow(lp, XAE_FMI_OFFSET, reg);
0428         dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
0429     } else if (!netdev_mc_empty(ndev)) {
0430         struct netdev_hw_addr *ha;
0431 
0432         i = 0;
0433         netdev_for_each_mc_addr(ha, ndev) {
0434             if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
0435                 break;
0436 
0437             af0reg = (ha->addr[0]);
0438             af0reg |= (ha->addr[1] << 8);
0439             af0reg |= (ha->addr[2] << 16);
0440             af0reg |= (ha->addr[3] << 24);
0441 
0442             af1reg = (ha->addr[4]);
0443             af1reg |= (ha->addr[5] << 8);
0444 
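            /* The low byte of FMI selects which address-filter (CAM) entry
             * the AF0/AF1 writes below will program (entry i here).
             */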
0445             reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
0446             reg |= i;
0447 
0448             axienet_iow(lp, XAE_FMI_OFFSET, reg);
0449             axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
0450             axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
0451             i++;
0452         }
0453     } else {
0454         reg = axienet_ior(lp, XAE_FMI_OFFSET);
0455         reg &= ~XAE_FMI_PM_MASK;
0456 
0457         axienet_iow(lp, XAE_FMI_OFFSET, reg);
0458 
0459         for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
0460             reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
0461             reg |= i;
0462 
0463             axienet_iow(lp, XAE_FMI_OFFSET, reg);
0464             axienet_iow(lp, XAE_AF0_OFFSET, 0);
0465             axienet_iow(lp, XAE_AF1_OFFSET, 0);
0466         }
0467 
0468         dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
0469     }
0470 }
0471 
0472 /**
0473  * axienet_setoptions - Set an Axi Ethernet option
0474  * @ndev:   Pointer to the net_device structure
0475  * @options:    Option to be enabled/disabled
0476  *
0477  * The Axi Ethernet core has multiple features which can be selectively turned
0478  * on or off. Typical options are the jumbo frame option, basic VLAN
0479  * option, promiscuous mode option, etc. This function is used to set or
0480  * clear these options in the Axi Ethernet hardware. This is done through
0481  * the axienet_option table.
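 *
 * For example, the driver itself calls this helper with
 * lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN) to sync every other
 * option to hardware while keeping the transmitter and receiver disabled
 * during reconfiguration.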
0482  */
0483 static void axienet_setoptions(struct net_device *ndev, u32 options)
0484 {
0485     int reg;
0486     struct axienet_local *lp = netdev_priv(ndev);
0487     struct axienet_option *tp = &axienet_options[0];
0488 
0489     while (tp->opt) {
0490         reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
0491         if (options & tp->opt)
0492             reg |= tp->m_or;
0493         axienet_iow(lp, tp->reg, reg);
0494         tp++;
0495     }
0496 
0497     lp->options |= options;
0498 }
0499 
0500 static int __axienet_device_reset(struct axienet_local *lp)
0501 {
0502     u32 value;
0503     int ret;
0504 
0505     /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
0506      * process of Axi DMA takes a while to complete as all pending
0507      * commands/transfers will be flushed or completed during this
0508      * reset process.
0509      * Note that even though both TX and RX have their own reset register,
0510      * they both reset the entire DMA core, so only one needs to be used.
0511      */
0512     axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
0513     ret = read_poll_timeout(axienet_dma_in32, value,
0514                 !(value & XAXIDMA_CR_RESET_MASK),
0515                 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
0516                 XAXIDMA_TX_CR_OFFSET);
0517     if (ret) {
0518         dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
0519         return ret;
0520     }
0521 
0522     /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
0523     ret = read_poll_timeout(axienet_ior, value,
0524                 value & XAE_INT_PHYRSTCMPLT_MASK,
0525                 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
0526                 XAE_IS_OFFSET);
0527     if (ret) {
0528         dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
0529         return ret;
0530     }
0531 
0532     return 0;
0533 }
0534 
0535 /**
0536  * axienet_dma_stop - Stop DMA operation
0537  * @lp:     Pointer to the axienet_local structure
0538  */
0539 static void axienet_dma_stop(struct axienet_local *lp)
0540 {
0541     int count;
0542     u32 cr, sr;
0543 
0544     cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
0545     cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
0546     axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
0547     synchronize_irq(lp->rx_irq);
0548 
0549     cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
0550     cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
0551     axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
0552     synchronize_irq(lp->tx_irq);
0553 
0554     /* Give DMAs a chance to halt gracefully */
0555     sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
0556     for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
0557         msleep(20);
0558         sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
0559     }
0560 
0561     sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
0562     for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
0563         msleep(20);
0564         sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
0565     }
0566 
0567     /* Do a reset to ensure DMA is really stopped */
0568     axienet_lock_mii(lp);
0569     __axienet_device_reset(lp);
0570     axienet_unlock_mii(lp);
0571 }
0572 
0573 /**
0574  * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
0575  * @ndev:   Pointer to the net_device structure
0576  *
0577  * This function is called to reset and initialize the Axi Ethernet core. This
0578  * is typically called during initialization. It does a reset of the Axi DMA
0579  * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
0580  * are connected to Axi Ethernet reset lines, this in turn resets the Axi
0581  * Ethernet core. No separate hardware reset is done for the Axi Ethernet
0582  * core.
0583  * Return: 0 on success or a negative error number otherwise.
0584  */
0585 static int axienet_device_reset(struct net_device *ndev)
0586 {
0587     u32 axienet_status;
0588     struct axienet_local *lp = netdev_priv(ndev);
0589     int ret;
0590 
0591     ret = __axienet_device_reset(lp);
0592     if (ret)
0593         return ret;
0594 
0595     lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
0596     lp->options |= XAE_OPTION_VLAN;
0597     lp->options &= (~XAE_OPTION_JUMBO);
0598 
0599     if ((ndev->mtu > XAE_MTU) &&
0600         (ndev->mtu <= XAE_JUMBO_MTU)) {
0601         lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
0602                     XAE_TRL_SIZE;
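        /* Worked example: a 9000-byte MTU plus VLAN_ETH_HLEN (18) and the
         * FCS trailer (XAE_TRL_SIZE, normally 4 bytes) needs about 9022
         * bytes per receive buffer, which must fit in rxmem before jumbo
         * frames are enabled.
         */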
0603 
0604         if (lp->max_frm_size <= lp->rxmem)
0605             lp->options |= XAE_OPTION_JUMBO;
0606     }
0607 
0608     ret = axienet_dma_bd_init(ndev);
0609     if (ret) {
0610         netdev_err(ndev, "%s: descriptor allocation failed\n",
0611                __func__);
0612         return ret;
0613     }
0614 
0615     axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
0616     axienet_status &= ~XAE_RCW1_RX_MASK;
0617     axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
0618 
0619     axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
0620     if (axienet_status & XAE_INT_RXRJECT_MASK)
0621         axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
0622     axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
0623             XAE_INT_RECV_ERROR_MASK : 0);
0624 
0625     axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
0626 
0627     /* Sync default options with HW but leave receiver and
0628      * transmitter disabled.
0629      */
0630     axienet_setoptions(ndev, lp->options &
0631                ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
0632     axienet_set_mac_address(ndev, NULL);
0633     axienet_set_multicast_list(ndev);
0634     axienet_setoptions(ndev, lp->options);
0635 
0636     netif_trans_update(ndev);
0637 
0638     return 0;
0639 }
0640 
0641 /**
0642  * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
0643  * @lp:     Pointer to the axienet_local structure
0644  * @first_bd:   Index of first descriptor to clean up
0645  * @nr_bds: Max number of descriptors to clean up
0646  * @force:  Whether to clean descriptors even if not complete
0647  * @sizep:  Pointer to a u32 filled with the total sum of all bytes
0648  *      in all cleaned-up descriptors. Ignored if NULL.
0649  * @budget: NAPI budget (use 0 when not called from NAPI poll)
0650  *
0651  * Would either be called after a successful transmit operation, or after
0652  * there was an error when setting up the chain.
0653  * Returns the number of descriptors handled.
0654  */
0655 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
0656                  int nr_bds, bool force, u32 *sizep, int budget)
0657 {
0658     struct axidma_bd *cur_p;
0659     unsigned int status;
0660     dma_addr_t phys;
0661     int i;
0662 
0663     for (i = 0; i < nr_bds; i++) {
0664         cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
0665         status = cur_p->status;
0666 
0667         /* If force is not specified, clean up only descriptors
0668          * that have been completed by the MAC.
0669          */
0670         if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
0671             break;
0672 
0673         /* Ensure we see complete descriptor update */
0674         dma_rmb();
0675         phys = desc_get_phys_addr(lp, cur_p);
0676         dma_unmap_single(lp->dev, phys,
0677                  (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
0678                  DMA_TO_DEVICE);
0679 
0680         if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
0681             napi_consume_skb(cur_p->skb, budget);
0682 
0683         cur_p->app0 = 0;
0684         cur_p->app1 = 0;
0685         cur_p->app2 = 0;
0686         cur_p->app4 = 0;
0687         cur_p->skb = NULL;
0688         /* ensure our transmit path and device don't prematurely see status cleared */
0689         wmb();
0690         cur_p->cntrl = 0;
0691         cur_p->status = 0;
0692 
0693         if (sizep)
0694             *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
0695     }
0696 
0697     return i;
0698 }
0699 
0700 /**
0701  * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
0702  * @lp:     Pointer to the axienet_local structure
0703  * @num_frag:   The number of BDs to check for
0704  *
0705  * Return: 0, on success
0706  *      NETDEV_TX_BUSY, if any of the descriptors are not free
0707  *
0708  * This function is invoked before BDs are allocated and transmission starts.
0709  * This function returns 0 if a BD or group of BDs can be allocated for
0710  * transmission. If the BD or any of the BDs are not free the function
0711  * returns a busy status.
0712  */
0713 static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
0714                         int num_frag)
0715 {
0716     struct axidma_bd *cur_p;
0717 
0718     /* Ensure we see all descriptor updates from device or TX polling */
0719     rmb();
0720     cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
0721                  lp->tx_bd_num];
0722     if (cur_p->cntrl)
0723         return NETDEV_TX_BUSY;
0724     return 0;
0725 }
0726 
0727 /**
0728  * axienet_tx_poll - Invoked once a transmit is completed by the
0729  * Axi DMA Tx channel.
0730  * @napi:   Pointer to NAPI structure.
0731  * @budget: Max number of TX packets to process.
0732  *
0733  * Return: Number of TX packets processed.
0734  *
0735  * This function is invoked from the NAPI processing to notify the completion
0736  * of transmit operation. It clears fields in the corresponding Tx BDs and
0737  * unmaps the corresponding buffer so that CPU can regain ownership of the
0738  * buffer. It finally invokes "netif_wake_queue" to restart transmission if
0739  * required.
0740  */
0741 static int axienet_tx_poll(struct napi_struct *napi, int budget)
0742 {
0743     struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
0744     struct net_device *ndev = lp->ndev;
0745     u32 size = 0;
0746     int packets;
0747 
0748     packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
0749 
0750     if (packets) {
0751         lp->tx_bd_ci += packets;
0752         if (lp->tx_bd_ci >= lp->tx_bd_num)
0753             lp->tx_bd_ci %= lp->tx_bd_num;
0754 
0755         ndev->stats.tx_packets += packets;
0756         ndev->stats.tx_bytes += size;
0757 
0758         /* Matches barrier in axienet_start_xmit */
0759         smp_mb();
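        /* The barrier pairs with the stop-queue/re-check sequence in
         * axienet_start_xmit(): it orders the descriptor clean-up above
         * against the ring-space check below, avoiding a lost wake-up.
         */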
0760 
0761         if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
0762             netif_wake_queue(ndev);
0763     }
0764 
0765     if (packets < budget && napi_complete_done(napi, packets)) {
0766         /* Re-enable TX completion interrupts. This should
0767          * cause an immediate interrupt if any TX packets are
0768          * already pending.
0769          */
0770         axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
0771     }
0772     return packets;
0773 }
0774 
0775 /**
0776  * axienet_start_xmit - Starts the transmission.
0777  * @skb:    sk_buff pointer that contains data to be Txed.
0778  * @ndev:   Pointer to net_device structure.
0779  *
0780  * Return: NETDEV_TX_OK, on success
0781  *      NETDEV_TX_BUSY, if any of the descriptors are not free
0782  *
0783  * This function is invoked from upper layers to initiate transmission. The
0784  * function uses the next available free BDs and populates their fields to
0785  * start the transmission. Additionally if checksum offloading is supported,
0786  * it populates AXI Stream Control fields with appropriate values.
0787  */
0788 static netdev_tx_t
0789 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
0790 {
0791     u32 ii;
0792     u32 num_frag;
0793     u32 csum_start_off;
0794     u32 csum_index_off;
0795     skb_frag_t *frag;
0796     dma_addr_t tail_p, phys;
0797     u32 orig_tail_ptr, new_tail_ptr;
0798     struct axienet_local *lp = netdev_priv(ndev);
0799     struct axidma_bd *cur_p;
0800 
0801     orig_tail_ptr = lp->tx_bd_tail;
0802     new_tail_ptr = orig_tail_ptr;
0803 
0804     num_frag = skb_shinfo(skb)->nr_frags;
0805     cur_p = &lp->tx_bd_v[orig_tail_ptr];
0806 
0807     if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
0808         /* Should not happen as last start_xmit call should have
0809          * checked for sufficient space and queue should only be
0810          * woken when sufficient space is available.
0811          */
0812         netif_stop_queue(ndev);
0813         if (net_ratelimit())
0814             netdev_warn(ndev, "TX ring unexpectedly full\n");
0815         return NETDEV_TX_BUSY;
0816     }
0817 
0818     if (skb->ip_summed == CHECKSUM_PARTIAL) {
0819         if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
0820             /* Tx Full Checksum Offload Enabled */
0821             cur_p->app0 |= 2;
0822         } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
0823             csum_start_off = skb_transport_offset(skb);
0824             csum_index_off = csum_start_off + skb->csum_offset;
0825             /* Tx Partial Checksum Offload Enabled */
0826             cur_p->app0 |= 1;
0827             cur_p->app1 = (csum_start_off << 16) | csum_index_off;
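            /* For a plain IPv4 TCP frame, for instance, csum_start_off is
             * 34 (14-byte Ethernet + 20-byte IP header) and csum_index_off
             * is 34 + 16 = 50, the offset of the TCP checksum field.
             */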
0828         }
0829     } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
0830         cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
0831     }
0832 
0833     phys = dma_map_single(lp->dev, skb->data,
0834                   skb_headlen(skb), DMA_TO_DEVICE);
0835     if (unlikely(dma_mapping_error(lp->dev, phys))) {
0836         if (net_ratelimit())
0837             netdev_err(ndev, "TX DMA mapping error\n");
0838         ndev->stats.tx_dropped++;
0839         return NETDEV_TX_OK;
0840     }
0841     desc_set_phys_addr(lp, phys, cur_p);
0842     cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
0843 
0844     for (ii = 0; ii < num_frag; ii++) {
0845         if (++new_tail_ptr >= lp->tx_bd_num)
0846             new_tail_ptr = 0;
0847         cur_p = &lp->tx_bd_v[new_tail_ptr];
0848         frag = &skb_shinfo(skb)->frags[ii];
0849         phys = dma_map_single(lp->dev,
0850                       skb_frag_address(frag),
0851                       skb_frag_size(frag),
0852                       DMA_TO_DEVICE);
0853         if (unlikely(dma_mapping_error(lp->dev, phys))) {
0854             if (net_ratelimit())
0855                 netdev_err(ndev, "TX DMA mapping error\n");
0856             ndev->stats.tx_dropped++;
0857             axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
0858                           true, NULL, 0);
0859             return NETDEV_TX_OK;
0860         }
0861         desc_set_phys_addr(lp, phys, cur_p);
0862         cur_p->cntrl = skb_frag_size(frag);
0863     }
0864 
0865     cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
0866     cur_p->skb = skb;
0867 
0868     tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
0869     if (++new_tail_ptr >= lp->tx_bd_num)
0870         new_tail_ptr = 0;
0871     WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
0872 
0873     /* Start the transfer */
0874     axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
0875 
0876     /* Stop queue if next transmit may not have space */
0877     if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
0878         netif_stop_queue(ndev);
0879 
0880         /* Matches barrier in axienet_tx_poll */
0881         smp_mb();
0882 
0883         /* Space might have just been freed - check again */
0884         if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
0885             netif_wake_queue(ndev);
0886     }
0887 
0888     return NETDEV_TX_OK;
0889 }
0890 
0891 /**
0892  * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
0893  * @napi:   Pointer to NAPI structure.
0894  * @budget: Max number of RX packets to process.
0895  *
0896  * Return: Number of RX packets processed.
0897  */
0898 static int axienet_rx_poll(struct napi_struct *napi, int budget)
0899 {
0900     u32 length;
0901     u32 csumstatus;
0902     u32 size = 0;
0903     int packets = 0;
0904     dma_addr_t tail_p = 0;
0905     struct axidma_bd *cur_p;
0906     struct sk_buff *skb, *new_skb;
0907     struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
0908 
0909     cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
0910 
0911     while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
0912         dma_addr_t phys;
0913 
0914         /* Ensure we see complete descriptor update */
0915         dma_rmb();
0916 
0917         skb = cur_p->skb;
0918         cur_p->skb = NULL;
0919 
0920         /* skb could be NULL if a previous pass already received the
0921          * packet for this slot in the ring, but failed to refill it
0922          * with a newly allocated buffer. In this case, don't try to
0923          * receive it again.
0924          */
0925         if (likely(skb)) {
0926             length = cur_p->app4 & 0x0000FFFF;
0927 
0928             phys = desc_get_phys_addr(lp, cur_p);
0929             dma_unmap_single(lp->dev, phys, lp->max_frm_size,
0930                      DMA_FROM_DEVICE);
0931 
0932             skb_put(skb, length);
0933             skb->protocol = eth_type_trans(skb, lp->ndev);
0934             /*skb_checksum_none_assert(skb);*/
0935             skb->ip_summed = CHECKSUM_NONE;
0936 
0937             /* if we're doing Rx csum offload, set it up */
0938             if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
0939                 csumstatus = (cur_p->app2 &
0940                           XAE_FULL_CSUM_STATUS_MASK) >> 3;
0941                 if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
0942                     csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
0943                     skb->ip_summed = CHECKSUM_UNNECESSARY;
0944                 }
0945             } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
0946                    skb->protocol == htons(ETH_P_IP) &&
0947                    skb->len > 64) {
0948                 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
0949                 skb->ip_summed = CHECKSUM_COMPLETE;
0950             }
0951 
0952             napi_gro_receive(napi, skb);
0953 
0954             size += length;
0955             packets++;
0956         }
0957 
0958         new_skb = napi_alloc_skb(napi, lp->max_frm_size);
0959         if (!new_skb)
0960             break;
0961 
0962         phys = dma_map_single(lp->dev, new_skb->data,
0963                       lp->max_frm_size,
0964                       DMA_FROM_DEVICE);
0965         if (unlikely(dma_mapping_error(lp->dev, phys))) {
0966             if (net_ratelimit())
0967                 netdev_err(lp->ndev, "RX DMA mapping error\n");
0968             dev_kfree_skb(new_skb);
0969             break;
0970         }
0971         desc_set_phys_addr(lp, phys, cur_p);
0972 
0973         cur_p->cntrl = lp->max_frm_size;
0974         cur_p->status = 0;
0975         cur_p->skb = new_skb;
0976 
0977         /* Only update tail_p to mark this slot as usable after it has
0978          * been successfully refilled.
0979          */
0980         tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
0981 
0982         if (++lp->rx_bd_ci >= lp->rx_bd_num)
0983             lp->rx_bd_ci = 0;
0984         cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
0985     }
0986 
0987     lp->ndev->stats.rx_packets += packets;
0988     lp->ndev->stats.rx_bytes += size;
0989 
0990     if (tail_p)
0991         axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
0992 
0993     if (packets < budget && napi_complete_done(napi, packets)) {
0994         /* Re-enable RX completion interrupts. This should
0995          * cause an immediate interrupt if any RX packets are
0996          * already pending.
0997          */
0998         axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
0999     }
1000     return packets;
1001 }
1002 
1003 /**
1004  * axienet_tx_irq - Tx Done Isr.
1005  * @irq:    irq number
1006  * @_ndev:  net_device pointer
1007  *
1008  * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1009  *
1010  * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1011  * TX BD processing.
1012  */
1013 static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1014 {
1015     unsigned int status;
1016     struct net_device *ndev = _ndev;
1017     struct axienet_local *lp = netdev_priv(ndev);
1018 
1019     status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1020 
1021     if (!(status & XAXIDMA_IRQ_ALL_MASK))
1022         return IRQ_NONE;
1023 
1024     axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1025 
1026     if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1027         netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1028         netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1029                (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1030                (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1031         schedule_work(&lp->dma_err_task);
1032     } else {
1033         /* Disable further TX completion interrupts and schedule
1034          * NAPI to handle the completions.
1035          */
1036         u32 cr = lp->tx_dma_cr;
1037 
1038         cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1039         axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1040 
1041         napi_schedule(&lp->napi_tx);
1042     }
1043 
1044     return IRQ_HANDLED;
1045 }
1046 
1047 /**
1048  * axienet_rx_irq - Rx Isr.
1049  * @irq:    irq number
1050  * @_ndev:  net_device pointer
1051  *
1052  * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1053  *
1054  * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1055  * processing.
1056  */
1057 static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1058 {
1059     unsigned int status;
1060     struct net_device *ndev = _ndev;
1061     struct axienet_local *lp = netdev_priv(ndev);
1062 
1063     status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1064 
1065     if (!(status & XAXIDMA_IRQ_ALL_MASK))
1066         return IRQ_NONE;
1067 
1068     axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1069 
1070     if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1071         netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1072         netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1073                (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1074                (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1075         schedule_work(&lp->dma_err_task);
1076     } else {
1077         /* Disable further RX completion interrupts and schedule
1078          * NAPI receive.
1079          */
1080         u32 cr = lp->rx_dma_cr;
1081 
1082         cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1083         axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1084 
1085         napi_schedule(&lp->napi_rx);
1086     }
1087 
1088     return IRQ_HANDLED;
1089 }
1090 
1091 /**
1092  * axienet_eth_irq - Ethernet core Isr.
1093  * @irq:    irq number
1094  * @_ndev:  net_device pointer
1095  *
1096  * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1097  *
1098  * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1099  */
1100 static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1101 {
1102     struct net_device *ndev = _ndev;
1103     struct axienet_local *lp = netdev_priv(ndev);
1104     unsigned int pending;
1105 
1106     pending = axienet_ior(lp, XAE_IP_OFFSET);
1107     if (!pending)
1108         return IRQ_NONE;
1109 
1110     if (pending & XAE_INT_RXFIFOOVR_MASK)
1111         ndev->stats.rx_missed_errors++;
1112 
1113     if (pending & XAE_INT_RXRJECT_MASK)
1114         ndev->stats.rx_frame_errors++;
1115 
1116     axienet_iow(lp, XAE_IS_OFFSET, pending);
1117     return IRQ_HANDLED;
1118 }
1119 
1120 static void axienet_dma_err_handler(struct work_struct *work);
1121 
1122 /**
1123  * axienet_open - Driver open routine.
1124  * @ndev:   Pointer to net_device structure
1125  *
1126  * Return: 0, on success.
1127  *      non-zero error value on failure
1128  *
1129  * This is the driver open routine. It calls phylink_start to start the
1130  * PHY device.
1131  * It also allocates interrupt service routines, enables the interrupt lines
1132  * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1133  * descriptors are initialized.
1134  */
1135 static int axienet_open(struct net_device *ndev)
1136 {
1137     int ret;
1138     struct axienet_local *lp = netdev_priv(ndev);
1139 
1140     dev_dbg(&ndev->dev, "axienet_open()\n");
1141 
1142     /* When we do an Axi Ethernet reset, it resets the complete core
1143      * including the MDIO. MDIO must be disabled before resetting.
1144      * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1145      */
1146     axienet_lock_mii(lp);
1147     ret = axienet_device_reset(ndev);
0148     axienet_unlock_mii(lp);
    if (ret)
        return ret;
1149 
1150     ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1151     if (ret) {
1152         dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1153         return ret;
1154     }
1155 
1156     phylink_start(lp->phylink);
1157 
1158     /* Enable worker thread for Axi DMA error handling */
1159     INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1160 
1161     napi_enable(&lp->napi_rx);
1162     napi_enable(&lp->napi_tx);
1163 
1164     /* Enable interrupts for Axi DMA Tx */
1165     ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1166               ndev->name, ndev);
1167     if (ret)
1168         goto err_tx_irq;
1169     /* Enable interrupts for Axi DMA Rx */
1170     ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1171               ndev->name, ndev);
1172     if (ret)
1173         goto err_rx_irq;
1174     /* Enable interrupts for Axi Ethernet core (if defined) */
1175     if (lp->eth_irq > 0) {
1176         ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1177                   ndev->name, ndev);
1178         if (ret)
1179             goto err_eth_irq;
1180     }
1181 
1182     return 0;
1183 
1184 err_eth_irq:
1185     free_irq(lp->rx_irq, ndev);
1186 err_rx_irq:
1187     free_irq(lp->tx_irq, ndev);
1188 err_tx_irq:
1189     napi_disable(&lp->napi_tx);
1190     napi_disable(&lp->napi_rx);
1191     phylink_stop(lp->phylink);
1192     phylink_disconnect_phy(lp->phylink);
1193     cancel_work_sync(&lp->dma_err_task);
1194     dev_err(lp->dev, "request_irq() failed\n");
1195     return ret;
1196 }
1197 
1198 /**
1199  * axienet_stop - Driver stop routine.
1200  * @ndev:   Pointer to net_device structure
1201  *
1202  * Return: 0, on success.
1203  *
1204  * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1205  * device. It also removes the interrupt handlers and disables the interrupts.
1206  * The Axi DMA Tx/Rx BDs are released.
1207  */
1208 static int axienet_stop(struct net_device *ndev)
1209 {
1210     struct axienet_local *lp = netdev_priv(ndev);
1211 
1212     dev_dbg(&ndev->dev, "axienet_close()\n");
1213 
1214     napi_disable(&lp->napi_tx);
1215     napi_disable(&lp->napi_rx);
1216 
1217     phylink_stop(lp->phylink);
1218     phylink_disconnect_phy(lp->phylink);
1219 
1220     axienet_setoptions(ndev, lp->options &
1221                ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1222 
1223     axienet_dma_stop(lp);
1224 
1225     axienet_iow(lp, XAE_IE_OFFSET, 0);
1226 
1227     cancel_work_sync(&lp->dma_err_task);
1228 
1229     if (lp->eth_irq > 0)
1230         free_irq(lp->eth_irq, ndev);
1231     free_irq(lp->tx_irq, ndev);
1232     free_irq(lp->rx_irq, ndev);
1233 
1234     axienet_dma_bd_release(ndev);
1235     return 0;
1236 }
1237 
1238 /**
1239  * axienet_change_mtu - Driver change mtu routine.
1240  * @ndev:   Pointer to net_device structure
1241  * @new_mtu:    New mtu value to be applied
1242  *
1243  * Return: 0 on success, -EBUSY if the device is up, or -EINVAL if the new
 * MTU is too large for the receive buffer memory.
1244  *
1245  * This is the change mtu driver routine. It checks if the Axi Ethernet
1246  * hardware supports jumbo frames before changing the mtu. This can be
1247  * called only when the device is not up.
1248  */
1249 static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1250 {
1251     struct axienet_local *lp = netdev_priv(ndev);
1252 
1253     if (netif_running(ndev))
1254         return -EBUSY;
1255 
1256     if ((new_mtu + VLAN_ETH_HLEN +
1257         XAE_TRL_SIZE) > lp->rxmem)
1258         return -EINVAL;
1259 
1260     ndev->mtu = new_mtu;
1261 
1262     return 0;
1263 }
1264 
1265 #ifdef CONFIG_NET_POLL_CONTROLLER
1266 /**
1267  * axienet_poll_controller - Axi Ethernet poll mechanism.
1268  * @ndev:   Pointer to net_device structure
1269  *
1270  * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1271  * to polling the ISRs and are enabled back after the polling is done.
1272  */
1273 static void axienet_poll_controller(struct net_device *ndev)
1274 {
1275     struct axienet_local *lp = netdev_priv(ndev);
1276     disable_irq(lp->tx_irq);
1277     disable_irq(lp->rx_irq);
1278     axienet_rx_irq(lp->tx_irq, ndev);
1279     axienet_tx_irq(lp->rx_irq, ndev);
1280     enable_irq(lp->tx_irq);
1281     enable_irq(lp->rx_irq);
1282 }
1283 #endif
1284 
1285 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1286 {
1287     struct axienet_local *lp = netdev_priv(dev);
1288 
1289     if (!netif_running(dev))
1290         return -EINVAL;
1291 
1292     return phylink_mii_ioctl(lp->phylink, rq, cmd);
1293 }
1294 
1295 static const struct net_device_ops axienet_netdev_ops = {
1296     .ndo_open = axienet_open,
1297     .ndo_stop = axienet_stop,
1298     .ndo_start_xmit = axienet_start_xmit,
1299     .ndo_change_mtu = axienet_change_mtu,
1300     .ndo_set_mac_address = netdev_set_mac_address,
1301     .ndo_validate_addr = eth_validate_addr,
1302     .ndo_eth_ioctl = axienet_ioctl,
1303     .ndo_set_rx_mode = axienet_set_multicast_list,
1304 #ifdef CONFIG_NET_POLL_CONTROLLER
1305     .ndo_poll_controller = axienet_poll_controller,
1306 #endif
1307 };
1308 
1309 /**
1310  * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1311  * @ndev:   Pointer to net_device structure
1312  * @ed:     Pointer to ethtool_drvinfo structure
1313  *
1314  * This implements ethtool command for getting the driver information.
1315  * Issue "ethtool -i ethX" under linux prompt to execute this function.
1316  */
1317 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1318                      struct ethtool_drvinfo *ed)
1319 {
1320     strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1321     strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1322 }
1323 
1324 /**
1325  * axienet_ethtools_get_regs_len - Get the total regs length present in the
1326  *                 AxiEthernet core.
1327  * @ndev:   Pointer to net_device structure
1328  *
1329  * This implements ethtool command for getting the total register length
1330  * information.
1331  *
1332  * Return: the total regs length
1333  */
1334 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1335 {
1336     return sizeof(u32) * AXIENET_REGS_N;
1337 }
1338 
1339 /**
1340  * axienet_ethtools_get_regs - Dump the contents of all registers present
1341  *                 in AxiEthernet core.
1342  * @ndev:   Pointer to net_device structure
1343  * @regs:   Pointer to ethtool_regs structure
1344  * @ret:    Void pointer used to return the contents of the registers.
1345  *
1346  * This implements ethtool command for getting the Axi Ethernet register dump.
1347  * Issue "ethtool -d ethX" to execute this function.
1348  */
1349 static void axienet_ethtools_get_regs(struct net_device *ndev,
1350                       struct ethtool_regs *regs, void *ret)
1351 {
1352     u32 *data = (u32 *) ret;
1353     size_t len = sizeof(u32) * AXIENET_REGS_N;
1354     struct axienet_local *lp = netdev_priv(ndev);
1355 
1356     regs->version = 0;
1357     regs->len = len;
1358 
1359     memset(data, 0, len);
1360     data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1361     data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1362     data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1363     data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1364     data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1365     data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1366     data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1367     data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1368     data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1369     data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1370     data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1371     data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1372     data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1373     data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1374     data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1375     data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1376     data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1377     data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1378     data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1379     data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1380     data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1381     data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1382     data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
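    /* data[23]..data[26] are intentionally left at the zero written by the
     * memset() above; no registers are dumped into those slots.
     */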
1383     data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1384     data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1385     data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1386     data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1387     data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1388     data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1389     data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1390     data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1391     data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1392     data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1393     data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1394     data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1395     data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1396 }
1397 
1398 static void
1399 axienet_ethtools_get_ringparam(struct net_device *ndev,
1400                    struct ethtool_ringparam *ering,
1401                    struct kernel_ethtool_ringparam *kernel_ering,
1402                    struct netlink_ext_ack *extack)
1403 {
1404     struct axienet_local *lp = netdev_priv(ndev);
1405 
1406     ering->rx_max_pending = RX_BD_NUM_MAX;
1407     ering->rx_mini_max_pending = 0;
1408     ering->rx_jumbo_max_pending = 0;
1409     ering->tx_max_pending = TX_BD_NUM_MAX;
1410     ering->rx_pending = lp->rx_bd_num;
1411     ering->rx_mini_pending = 0;
1412     ering->rx_jumbo_pending = 0;
1413     ering->tx_pending = lp->tx_bd_num;
1414 }
1415 
1416 static int
1417 axienet_ethtools_set_ringparam(struct net_device *ndev,
1418                    struct ethtool_ringparam *ering,
1419                    struct kernel_ethtool_ringparam *kernel_ering,
1420                    struct netlink_ext_ack *extack)
1421 {
1422     struct axienet_local *lp = netdev_priv(ndev);
1423 
1424     if (ering->rx_pending > RX_BD_NUM_MAX ||
1425         ering->rx_mini_pending ||
1426         ering->rx_jumbo_pending ||
1427         ering->tx_pending < TX_BD_NUM_MIN ||
1428         ering->tx_pending > TX_BD_NUM_MAX)
1429         return -EINVAL;
1430 
1431     if (netif_running(ndev))
1432         return -EBUSY;
1433 
1434     lp->rx_bd_num = ering->rx_pending;
1435     lp->tx_bd_num = ering->tx_pending;
1436     return 0;
1437 }
1438 
1439 /**
1440  * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1441  *                   Tx and Rx paths.
1442  * @ndev:   Pointer to net_device structure
1443  * @epauseparm: Pointer to ethtool_pauseparam structure.
1444  *
1445  * This implements ethtool command for getting axi ethernet pause frame
1446  * setting. Issue "ethtool -a ethX" to execute this function.
1447  */
1448 static void
1449 axienet_ethtools_get_pauseparam(struct net_device *ndev,
1450                 struct ethtool_pauseparam *epauseparm)
1451 {
1452     struct axienet_local *lp = netdev_priv(ndev);
1453 
1454     phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1455 }
1456 
1457 /**
1458  * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1459  *                   settings.
1460  * @ndev:   Pointer to net_device structure
1461  * @epauseparm: Pointer to ethtool_pauseparam structure
1462  *
1463  * This implements ethtool command for enabling flow control on Rx and Tx
1464  * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1465  * function.
1466  *
1467  * Return: 0 on success, or a negative error code returned by phylink
1468  */
1469 static int
1470 axienet_ethtools_set_pauseparam(struct net_device *ndev,
1471                 struct ethtool_pauseparam *epauseparm)
1472 {
1473     struct axienet_local *lp = netdev_priv(ndev);
1474 
1475     return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
1476 }
1477 
1478 /**
1479  * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
1480  * @ndev:   Pointer to net_device structure
1481  * @ecoalesce:  Pointer to ethtool_coalesce structure
1482  * @kernel_coal: ethtool CQE mode setting structure
1483  * @extack: extack for reporting error messages
1484  *
1485  * This implements ethtool command for getting the DMA interrupt coalescing
1486  * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
1487  * execute this function.
1488  *
1489  * Return: 0 always
1490  */
1491 static int
1492 axienet_ethtools_get_coalesce(struct net_device *ndev,
1493                   struct ethtool_coalesce *ecoalesce,
1494                   struct kernel_ethtool_coalesce *kernel_coal,
1495                   struct netlink_ext_ack *extack)
1496 {
1497     struct axienet_local *lp = netdev_priv(ndev);
1498 
1499     ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
1500     ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
1501     ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
1502     ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
1503     return 0;
1504 }
1505 
1506 /**
1507  * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
1508  * @ndev:   Pointer to net_device structure
1509  * @ecoalesce:  Pointer to ethtool_coalesce structure
1510  * @kernel_coal: ethtool CQE mode setting structure
1511  * @extack: extack for reporting error messages
1512  *
1513  * This implements the ethtool command for setting the DMA interrupt
1514  * coalescing count on the Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5"
1515  * under the Linux prompt to execute this function.
1516  *
1517  * Return: 0 on success, non-zero error value on failure.
1518  */
1519 static int
1520 axienet_ethtools_set_coalesce(struct net_device *ndev,
1521                   struct ethtool_coalesce *ecoalesce,
1522                   struct kernel_ethtool_coalesce *kernel_coal,
1523                   struct netlink_ext_ack *extack)
1524 {
1525     struct axienet_local *lp = netdev_priv(ndev);
1526 
1527     if (netif_running(ndev)) {
1528         netdev_err(ndev,
1529                "Please stop netif before applying configuration\n");
1530         return -EFAULT;
1531     }
1532 
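         /* A value of zero means "leave unchanged": only the parameters the
          * user actually supplied overwrite the current settings.
          */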
1533     if (ecoalesce->rx_max_coalesced_frames)
1534         lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
1535     if (ecoalesce->rx_coalesce_usecs)
1536         lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
1537     if (ecoalesce->tx_max_coalesced_frames)
1538         lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
1539     if (ecoalesce->tx_coalesce_usecs)
1540         lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
1541 
1542     return 0;
1543 }
1544 
1545 static int
1546 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
1547                     struct ethtool_link_ksettings *cmd)
1548 {
1549     struct axienet_local *lp = netdev_priv(ndev);
1550 
1551     return phylink_ethtool_ksettings_get(lp->phylink, cmd);
1552 }
1553 
1554 static int
1555 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
1556                     const struct ethtool_link_ksettings *cmd)
1557 {
1558     struct axienet_local *lp = netdev_priv(ndev);
1559 
1560     return phylink_ethtool_ksettings_set(lp->phylink, cmd);
1561 }
1562 
1563 static int axienet_ethtools_nway_reset(struct net_device *dev)
1564 {
1565     struct axienet_local *lp = netdev_priv(dev);
1566 
1567     return phylink_ethtool_nway_reset(lp->phylink);
1568 }
1569 
1570 static const struct ethtool_ops axienet_ethtool_ops = {
1571     .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
1572                      ETHTOOL_COALESCE_USECS,
1573     .get_drvinfo    = axienet_ethtools_get_drvinfo,
1574     .get_regs_len   = axienet_ethtools_get_regs_len,
1575     .get_regs       = axienet_ethtools_get_regs,
1576     .get_link       = ethtool_op_get_link,
1577     .get_ringparam  = axienet_ethtools_get_ringparam,
1578     .set_ringparam  = axienet_ethtools_set_ringparam,
1579     .get_pauseparam = axienet_ethtools_get_pauseparam,
1580     .set_pauseparam = axienet_ethtools_set_pauseparam,
1581     .get_coalesce   = axienet_ethtools_get_coalesce,
1582     .set_coalesce   = axienet_ethtools_set_coalesce,
1583     .get_link_ksettings = axienet_ethtools_get_link_ksettings,
1584     .set_link_ksettings = axienet_ethtools_set_link_ksettings,
1585     .nway_reset = axienet_ethtools_nway_reset,
1586 };
1587 
1588 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
1589 {
1590     return container_of(pcs, struct axienet_local, pcs);
1591 }
1592 
1593 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
1594                   struct phylink_link_state *state)
1595 {
1596     struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1597 
1598     phylink_mii_c22_pcs_get_state(pcs_phy, state);
1599 }
1600 
1601 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
1602 {
1603     struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1604 
1605     phylink_mii_c22_pcs_an_restart(pcs_phy);
1606 }
1607 
1608 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1609                   phy_interface_t interface,
1610                   const unsigned long *advertising,
1611                   bool permit_pause_to_mac)
1612 {
1613     struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1614     struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
1615     struct axienet_local *lp = netdev_priv(ndev);
1616     int ret;
1617 
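         /* On cores built with runtime SGMII/1000BaseX switching support,
          * select the requested standard in the PCS/PMA core before
          * (re)configuring it.
          */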
1618     if (lp->switch_x_sgmii) {
1619         ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
1620                     interface == PHY_INTERFACE_MODE_SGMII ?
1621                     XLNX_MII_STD_SELECT_SGMII : 0);
1622         if (ret < 0) {
1623             netdev_warn(ndev,
1624                     "Failed to switch PHY interface: %d\n",
1625                     ret);
1626             return ret;
1627         }
1628     }
1629 
1630     ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
1631     if (ret < 0)
1632         netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
1633 
1634     return ret;
1635 }
1636 
1637 static const struct phylink_pcs_ops axienet_pcs_ops = {
1638     .pcs_get_state = axienet_pcs_get_state,
1639     .pcs_config = axienet_pcs_config,
1640     .pcs_an_restart = axienet_pcs_an_restart,
1641 };
1642 
1643 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
1644                           phy_interface_t interface)
1645 {
1646     struct net_device *ndev = to_net_dev(config->dev);
1647     struct axienet_local *lp = netdev_priv(ndev);
1648 
1649     if (interface == PHY_INTERFACE_MODE_1000BASEX ||
1650         interface ==  PHY_INTERFACE_MODE_SGMII)
1651         return &lp->pcs;
1652 
1653     return NULL;
1654 }
1655 
1656 static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
1657                    const struct phylink_link_state *state)
1658 {
1659     /* nothing meaningful to do */
1660 }
1661 
1662 static void axienet_mac_link_down(struct phylink_config *config,
1663                   unsigned int mode,
1664                   phy_interface_t interface)
1665 {
1666     /* nothing meaningful to do */
1667 }
1668 
1669 static void axienet_mac_link_up(struct phylink_config *config,
1670                 struct phy_device *phy,
1671                 unsigned int mode, phy_interface_t interface,
1672                 int speed, int duplex,
1673                 bool tx_pause, bool rx_pause)
1674 {
1675     struct net_device *ndev = to_net_dev(config->dev);
1676     struct axienet_local *lp = netdev_priv(ndev);
1677     u32 emmc_reg, fcc_reg;
1678 
1679     emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
1680     emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
1681 
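         /* Translate the link speed resolved by phylink into the EMMC register
          * encoding; speeds other than 10/100/1000 Mbps are rejected below.
          */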
1682     switch (speed) {
1683     case SPEED_1000:
1684         emmc_reg |= XAE_EMMC_LINKSPD_1000;
1685         break;
1686     case SPEED_100:
1687         emmc_reg |= XAE_EMMC_LINKSPD_100;
1688         break;
1689     case SPEED_10:
1690         emmc_reg |= XAE_EMMC_LINKSPD_10;
1691         break;
1692     default:
1693         dev_err(&ndev->dev,
1694             "Speeds other than 10, 100 and 1000 Mbps are not supported\n");
1695         break;
1696     }
1697 
1698     axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
1699 
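         /* Mirror the negotiated pause settings into the MAC flow control
          * register so that Tx/Rx pause frames are handled accordingly.
          */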
1700     fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
1701     if (tx_pause)
1702         fcc_reg |= XAE_FCC_FCTX_MASK;
1703     else
1704         fcc_reg &= ~XAE_FCC_FCTX_MASK;
1705     if (rx_pause)
1706         fcc_reg |= XAE_FCC_FCRX_MASK;
1707     else
1708         fcc_reg &= ~XAE_FCC_FCRX_MASK;
1709     axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
1710 }
1711 
1712 static const struct phylink_mac_ops axienet_phylink_ops = {
1713     .validate = phylink_generic_validate,
1714     .mac_select_pcs = axienet_mac_select_pcs,
1715     .mac_config = axienet_mac_config,
1716     .mac_link_down = axienet_mac_link_down,
1717     .mac_link_up = axienet_mac_link_up,
1718 };
1719 
1720 /**
1721  * axienet_dma_err_handler - Work queue task for Axi DMA Error
1722  * @work:   pointer to work_struct
1723  *
1724  * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1725  * Tx/Rx BDs.
1726  */
1727 static void axienet_dma_err_handler(struct work_struct *work)
1728 {
1729     u32 i;
1730     u32 axienet_status;
1731     struct axidma_bd *cur_p;
1732     struct axienet_local *lp = container_of(work, struct axienet_local,
1733                         dma_err_task);
1734     struct net_device *ndev = lp->ndev;
1735 
1736     napi_disable(&lp->napi_tx);
1737     napi_disable(&lp->napi_rx);
1738 
1739     axienet_setoptions(ndev, lp->options &
1740                ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1741 
1742     axienet_dma_stop(lp);
1743 
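         /* Release any Tx buffers that were still in flight and return every
          * Tx descriptor to a clean state before restarting the DMA engine.
          */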
1744     for (i = 0; i < lp->tx_bd_num; i++) {
1745         cur_p = &lp->tx_bd_v[i];
1746         if (cur_p->cntrl) {
1747             dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
1748 
1749             dma_unmap_single(lp->dev, addr,
1750                      (cur_p->cntrl &
1751                       XAXIDMA_BD_CTRL_LENGTH_MASK),
1752                      DMA_TO_DEVICE);
1753         }
1754         if (cur_p->skb)
1755             dev_kfree_skb_irq(cur_p->skb);
1756         cur_p->phys = 0;
1757         cur_p->phys_msb = 0;
1758         cur_p->cntrl = 0;
1759         cur_p->status = 0;
1760         cur_p->app0 = 0;
1761         cur_p->app1 = 0;
1762         cur_p->app2 = 0;
1763         cur_p->app3 = 0;
1764         cur_p->app4 = 0;
1765         cur_p->skb = NULL;
1766     }
1767 
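         /* Clear the completion status of the Rx descriptors so the ring can
          * be reused from the start; the attached Rx buffers are kept as-is.
          */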
1768     for (i = 0; i < lp->rx_bd_num; i++) {
1769         cur_p = &lp->rx_bd_v[i];
1770         cur_p->status = 0;
1771         cur_p->app0 = 0;
1772         cur_p->app1 = 0;
1773         cur_p->app2 = 0;
1774         cur_p->app3 = 0;
1775         cur_p->app4 = 0;
1776     }
1777 
1778     lp->tx_bd_ci = 0;
1779     lp->tx_bd_tail = 0;
1780     lp->rx_bd_ci = 0;
1781 
1782     axienet_dma_start(lp);
1783 
1784     axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1785     axienet_status &= ~XAE_RCW1_RX_MASK;
1786     axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1787 
1788     axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1789     if (axienet_status & XAE_INT_RXRJECT_MASK)
1790         axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1791     axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
1792             XAE_INT_RECV_ERROR_MASK : 0);
1793     axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1794 
1795     /* Sync default options with HW but leave receiver and
1796      * transmitter disabled.
1797      */
1798     axienet_setoptions(ndev, lp->options &
1799                ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1800     axienet_set_mac_address(ndev, NULL);
1801     axienet_set_multicast_list(ndev);
1802     axienet_setoptions(ndev, lp->options);
1803     napi_enable(&lp->napi_rx);
1804     napi_enable(&lp->napi_tx);
1805 }
1806 
1807 /**
1808  * axienet_probe - Axi Ethernet probe function.
1809  * @pdev:   Pointer to platform device structure.
1810  *
1811  * Return: 0, on success
1812  *      Non-zero error value on failure.
1813  *
1814  * This is the probe routine for the Axi Ethernet driver. It is called before
1815  * any other driver routines are invoked. It allocates and sets up the Ethernet
1816  * device, parses the device tree to populate the fields of axienet_local, and
1817  * registers the Ethernet device.
1818  */
1819 static int axienet_probe(struct platform_device *pdev)
1820 {
1821     int ret;
1822     struct device_node *np;
1823     struct axienet_local *lp;
1824     struct net_device *ndev;
1825     struct resource *ethres;
1826     u8 mac_addr[ETH_ALEN];
1827     int addr_width = 32;
1828     u32 value;
1829 
1830     ndev = alloc_etherdev(sizeof(*lp));
1831     if (!ndev)
1832         return -ENOMEM;
1833 
1834     platform_set_drvdata(pdev, ndev);
1835 
1836     SET_NETDEV_DEV(ndev, &pdev->dev);
1837     ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
1838     ndev->features = NETIF_F_SG;
1839     ndev->netdev_ops = &axienet_netdev_ops;
1840     ndev->ethtool_ops = &axienet_ethtool_ops;
1841 
1842     /* MTU range: 64 - 9000 */
1843     ndev->min_mtu = 64;
1844     ndev->max_mtu = XAE_JUMBO_MTU;
1845 
1846     lp = netdev_priv(ndev);
1847     lp->ndev = ndev;
1848     lp->dev = &pdev->dev;
1849     lp->options = XAE_OPTION_DEFAULTS;
1850     lp->rx_bd_num = RX_BD_NUM_DEFAULT;
1851     lp->tx_bd_num = TX_BD_NUM_DEFAULT;
1852 
1853     netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll, NAPI_POLL_WEIGHT);
1854     netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll, NAPI_POLL_WEIGHT);
1855 
1856     lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
1857     if (!lp->axi_clk) {
1858         /* For backward compatibility, if the named AXI clock is not present,
1859          * treat the first clock specified as the AXI clock.
1860          */
1861         lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
1862     }
1863     if (IS_ERR(lp->axi_clk)) {
1864         ret = PTR_ERR(lp->axi_clk);
1865         goto free_netdev;
1866     }
1867     ret = clk_prepare_enable(lp->axi_clk);
1868     if (ret) {
1869         dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
1870         goto free_netdev;
1871     }
1872 
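         /* The auxiliary clocks are optional: any of them missing from the
          * device tree is tolerated and simply skipped when enabling the bulk.
          */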
1873     lp->misc_clks[0].id = "axis_clk";
1874     lp->misc_clks[1].id = "ref_clk";
1875     lp->misc_clks[2].id = "mgt_clk";
1876 
1877     ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
1878     if (ret)
1879         goto cleanup_clk;
1880 
1881     ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
1882     if (ret)
1883         goto cleanup_clk;
1884 
1885     /* Map device registers */
1886     lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
1887     if (IS_ERR(lp->regs)) {
1888         ret = PTR_ERR(lp->regs);
1889         goto cleanup_clk;
1890     }
1891     lp->regs_start = ethres->start;
1892 
1893     /* Set up checksum offload, but default to off if not specified */
1894     lp->features = 0;
1895 
1896     ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
1897     if (!ret) {
1898         switch (value) {
1899         case 1:
1900             lp->csum_offload_on_tx_path =
1901                 XAE_FEATURE_PARTIAL_TX_CSUM;
1902             lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1903             /* Can checksum TCP/UDP over IPv4. */
1904             ndev->features |= NETIF_F_IP_CSUM;
1905             break;
1906         case 2:
1907             lp->csum_offload_on_tx_path =
1908                 XAE_FEATURE_FULL_TX_CSUM;
1909             lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1910             /* Can checksum TCP/UDP over IPv4. */
1911             ndev->features |= NETIF_F_IP_CSUM;
1912             break;
1913         default:
1914             lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1915         }
1916     }
1917     ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
1918     if (!ret) {
1919         switch (value) {
1920         case 1:
1921             lp->csum_offload_on_rx_path =
1922                 XAE_FEATURE_PARTIAL_RX_CSUM;
1923             lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
1924             break;
1925         case 2:
1926             lp->csum_offload_on_rx_path =
1927                 XAE_FEATURE_FULL_RX_CSUM;
1928             lp->features |= XAE_FEATURE_FULL_RX_CSUM;
1929             break;
1930         default:
1931             lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1932         }
1933     }
1934     /* To support jumbo frames, the Axi Ethernet hardware must be built
1935      * with sufficiently large Rx/Tx memory; only then can the jumbo
1936      * option be enabled.
1937      * Here we read the amount of Rx memory provisioned in the hardware
1938      * from the device tree and record it in lp->rxmem.
1939      */
1940     of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
1941 
1942     lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
1943                            "xlnx,switch-x-sgmii");
1944 
1945     /* Start with the proprietary (and broken) phy_type property */
1946     ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
1947     if (!ret) {
1948         netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
1949         switch (value) {
1950         case XAE_PHY_TYPE_MII:
1951             lp->phy_mode = PHY_INTERFACE_MODE_MII;
1952             break;
1953         case XAE_PHY_TYPE_GMII:
1954             lp->phy_mode = PHY_INTERFACE_MODE_GMII;
1955             break;
1956         case XAE_PHY_TYPE_RGMII_2_0:
1957             lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
1958             break;
1959         case XAE_PHY_TYPE_SGMII:
1960             lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
1961             break;
1962         case XAE_PHY_TYPE_1000BASE_X:
1963             lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
1964             break;
1965         default:
1966             ret = -EINVAL;
1967             goto cleanup_clk;
1968         }
1969     } else {
1970         ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
1971         if (ret)
1972             goto cleanup_clk;
1973     }
1974     if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1975         lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
1976         dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
1977         ret = -EINVAL;
1978         goto cleanup_clk;
1979     }
1980 
1981     /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1982     np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
1983     if (np) {
1984         struct resource dmares;
1985 
1986         ret = of_address_to_resource(np, 0, &dmares);
1987         if (ret) {
1988             dev_err(&pdev->dev,
1989                 "unable to get DMA resource\n");
1990             of_node_put(np);
1991             goto cleanup_clk;
1992         }
1993         lp->dma_regs = devm_ioremap_resource(&pdev->dev,
1994                              &dmares);
1995         lp->rx_irq = irq_of_parse_and_map(np, 1);
1996         lp->tx_irq = irq_of_parse_and_map(np, 0);
1997         of_node_put(np);
1998         lp->eth_irq = platform_get_irq_optional(pdev, 0);
1999     } else {
2000         /* Check for these resources directly on the Ethernet node. */
2001         lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2002         lp->rx_irq = platform_get_irq(pdev, 1);
2003         lp->tx_irq = platform_get_irq(pdev, 0);
2004         lp->eth_irq = platform_get_irq_optional(pdev, 2);
2005     }
2006     if (IS_ERR(lp->dma_regs)) {
2007         dev_err(&pdev->dev, "could not map DMA regs\n");
2008         ret = PTR_ERR(lp->dma_regs);
2009         goto cleanup_clk;
2010     }
2011     if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
2012         dev_err(&pdev->dev, "could not determine irqs\n");
2013         ret = -ENOMEM;
2014         goto cleanup_clk;
2015     }
2016 
2017     /* Autodetect the need for 64-bit DMA pointers.
2018      * When the IP is configured for a bus width bigger than 32 bits,
2019      * writing the MSB registers is mandatory, even if they are all 0.
2020      * We can detect this case by writing all 1's to one such register
2021      * and see if that sticks: when the IP is configured for 32 bits
2022      * and seeing if that sticks: when the IP is configured for 32 bits
2023      * Those MSB registers were introduced in IP v7.1, which we check first.
2024      */
2025     if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2026         void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2027 
2028         iowrite32(0x0, desc);
2029         if (ioread32(desc) == 0) {  /* sanity check */
2030             iowrite32(0xffffffff, desc);
2031             if (ioread32(desc) > 0) {
2032                 lp->features |= XAE_FEATURE_DMA_64BIT;
2033                 addr_width = 64;
2034                 dev_info(&pdev->dev,
2035                      "autodetected 64-bit DMA range\n");
2036             }
2037             iowrite32(0x0, desc);
2038         }
2039     }
2040     if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2041         dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2042         ret = -EINVAL;
2043         goto cleanup_clk;
2044     }
2045 
2046     ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2047     if (ret) {
2048         dev_err(&pdev->dev, "No suitable DMA available\n");
2049         goto cleanup_clk;
2050     }
2051 
2052     /* Check for Ethernet core IRQ (optional) */
2053     if (lp->eth_irq <= 0)
2054         dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2055 
2056     /* Retrieve the MAC address */
2057     ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2058     if (!ret) {
2059         axienet_set_mac_address(ndev, mac_addr);
2060     } else {
2061         dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2062              ret);
2063         axienet_set_mac_address(ndev, NULL);
2064     }
2065 
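         /* Start with the driver's default interrupt coalescing settings;
          * they can be tuned later through "ethtool -C".
          */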
2066     lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2067     lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2068     lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2069     lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2070 
2071     /* Reset core now that clocks are enabled, prior to accessing MDIO */
2072     ret = __axienet_device_reset(lp);
2073     if (ret)
2074         goto cleanup_clk;
2075 
2076     ret = axienet_mdio_setup(lp);
2077     if (ret)
2078         dev_warn(&pdev->dev,
2079              "error registering MDIO bus: %d\n", ret);
2080 
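         /* For SGMII/1000BaseX the internal PCS/PMA is a separate MDIO device,
          * referenced from the device tree, which phylink drives through
          * axienet_pcs_ops.
          */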
2081     if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2082         lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2083         np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2084         if (!np) {
2085             /* Deprecated: Always use "pcs-handle" for pcs_phy.
2086              * Falling back to "phy-handle" here is only for
2087              * backward compatibility with old device trees.
2088              */
2089             np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2090         }
2091         if (!np) {
2092             dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2093             ret = -EINVAL;
2094             goto cleanup_mdio;
2095         }
2096         lp->pcs_phy = of_mdio_find_device(np);
2097         if (!lp->pcs_phy) {
2098             ret = -EPROBE_DEFER;
2099             of_node_put(np);
2100             goto cleanup_mdio;
2101         }
2102         of_node_put(np);
2103         lp->pcs.ops = &axienet_pcs_ops;
2104         lp->pcs.poll = true;
2105     }
2106 
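         /* Describe the MAC to phylink: symmetric/asymmetric pause and
          * full-duplex 10/100/1000 only, on the interface(s) this instance
          * supports.
          */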
2107     lp->phylink_config.dev = &ndev->dev;
2108     lp->phylink_config.type = PHYLINK_NETDEV;
2109     lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2110         MAC_10FD | MAC_100FD | MAC_1000FD;
2111 
2112     __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2113     if (lp->switch_x_sgmii) {
2114         __set_bit(PHY_INTERFACE_MODE_1000BASEX,
2115               lp->phylink_config.supported_interfaces);
2116         __set_bit(PHY_INTERFACE_MODE_SGMII,
2117               lp->phylink_config.supported_interfaces);
2118     }
2119 
2120     lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2121                      lp->phy_mode,
2122                      &axienet_phylink_ops);
2123     if (IS_ERR(lp->phylink)) {
2124         ret = PTR_ERR(lp->phylink);
2125         dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2126         goto cleanup_mdio;
2127     }
2128 
2129     ret = register_netdev(lp->ndev);
2130     if (ret) {
2131         dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2132         goto cleanup_phylink;
2133     }
2134 
2135     return 0;
2136 
2137 cleanup_phylink:
2138     phylink_destroy(lp->phylink);
2139 
2140 cleanup_mdio:
2141     if (lp->pcs_phy)
2142         put_device(&lp->pcs_phy->dev);
2143     if (lp->mii_bus)
2144         axienet_mdio_teardown(lp);
2145 cleanup_clk:
2146     clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2147     clk_disable_unprepare(lp->axi_clk);
2148 
2149 free_netdev:
2150     free_netdev(ndev);
2151 
2152     return ret;
2153 }
2154 
2155 static int axienet_remove(struct platform_device *pdev)
2156 {
2157     struct net_device *ndev = platform_get_drvdata(pdev);
2158     struct axienet_local *lp = netdev_priv(ndev);
2159 
2160     unregister_netdev(ndev);
2161 
2162     if (lp->phylink)
2163         phylink_destroy(lp->phylink);
2164 
2165     if (lp->pcs_phy)
2166         put_device(&lp->pcs_phy->dev);
2167 
2168     axienet_mdio_teardown(lp);
2169 
2170     clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2171     clk_disable_unprepare(lp->axi_clk);
2172 
2173     free_netdev(ndev);
2174 
2175     return 0;
2176 }
2177 
2178 static void axienet_shutdown(struct platform_device *pdev)
2179 {
2180     struct net_device *ndev = platform_get_drvdata(pdev);
2181 
2182     rtnl_lock();
2183     netif_device_detach(ndev);
2184 
2185     if (netif_running(ndev))
2186         dev_close(ndev);
2187 
2188     rtnl_unlock();
2189 }
2190 
2191 static struct platform_driver axienet_driver = {
2192     .probe = axienet_probe,
2193     .remove = axienet_remove,
2194     .shutdown = axienet_shutdown,
2195     .driver = {
2196          .name = "xilinx_axienet",
2197          .of_match_table = axienet_of_match,
2198     },
2199 };
2200 
2201 module_platform_driver(axienet_driver);
2202 
2203 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
2204 MODULE_AUTHOR("Xilinx");
2205 MODULE_LICENSE("GPL");