// SPDX-License-Identifier: GPL-2.0
/*  Atheros AR71xx built-in ethernet MAC driver
 *
 *  Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
 *
 *  List of authors who contributed to this driver before mainlining:
 *  Alexander Couzens <lynxis@fe80.eu>
 *  Christian Lamparter <chunkeey@gmail.com>
 *  Chuanhong Guo <gch981213@gmail.com>
 *  Daniel F. Dickinson <cshored@thecshore.com>
 *  David Bauer <mail@david-bauer.net>
 *  Felix Fietkau <nbd@nbd.name>
 *  Gabor Juhos <juhosg@freemail.hu>
 *  Hauke Mehrtens <hauke@hauke-m.de>
 *  Johann Neuhauser <johann@it-neuhauser.de>
 *  John Crispin <john@phrozen.org>
 *  Jo-Philipp Wich <jo@mein.io>
 *  Koen Vandeputte <koen.vandeputte@ncentric.com>
 *  Lucian Cristian <lucian.cristian@gmail.com>
 *  Matt Merhar <mattmerhar@protonmail.com>
 *  Milan Krstic <milan.krstic@gmail.com>
 *  Petr Štetiar <ynezz@true.cz>
 *  Rosen Penev <rosenp@gmail.com>
 *  Stephen Walker <stephendwalker+github@gmail.com>
 *  Vittorio Gambaletta <openwrt@vittgam.net>
 *  Weijie Gao <hackpascal@gmail.com>
 *  Imre Kaloz <kaloz@openwrt.org>
 */

#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <net/selftests.h>

/* For our NAPI weight bigger does *NOT* mean better - it means more
 * D-cache misses and lots more wasted cycles than we'll ever
 * possibly gain from saving instructions.
 */
#define AG71XX_NAPI_WEIGHT  32
#define AG71XX_OOM_REFILL   (1 + HZ / 10)

#define AG71XX_INT_ERR  (AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX   (AG71XX_INT_TX_PS)
#define AG71XX_INT_RX   (AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)

#define AG71XX_INT_POLL (AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT (AG71XX_INT_ERR | AG71XX_INT_POLL)

#define AG71XX_TX_MTU_LEN   1540

#define AG71XX_TX_RING_SPLIT        512
#define AG71XX_TX_RING_DS_PER_PKT   DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
                             AG71XX_TX_RING_SPLIT)
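/* Example: with AG71XX_TX_MTU_LEN == 1540 and AG71XX_TX_RING_SPLIT == 512,
 * DIV_ROUND_UP(1540, 512) == 4, so a single full-size frame can consume
 * up to four TX descriptors when descriptor splitting is in use.
 */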
#define AG71XX_TX_RING_SIZE_DEFAULT 128
#define AG71XX_RX_RING_SIZE_DEFAULT 256

#define AG71XX_MDIO_RETRY   1000
#define AG71XX_MDIO_DELAY   5
#define AG71XX_MDIO_MAX_CLK 5000000

/* Register offsets */
#define AG71XX_REG_MAC_CFG1 0x0000
#define MAC_CFG1_TXE        BIT(0)  /* Tx Enable */
#define MAC_CFG1_STX        BIT(1)  /* Synchronize Tx Enable */
#define MAC_CFG1_RXE        BIT(2)  /* Rx Enable */
#define MAC_CFG1_SRX        BIT(3)  /* Synchronize Rx Enable */
#define MAC_CFG1_TFC        BIT(4)  /* Tx Flow Control Enable */
#define MAC_CFG1_RFC        BIT(5)  /* Rx Flow Control Enable */
#define MAC_CFG1_SR         BIT(31) /* Soft Reset */
#define MAC_CFG1_INIT   (MAC_CFG1_RXE | MAC_CFG1_TXE | \
             MAC_CFG1_SRX | MAC_CFG1_STX)

#define AG71XX_REG_MAC_CFG2 0x0004
#define MAC_CFG2_FDX        BIT(0)
#define MAC_CFG2_PAD_CRC_EN BIT(2)
#define MAC_CFG2_LEN_CHECK  BIT(4)
#define MAC_CFG2_IF_1000    BIT(9)
#define MAC_CFG2_IF_10_100  BIT(8)

#define AG71XX_REG_MAC_MFL  0x0010

#define AG71XX_REG_MII_CFG  0x0020
#define MII_CFG_CLK_DIV_4   0
#define MII_CFG_CLK_DIV_6   2
#define MII_CFG_CLK_DIV_8   3
#define MII_CFG_CLK_DIV_10  4
#define MII_CFG_CLK_DIV_14  5
#define MII_CFG_CLK_DIV_20  6
#define MII_CFG_CLK_DIV_28  7
#define MII_CFG_CLK_DIV_34  8
#define MII_CFG_CLK_DIV_42  9
#define MII_CFG_CLK_DIV_50  10
#define MII_CFG_CLK_DIV_58  11
#define MII_CFG_CLK_DIV_66  12
#define MII_CFG_CLK_DIV_74  13
#define MII_CFG_CLK_DIV_82  14
#define MII_CFG_CLK_DIV_98  15
#define MII_CFG_RESET       BIT(31)

#define AG71XX_REG_MII_CMD  0x0024
#define MII_CMD_READ        BIT(0)

#define AG71XX_REG_MII_ADDR 0x0028
#define MII_ADDR_SHIFT      8

#define AG71XX_REG_MII_CTRL 0x002c
#define AG71XX_REG_MII_STATUS   0x0030
#define AG71XX_REG_MII_IND  0x0034
#define MII_IND_BUSY        BIT(0)
#define MII_IND_INVALID     BIT(2)

#define AG71XX_REG_MAC_IFCTL    0x0038
#define MAC_IFCTL_SPEED     BIT(16)

#define AG71XX_REG_MAC_ADDR1    0x0040
#define AG71XX_REG_MAC_ADDR2    0x0044
#define AG71XX_REG_FIFO_CFG0    0x0048
#define FIFO_CFG0_WTM       BIT(0)  /* Watermark Module */
#define FIFO_CFG0_RXS       BIT(1)  /* Rx System Module */
#define FIFO_CFG0_RXF       BIT(2)  /* Rx Fabric Module */
#define FIFO_CFG0_TXS       BIT(3)  /* Tx System Module */
#define FIFO_CFG0_TXF       BIT(4)  /* Tx Fabric Module */
#define FIFO_CFG0_ALL   (FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
            | FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT  (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG0_ENABLE_SHIFT  8
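/* Example: FIFO_CFG0_ALL covers bits 0..4 (0x1f), so FIFO_CFG0_INIT shifts
 * those five module-enable bits up to bits 8..12, i.e. the value 0x00001f00.
 */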

#define AG71XX_REG_FIFO_CFG1    0x004c
#define AG71XX_REG_FIFO_CFG2    0x0050
#define AG71XX_REG_FIFO_CFG3    0x0054
#define AG71XX_REG_FIFO_CFG4    0x0058
#define FIFO_CFG4_DE        BIT(0)  /* Drop Event */
#define FIFO_CFG4_DV        BIT(1)  /* RX_DV Event */
#define FIFO_CFG4_FC        BIT(2)  /* False Carrier */
#define FIFO_CFG4_CE        BIT(3)  /* Code Error */
#define FIFO_CFG4_CR        BIT(4)  /* CRC error */
#define FIFO_CFG4_LM        BIT(5)  /* Length Mismatch */
#define FIFO_CFG4_LO        BIT(6)  /* Length out of range */
#define FIFO_CFG4_OK        BIT(7)  /* Packet is OK */
#define FIFO_CFG4_MC        BIT(8)  /* Multicast Packet */
#define FIFO_CFG4_BC        BIT(9)  /* Broadcast Packet */
#define FIFO_CFG4_DR        BIT(10) /* Dribble */
#define FIFO_CFG4_LE        BIT(11) /* Long Event */
#define FIFO_CFG4_CF        BIT(12) /* Control Frame */
#define FIFO_CFG4_PF        BIT(13) /* Pause Frame */
#define FIFO_CFG4_UO        BIT(14) /* Unsupported Opcode */
#define FIFO_CFG4_VT        BIT(15) /* VLAN tag detected */
#define FIFO_CFG4_FT        BIT(16) /* Frame Truncated */
#define FIFO_CFG4_UC        BIT(17) /* Unicast Packet */
#define FIFO_CFG4_INIT  (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
             FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
             FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
             FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
             FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
             FIFO_CFG4_VT)

#define AG71XX_REG_FIFO_CFG5    0x005c
#define FIFO_CFG5_DE        BIT(0)  /* Drop Event */
#define FIFO_CFG5_DV        BIT(1)  /* RX_DV Event */
#define FIFO_CFG5_FC        BIT(2)  /* False Carrier */
#define FIFO_CFG5_CE        BIT(3)  /* Code Error */
#define FIFO_CFG5_LM        BIT(4)  /* Length Mismatch */
#define FIFO_CFG5_LO        BIT(5)  /* Length Out of Range */
#define FIFO_CFG5_OK        BIT(6)  /* Packet is OK */
#define FIFO_CFG5_MC        BIT(7)  /* Multicast Packet */
#define FIFO_CFG5_BC        BIT(8)  /* Broadcast Packet */
#define FIFO_CFG5_DR        BIT(9)  /* Dribble */
#define FIFO_CFG5_CF        BIT(10) /* Control Frame */
#define FIFO_CFG5_PF        BIT(11) /* Pause Frame */
#define FIFO_CFG5_UO        BIT(12) /* Unsupported Opcode */
#define FIFO_CFG5_VT        BIT(13) /* VLAN tag detected */
#define FIFO_CFG5_LE        BIT(14) /* Long Event */
#define FIFO_CFG5_FT        BIT(15) /* Frame Truncated */
#define FIFO_CFG5_16        BIT(16) /* unknown */
#define FIFO_CFG5_17        BIT(17) /* unknown */
#define FIFO_CFG5_SF        BIT(18) /* Short Frame */
#define FIFO_CFG5_BM        BIT(19) /* Byte Mode */
#define FIFO_CFG5_INIT  (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
             FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
             FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
             FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
             FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
             FIFO_CFG5_17 | FIFO_CFG5_SF)

#define AG71XX_REG_TX_CTRL  0x0180
#define TX_CTRL_TXE     BIT(0)  /* Tx Enable */

#define AG71XX_REG_TX_DESC  0x0184
#define AG71XX_REG_TX_STATUS    0x0188
#define TX_STATUS_PS        BIT(0)  /* Packet Sent */
#define TX_STATUS_UR        BIT(1)  /* Tx Underrun */
#define TX_STATUS_BE        BIT(3)  /* Bus Error */

#define AG71XX_REG_RX_CTRL  0x018c
#define RX_CTRL_RXE     BIT(0)  /* Rx Enable */

#define AG71XX_DMA_RETRY    10
#define AG71XX_DMA_DELAY    1

#define AG71XX_REG_RX_DESC  0x0190
#define AG71XX_REG_RX_STATUS    0x0194
#define RX_STATUS_PR        BIT(0)  /* Packet Received */
#define RX_STATUS_OF        BIT(2)  /* Rx Overflow */
#define RX_STATUS_BE        BIT(3)  /* Bus Error */

#define AG71XX_REG_INT_ENABLE   0x0198
#define AG71XX_REG_INT_STATUS   0x019c
#define AG71XX_INT_TX_PS    BIT(0)
#define AG71XX_INT_TX_UR    BIT(1)
#define AG71XX_INT_TX_BE    BIT(3)
#define AG71XX_INT_RX_PR    BIT(4)
#define AG71XX_INT_RX_OF    BIT(6)
#define AG71XX_INT_RX_BE    BIT(7)

#define AG71XX_REG_FIFO_DEPTH   0x01a8
#define AG71XX_REG_RX_SM    0x01b0
#define AG71XX_REG_TX_SM    0x01b4

#define AG71XX_DEFAULT_MSG_ENABLE   \
    (NETIF_MSG_DRV          \
    | NETIF_MSG_PROBE       \
    | NETIF_MSG_LINK        \
    | NETIF_MSG_TIMER       \
    | NETIF_MSG_IFDOWN      \
    | NETIF_MSG_IFUP        \
    | NETIF_MSG_RX_ERR      \
    | NETIF_MSG_TX_ERR)

struct ag71xx_statistic {
    unsigned short offset;
    u32 mask;
    const char name[ETH_GSTRING_LEN];
};

static const struct ag71xx_statistic ag71xx_statistics[] = {
    { 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
    { 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
    { 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
    { 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
    { 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
    { 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
    { 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
    { 0x009C, GENMASK(23, 0), "Rx Byte", },
    { 0x00A0, GENMASK(17, 0), "Rx Packet", },
    { 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
    { 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
    { 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
    { 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
    { 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
    { 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
    { 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
    { 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
    { 0x00C4, GENMASK(11, 0), "Rx Code Error", },
    { 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
    { 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
    { 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
    { 0x00D4, GENMASK(11, 0), "Rx Fragments", },
    { 0x00D8, GENMASK(11, 0), "Rx Jabber", },
    { 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
    { 0x00E0, GENMASK(23, 0), "Tx Byte", },
    { 0x00E4, GENMASK(17, 0), "Tx Packet", },
    { 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
    { 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
    { 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
    { 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
    { 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
    { 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
    { 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
    { 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
    { 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
    { 0x010C, GENMASK(12, 0), "Tx Total Collision", },
    { 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
    { 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
    { 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
    { 0x011C, GENMASK(11, 0), "Tx FCS Error", },
    { 0x0120, GENMASK(11, 0), "Tx Control Frame", },
    { 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
    { 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
    { 0x012C, GENMASK(11, 0), "Tx Fragment", },
};

#define DESC_EMPTY      BIT(31)
#define DESC_MORE       BIT(24)
#define DESC_PKTLEN_M       0xfff
struct ag71xx_desc {
    u32 data;
    u32 ctrl;
    u32 next;
    u32 pad;
} __aligned(4);

#define AG71XX_DESC_SIZE    roundup(sizeof(struct ag71xx_desc), \
                    L1_CACHE_BYTES)
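/* sizeof(struct ag71xx_desc) is 16 bytes, so with the 32-byte L1 cache
 * lines typical of these MIPS SoCs AG71XX_DESC_SIZE rounds up to 32 and
 * each descriptor gets a cache line of its own.
 */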

struct ag71xx_buf {
    union {
        struct {
            struct sk_buff *skb;
            unsigned int len;
        } tx;
        struct {
            dma_addr_t dma_addr;
            void *rx_buf;
        } rx;
    };
};

struct ag71xx_ring {
    /* "Hot" fields in the data path. */
    unsigned int curr;
    unsigned int dirty;

    /* "Cold" fields - not used in the data path. */
    struct ag71xx_buf *buf;
    u16 order;
    u16 desc_split;
    dma_addr_t descs_dma;
    u8 *descs_cpu;
};

enum ag71xx_type {
    AR7100,
    AR7240,
    AR9130,
    AR9330,
    AR9340,
    QCA9530,
    QCA9550,
};

struct ag71xx_dcfg {
    u32 max_frame_len;
    const u32 *fifodata;
    u16 desc_pktlen_mask;
    bool tx_hang_workaround;
    enum ag71xx_type type;
};

struct ag71xx {
    /* Critical data related to the per-packet data path are clustered
     * early in this structure to help improve the D-cache footprint.
     */
    struct ag71xx_ring rx_ring ____cacheline_aligned;
    struct ag71xx_ring tx_ring ____cacheline_aligned;

    u16 rx_buf_size;
    u8 rx_buf_offset;

    struct net_device *ndev;
    struct platform_device *pdev;
    struct napi_struct napi;
    u32 msg_enable;
    const struct ag71xx_dcfg *dcfg;

    /* From this point onwards we're not looking at per-packet fields. */
    void __iomem *mac_base;

    struct ag71xx_desc *stop_desc;
    dma_addr_t stop_desc_dma;

    phy_interface_t phy_if_mode;
    struct phylink *phylink;
    struct phylink_config phylink_config;

    struct delayed_work restart_work;
    struct timer_list oom_timer;

    struct reset_control *mac_reset;

    u32 fifodata[3];
    int mac_idx;

    struct reset_control *mdio_reset;
    struct mii_bus *mii_bus;
    struct clk *clk_mdio;
    struct clk *clk_eth;
};

static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
    return (desc->ctrl & DESC_EMPTY) != 0;
}

static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
    return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}

static int ag71xx_ring_size_order(int size)
{
    return fls(size - 1);
}
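/* fls(size - 1) is ceil(log2(size)): the default ring sizes 128 and 256
 * map to orders 7 and 8. Ring sizes are then recovered as BIT(order) and
 * index masks as BIT(order) - 1 throughout the driver.
 */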

static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
    return ag->dcfg->type == type;
}

static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
    iowrite32(value, ag->mac_base + reg);
    /* flush write */
    (void)ioread32(ag->mac_base + reg);
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
    return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
    void __iomem *r;

    r = ag->mac_base + reg;
    iowrite32(ioread32(r) | mask, r);
    /* flush write */
    (void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
    void __iomem *r;

    r = ag->mac_base + reg;
    iowrite32(ioread32(r) & ~mask, r);
    /* flush write */
    (void)ioread32(r);
}

static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
    ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
    ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_get_drvinfo(struct net_device *ndev,
                   struct ethtool_drvinfo *info)
{
    struct ag71xx *ag = netdev_priv(ndev);

    strlcpy(info->driver, "ag71xx", sizeof(info->driver));
    strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
        sizeof(info->bus_info));
}

static int ag71xx_get_link_ksettings(struct net_device *ndev,
                   struct ethtool_link_ksettings *kset)
{
    struct ag71xx *ag = netdev_priv(ndev);

    return phylink_ethtool_ksettings_get(ag->phylink, kset);
}

static int ag71xx_set_link_ksettings(struct net_device *ndev,
                   const struct ethtool_link_ksettings *kset)
{
    struct ag71xx *ag = netdev_priv(ndev);

    return phylink_ethtool_ksettings_set(ag->phylink, kset);
}

static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
{
    struct ag71xx *ag = netdev_priv(ndev);

    return phylink_ethtool_nway_reset(ag->phylink);
}

static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
                      struct ethtool_pauseparam *pause)
{
    struct ag71xx *ag = netdev_priv(ndev);

    phylink_ethtool_get_pauseparam(ag->phylink, pause);
}

static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
                     struct ethtool_pauseparam *pause)
{
    struct ag71xx *ag = netdev_priv(ndev);

    return phylink_ethtool_set_pauseparam(ag->phylink, pause);
}

static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
                       u8 *data)
{
    int i;

    switch (sset) {
    case ETH_SS_STATS:
        for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
            memcpy(data + i * ETH_GSTRING_LEN,
                   ag71xx_statistics[i].name, ETH_GSTRING_LEN);
        break;
    case ETH_SS_TEST:
        net_selftest_get_strings(data);
        break;
    }
}

static void ag71xx_ethtool_get_stats(struct net_device *ndev,
                     struct ethtool_stats *stats, u64 *data)
{
    struct ag71xx *ag = netdev_priv(ndev);
    int i;

    for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
        *data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
                & ag71xx_statistics[i].mask;
}

static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
{
    switch (sset) {
    case ETH_SS_STATS:
        return ARRAY_SIZE(ag71xx_statistics);
    case ETH_SS_TEST:
        return net_selftest_get_count();
    default:
        return -EOPNOTSUPP;
    }
}

static const struct ethtool_ops ag71xx_ethtool_ops = {
    .get_drvinfo            = ag71xx_get_drvinfo,
    .get_link           = ethtool_op_get_link,
    .get_ts_info            = ethtool_op_get_ts_info,
    .get_link_ksettings     = ag71xx_get_link_ksettings,
    .set_link_ksettings     = ag71xx_set_link_ksettings,
    .nway_reset         = ag71xx_ethtool_nway_reset,
    .get_pauseparam         = ag71xx_ethtool_get_pauseparam,
    .set_pauseparam         = ag71xx_ethtool_set_pauseparam,
    .get_strings            = ag71xx_ethtool_get_strings,
    .get_ethtool_stats      = ag71xx_ethtool_get_stats,
    .get_sset_count         = ag71xx_ethtool_get_sset_count,
    .self_test          = net_selftest,
};

static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
    struct net_device *ndev = ag->ndev;
    int i;

    for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
        u32 busy;

        udelay(AG71XX_MDIO_DELAY);

        busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
        if (!busy)
            return 0;

        udelay(AG71XX_MDIO_DELAY);
    }

    netif_err(ag, link, ndev, "MDIO operation timed out\n");

    return -ETIMEDOUT;
}
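/* Worst case, the poll above spins for AG71XX_MDIO_RETRY iterations with
 * two AG71XX_MDIO_DELAY (5 us) waits each, i.e. roughly 10 ms, before
 * reporting -ETIMEDOUT.
 */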

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
    struct ag71xx *ag = bus->priv;
    int err, val;

    err = ag71xx_mdio_wait_busy(ag);
    if (err)
        return err;

    ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
          ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
    /* enable read mode */
    ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

    err = ag71xx_mdio_wait_busy(ag);
    if (err)
        return err;

    val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
    /* disable read mode */
    ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

    netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
          addr, reg, val);

    return val;
}

static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
                 u16 val)
{
    struct ag71xx *ag = bus->priv;

    netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
          addr, reg, val);

    ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
          ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
    ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);

    return ag71xx_mdio_wait_busy(ag);
}

static const u32 ar71xx_mdio_div_table[] = {
    4, 4, 6, 8, 10, 14, 20, 28,
};

static const u32 ar7240_mdio_div_table[] = {
    2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const u32 ar933x_mdio_div_table[] = {
    4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};

static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
    unsigned long ref_clock;
    const u32 *table;
    int ndivs, i;

    ref_clock = clk_get_rate(ag->clk_mdio);
    if (!ref_clock)
        return -EINVAL;

    if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
        table = ar933x_mdio_div_table;
        ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
    } else if (ag71xx_is(ag, AR7240)) {
        table = ar7240_mdio_div_table;
        ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
    } else {
        table = ar71xx_mdio_div_table;
        ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
    }

    for (i = 0; i < ndivs; i++) {
        unsigned long t;

        t = ref_clock / table[i];
        if (t <= AG71XX_MDIO_MAX_CLK) {
            *div = i;
            return 0;
        }
    }

    return -ENOENT;
}

static int ag71xx_mdio_reset(struct mii_bus *bus)
{
    struct ag71xx *ag = bus->priv;
    int err;
    u32 t;

    err = ag71xx_mdio_get_divider(ag, &t);
    if (err)
        return err;

    ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
    usleep_range(100, 200);

    ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
    usleep_range(100, 200);

    return 0;
}

static int ag71xx_mdio_probe(struct ag71xx *ag)
{
    struct device *dev = &ag->pdev->dev;
    struct net_device *ndev = ag->ndev;
    struct mii_bus *mii_bus;
    struct device_node *np, *mnp;
    int err;

    np = dev->of_node;
    ag->mii_bus = NULL;

    ag->clk_mdio = devm_clk_get(dev, "mdio");
    if (IS_ERR(ag->clk_mdio)) {
        netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
        return PTR_ERR(ag->clk_mdio);
    }

    err = clk_prepare_enable(ag->clk_mdio);
    if (err) {
        netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
        return err;
    }

    mii_bus = devm_mdiobus_alloc(dev);
    if (!mii_bus) {
        err = -ENOMEM;
        goto mdio_err_put_clk;
    }

    ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
    if (IS_ERR(ag->mdio_reset)) {
        netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
        err = PTR_ERR(ag->mdio_reset);
        goto mdio_err_put_clk;
    }

    mii_bus->name = "ag71xx_mdio";
    mii_bus->read = ag71xx_mdio_mii_read;
    mii_bus->write = ag71xx_mdio_mii_write;
    mii_bus->reset = ag71xx_mdio_reset;
    mii_bus->priv = ag;
    mii_bus->parent = dev;
    snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);

    if (!IS_ERR(ag->mdio_reset)) {
        reset_control_assert(ag->mdio_reset);
        msleep(100);
        reset_control_deassert(ag->mdio_reset);
        msleep(200);
    }

    mnp = of_get_child_by_name(np, "mdio");
    err = of_mdiobus_register(mii_bus, mnp);
    of_node_put(mnp);
    if (err)
        goto mdio_err_put_clk;

    ag->mii_bus = mii_bus;

    return 0;

mdio_err_put_clk:
    clk_disable_unprepare(ag->clk_mdio);
    return err;
}

static void ag71xx_mdio_remove(struct ag71xx *ag)
{
    if (ag->mii_bus)
        mdiobus_unregister(ag->mii_bus);
    clk_disable_unprepare(ag->clk_mdio);
}

static void ag71xx_hw_stop(struct ag71xx *ag)
{
    /* disable all interrupts and stop the rx/tx engine */
    ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
    ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
    ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
    unsigned long timestamp;
    u32 rx_sm, tx_sm, rx_fd;

    timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
    if (likely(time_before(jiffies, timestamp + HZ / 10)))
        return false;

    if (!netif_carrier_ok(ag->ndev))
        return false;

    rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
    if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
        return true;

    tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
    rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
    if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
        ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
        return true;

    return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
    struct ag71xx_ring *ring = &ag->tx_ring;
    int sent = 0, bytes_compl = 0, n = 0;
    struct net_device *ndev = ag->ndev;
    int ring_mask, ring_size;
    bool dma_stuck = false;

    ring_mask = BIT(ring->order) - 1;
    ring_size = BIT(ring->order);

    netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");

    while (ring->dirty + n != ring->curr) {
        struct ag71xx_desc *desc;
        struct sk_buff *skb;
        unsigned int i;

        i = (ring->dirty + n) & ring_mask;
        desc = ag71xx_ring_desc(ring, i);
        skb = ring->buf[i].tx.skb;

        if (!flush && !ag71xx_desc_empty(desc)) {
            if (ag->dcfg->tx_hang_workaround &&
                ag71xx_check_dma_stuck(ag)) {
                schedule_delayed_work(&ag->restart_work,
                              HZ / 2);
                dma_stuck = true;
            }
            break;
        }

        if (flush)
            desc->ctrl |= DESC_EMPTY;

        n++;
        if (!skb)
            continue;

        napi_consume_skb(skb, budget);
        ring->buf[i].tx.skb = NULL;

        bytes_compl += ring->buf[i].tx.len;

        sent++;
        ring->dirty += n;

        while (n > 0) {
            ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
            n--;
        }
    }

    netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);

    if (!sent)
        return 0;

    ag->ndev->stats.tx_bytes += bytes_compl;
    ag->ndev->stats.tx_packets += sent;

    netdev_completed_queue(ag->ndev, sent, bytes_compl);
    if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
        netif_wake_queue(ag->ndev);

    if (!dma_stuck)
        cancel_delayed_work(&ag->restart_work);

    return sent;
}

static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
    struct net_device *ndev = ag->ndev;
    int i;

    for (i = 0; i < AG71XX_DMA_RETRY; i++) {
        u32 rx, tx;

        mdelay(AG71XX_DMA_DELAY);

        rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
        tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
        if (!rx && !tx)
            return;
    }

    netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}

static void ag71xx_dma_reset(struct ag71xx *ag)
{
    struct net_device *ndev = ag->ndev;
    u32 val;
    int i;

    /* stop RX and TX */
    ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
    ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

    /* give the hardware some time to really stop all rx/tx activity
     * clearing the descriptors too early causes random memory corruption
     */
    ag71xx_dma_wait_stop(ag);

    /* clear descriptor addresses */
    ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
    ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

    /* clear pending RX/TX interrupts */
    for (i = 0; i < 256; i++) {
        ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
        ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
    }

    /* clear pending errors */
    ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
    ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

    val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
    if (val)
        netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
              val);

    val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

    /* mask out reserved bits */
    val &= ~0xff000000;

    if (val)
        netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
              val);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
    u32 init = MAC_CFG1_INIT;

    /* setup MAC configuration registers */
    ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

    ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
          MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

    /* setup max frame length to zero */
    ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

    /* setup FIFO configuration registers */
    ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
    ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
    ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
    ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
    ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
    return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
    u32 t;

    t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
      | (((u32)mac[3]) << 8) | ((u32)mac[2]);

    ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

    t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
    ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}
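/* Example of the packing above: for the MAC address 00:11:22:33:44:55,
 * MAC_ADDR1 is written with 0x55443322 and MAC_ADDR2 with 0x11000000;
 * the low 16 bits of MAC_ADDR2 are unused and left as zero.
 */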

static void ag71xx_fast_reset(struct ag71xx *ag)
{
    struct net_device *dev = ag->ndev;
    u32 rx_ds;
    u32 mii_reg;

    ag71xx_hw_stop(ag);

    mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
    rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

    ag71xx_tx_packets(ag, true, 0);

    reset_control_assert(ag->mac_reset);
    usleep_range(10, 20);
    reset_control_deassert(ag->mac_reset);
    usleep_range(10, 20);

    ag71xx_dma_reset(ag);
    ag71xx_hw_setup(ag);
    ag->tx_ring.curr = 0;
    ag->tx_ring.dirty = 0;
    netdev_reset_queue(ag->ndev);

    /* setup max frame length */
    ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
          ag71xx_max_frame_len(ag->ndev->mtu));

    ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
    ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
    ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

    ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
    /* start RX engine */
    ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

    /* enable interrupts */
    ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

    netif_wake_queue(ag->ndev);
}

static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
                  const struct phylink_link_state *state)
{
    struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

    if (phylink_autoneg_inband(mode))
        return;

    if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
        ag71xx_fast_reset(ag);

    if (ag->tx_ring.desc_split) {
        ag->fifodata[2] &= 0xffff;
        ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
    }

    ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}

static void ag71xx_mac_link_down(struct phylink_config *config,
                 unsigned int mode, phy_interface_t interface)
{
    struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

    ag71xx_hw_stop(ag);
}

static void ag71xx_mac_link_up(struct phylink_config *config,
                   struct phy_device *phy,
                   unsigned int mode, phy_interface_t interface,
                   int speed, int duplex,
                   bool tx_pause, bool rx_pause)
{
    struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
    u32 cfg1, cfg2;
    u32 ifctl;
    u32 fifo5;

    cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
    cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
    cfg2 |= duplex ? MAC_CFG2_FDX : 0;

    ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
    ifctl &= ~(MAC_IFCTL_SPEED);

    fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
    fifo5 &= ~FIFO_CFG5_BM;

    switch (speed) {
    case SPEED_1000:
        cfg2 |= MAC_CFG2_IF_1000;
        fifo5 |= FIFO_CFG5_BM;
        break;
    case SPEED_100:
        cfg2 |= MAC_CFG2_IF_10_100;
        ifctl |= MAC_IFCTL_SPEED;
        break;
    case SPEED_10:
        cfg2 |= MAC_CFG2_IF_10_100;
        break;
    default:
        return;
    }

    ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
    ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
    ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

    cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
    cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
    if (tx_pause)
        cfg1 |= MAC_CFG1_TFC;

    if (rx_pause)
        cfg1 |= MAC_CFG1_RFC;
    ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);

    ag71xx_hw_start(ag);
}

static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
    .validate = phylink_generic_validate,
    .mac_config = ag71xx_mac_config,
    .mac_link_down = ag71xx_mac_link_down,
    .mac_link_up = ag71xx_mac_link_up,
};

static int ag71xx_phylink_setup(struct ag71xx *ag)
{
    struct phylink *phylink;

    ag->phylink_config.dev = &ag->ndev->dev;
    ag->phylink_config.type = PHYLINK_NETDEV;
    ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
        MAC_10 | MAC_100 | MAC_1000FD;

    if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
        ag71xx_is(ag, AR9340) ||
        ag71xx_is(ag, QCA9530) ||
        (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
        __set_bit(PHY_INTERFACE_MODE_MII,
              ag->phylink_config.supported_interfaces);

    if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
        (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
        (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
        __set_bit(PHY_INTERFACE_MODE_GMII,
              ag->phylink_config.supported_interfaces);

    if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
        __set_bit(PHY_INTERFACE_MODE_SGMII,
              ag->phylink_config.supported_interfaces);

    if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
        __set_bit(PHY_INTERFACE_MODE_RMII,
              ag->phylink_config.supported_interfaces);

    if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
        (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
        __set_bit(PHY_INTERFACE_MODE_RGMII,
              ag->phylink_config.supported_interfaces);

    phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
                 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
    if (IS_ERR(phylink))
        return PTR_ERR(phylink);

    ag->phylink = phylink;
    return 0;
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
    struct ag71xx_ring *ring = &ag->tx_ring;
    int ring_mask = BIT(ring->order) - 1;
    u32 bytes_compl = 0, pkts_compl = 0;
    struct net_device *ndev = ag->ndev;

    while (ring->curr != ring->dirty) {
        struct ag71xx_desc *desc;
        u32 i = ring->dirty & ring_mask;

        desc = ag71xx_ring_desc(ring, i);
        if (!ag71xx_desc_empty(desc)) {
            desc->ctrl = 0;
            ndev->stats.tx_errors++;
        }

        if (ring->buf[i].tx.skb) {
            bytes_compl += ring->buf[i].tx.len;
            pkts_compl++;
            dev_kfree_skb_any(ring->buf[i].tx.skb);
        }
        ring->buf[i].tx.skb = NULL;
        ring->dirty++;
    }

    /* flush descriptors */
    wmb();

    netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}

static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
    struct ag71xx_ring *ring = &ag->tx_ring;
    int ring_size = BIT(ring->order);
    int ring_mask = ring_size - 1;
    int i;

    for (i = 0; i < ring_size; i++) {
        struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

        desc->next = (u32)(ring->descs_dma +
            AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

        desc->ctrl = DESC_EMPTY;
        ring->buf[i].tx.skb = NULL;
    }

    /* flush descriptors */
    wmb();

    ring->curr = 0;
    ring->dirty = 0;
    netdev_reset_queue(ag->ndev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
    struct ag71xx_ring *ring = &ag->rx_ring;
    int ring_size = BIT(ring->order);
    int i;

    if (!ring->buf)
        return;

    for (i = 0; i < ring_size; i++)
        if (ring->buf[i].rx.rx_buf) {
            dma_unmap_single(&ag->pdev->dev,
                     ring->buf[i].rx.dma_addr,
                     ag->rx_buf_size, DMA_FROM_DEVICE);
            skb_free_frag(ring->buf[i].rx.rx_buf);
        }
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
    return ag->rx_buf_size +
           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
                   int offset,
                   void *(*alloc)(unsigned int size))
{
    struct ag71xx_ring *ring = &ag->rx_ring;
    struct ag71xx_desc *desc;
    void *data;

    desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

    data = alloc(ag71xx_buffer_size(ag));
    if (!data)
        return false;

    buf->rx.rx_buf = data;
    buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
                      DMA_FROM_DEVICE);
    desc->data = (u32)buf->rx.dma_addr + offset;
    return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
    struct ag71xx_ring *ring = &ag->rx_ring;
    struct net_device *ndev = ag->ndev;
    int ring_mask = BIT(ring->order) - 1;
    int ring_size = BIT(ring->order);
    unsigned int i;
    int ret;

    ret = 0;
    for (i = 0; i < ring_size; i++) {
        struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

        desc->next = (u32)(ring->descs_dma +
            AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

        netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
              desc, desc->next);
    }

    for (i = 0; i < ring_size; i++) {
        struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

        if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
                    netdev_alloc_frag)) {
            ret = -ENOMEM;
            break;
        }

        desc->ctrl = DESC_EMPTY;
    }

    /* flush descriptors */
    wmb();

    ring->curr = 0;
    ring->dirty = 0;

    return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
    struct ag71xx_ring *ring = &ag->rx_ring;
    int ring_mask = BIT(ring->order) - 1;
    int offset = ag->rx_buf_offset;
    unsigned int count;

    count = 0;
    for (; ring->curr - ring->dirty > 0; ring->dirty++) {
        struct ag71xx_desc *desc;
        unsigned int i;

        i = ring->dirty & ring_mask;
        desc = ag71xx_ring_desc(ring, i);

        if (!ring->buf[i].rx.rx_buf &&
            !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
                    napi_alloc_frag))
            break;

        desc->ctrl = DESC_EMPTY;
        count++;
    }

    /* flush descriptors */
    wmb();

    netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
          count);

    return count;
}

static int ag71xx_rings_init(struct ag71xx *ag)
{
    struct ag71xx_ring *tx = &ag->tx_ring;
    struct ag71xx_ring *rx = &ag->rx_ring;
    int ring_size, tx_size;

    ring_size = BIT(tx->order) + BIT(rx->order);
    tx_size = BIT(tx->order);

    tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
    if (!tx->buf)
        return -ENOMEM;

    tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
                       ring_size * AG71XX_DESC_SIZE,
                       &tx->descs_dma, GFP_KERNEL);
    if (!tx->descs_cpu) {
        kfree(tx->buf);
        tx->buf = NULL;
        return -ENOMEM;
    }

    rx->buf = &tx->buf[tx_size];
    rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
    rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

    ag71xx_ring_tx_init(ag);
    return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
    struct ag71xx_ring *tx = &ag->tx_ring;
    struct ag71xx_ring *rx = &ag->rx_ring;
    int ring_size;

    ring_size = BIT(tx->order) + BIT(rx->order);

    if (tx->descs_cpu)
        dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
                  tx->descs_cpu, tx->descs_dma);

    kfree(tx->buf);

    tx->descs_cpu = NULL;
    rx->descs_cpu = NULL;
    tx->buf = NULL;
    rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
    ag71xx_ring_rx_clean(ag);
    ag71xx_ring_tx_clean(ag);
    ag71xx_rings_free(ag);

    netdev_reset_queue(ag->ndev);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
    ag71xx_hw_stop(ag);

    ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
    usleep_range(20, 30);

    reset_control_assert(ag->mac_reset);
    msleep(100);
    reset_control_deassert(ag->mac_reset);
    msleep(200);

    ag71xx_hw_setup(ag);

    ag71xx_dma_reset(ag);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
    int ret;

    ret = ag71xx_rings_init(ag);
    if (ret)
        return ret;

    napi_enable(&ag->napi);
    ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
    ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
    netif_start_queue(ag->ndev);

    return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
    netif_stop_queue(ag->ndev);

    ag71xx_hw_stop(ag);
    ag71xx_dma_reset(ag);

    napi_disable(&ag->napi);
    del_timer_sync(&ag->oom_timer);

    ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *ndev)
{
    struct ag71xx *ag = netdev_priv(ndev);
    unsigned int max_frame_len;
    int ret;

    ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
    if (ret) {
        netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
              ret);
        goto err;
    }

    max_frame_len = ag71xx_max_frame_len(ndev->mtu);
    ag->rx_buf_size =
        SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

    /* setup max frame length */
    ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
    ag71xx_hw_set_macaddr(ag, ndev->dev_addr);

    ret = ag71xx_hw_enable(ag);
    if (ret)
        goto err;

    phylink_start(ag->phylink);

    return 0;

err:
    ag71xx_rings_cleanup(ag);
    return ret;
}

static int ag71xx_stop(struct net_device *ndev)
{
    struct ag71xx *ag = netdev_priv(ndev);

    phylink_stop(ag->phylink);
    phylink_disconnect_phy(ag->phylink);
    ag71xx_hw_disable(ag);

    return 0;
}

static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
    int i, ring_mask, ndesc, split;
    struct ag71xx_desc *desc;

    ring_mask = BIT(ring->order) - 1;
    ndesc = 0;
    split = ring->desc_split;

    if (!split)
        split = len;

    while (len > 0) {
        unsigned int cur_len = len;

        i = (ring->curr + ndesc) & ring_mask;
        desc = ag71xx_ring_desc(ring, i);

        if (!ag71xx_desc_empty(desc))
            return -1;

        if (cur_len > split) {
            cur_len = split;

            /* TX will hang if DMA transfers <= 4 bytes,
             * make sure next segment is more than 4 bytes long.
             */
            if (len <= split + 4)
                cur_len -= 4;
        }

        desc->data = addr;
        addr += cur_len;
        len -= cur_len;

        if (len > 0)
            cur_len |= DESC_MORE;

        /* prevent early tx attempt of this descriptor */
        if (!ndesc)
            cur_len |= DESC_EMPTY;

        desc->ctrl = cur_len;
        ndesc++;
    }

    return ndesc;
}
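/* Example: a 1500 byte frame with desc_split == 512 becomes three
 * descriptors of 512, 512 and 476 bytes, the first two flagged DESC_MORE.
 * The "<= 4 bytes" workaround only matters near the boundary: a 516 byte
 * frame is split as 508 + 8 rather than 512 + 4.
 */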

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
                      struct net_device *ndev)
{
    int i, n, ring_min, ring_mask, ring_size;
    struct ag71xx *ag = netdev_priv(ndev);
    struct ag71xx_ring *ring;
    struct ag71xx_desc *desc;
    dma_addr_t dma_addr;

    ring = &ag->tx_ring;
    ring_mask = BIT(ring->order) - 1;
    ring_size = BIT(ring->order);

    if (skb->len <= 4) {
        netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
        goto err_drop;
    }

    dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
                  DMA_TO_DEVICE);

    i = ring->curr & ring_mask;
    desc = ag71xx_ring_desc(ring, i);

    /* setup descriptor fields */
    n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
                 skb->len & ag->dcfg->desc_pktlen_mask);
    if (n < 0)
        goto err_drop_unmap;

    i = (ring->curr + n - 1) & ring_mask;
    ring->buf[i].tx.len = skb->len;
    ring->buf[i].tx.skb = skb;

    netdev_sent_queue(ndev, skb->len);

    skb_tx_timestamp(skb);

    desc->ctrl &= ~DESC_EMPTY;
    ring->curr += n;

    /* flush descriptor */
    wmb();

    ring_min = 2;
    if (ring->desc_split)
        ring_min *= AG71XX_TX_RING_DS_PER_PKT;

    if (ring->curr - ring->dirty >= ring_size - ring_min) {
        netif_dbg(ag, tx_err, ndev, "tx queue full\n");
        netif_stop_queue(ndev);
    }

    netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");

    /* enable TX engine */
    ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

    return NETDEV_TX_OK;

err_drop_unmap:
    dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
    ndev->stats.tx_dropped++;

    dev_kfree_skb(skb);
    return NETDEV_TX_OK;
}

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
    struct ag71xx *ag = from_timer(ag, t, oom_timer);

    napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
    struct ag71xx *ag = netdev_priv(ndev);

    netif_err(ag, tx_err, ndev, "tx timeout\n");

    schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
    struct ag71xx *ag = container_of(work, struct ag71xx,
                     restart_work.work);

    rtnl_lock();
    ag71xx_hw_disable(ag);
    ag71xx_hw_enable(ag);

    phylink_stop(ag->phylink);
    phylink_start(ag->phylink);

    rtnl_unlock();
}

static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
    struct net_device *ndev = ag->ndev;
    int ring_mask, ring_size, done = 0;
    unsigned int pktlen_mask, offset;
    struct ag71xx_ring *ring;
    struct list_head rx_list;
    struct sk_buff *skb;

    ring = &ag->rx_ring;
    pktlen_mask = ag->dcfg->desc_pktlen_mask;
    offset = ag->rx_buf_offset;
    ring_mask = BIT(ring->order) - 1;
    ring_size = BIT(ring->order);

    netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
          limit, ring->curr, ring->dirty);

    INIT_LIST_HEAD(&rx_list);

    while (done < limit) {
        unsigned int i = ring->curr & ring_mask;
        struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
        int pktlen;
        int err = 0;

        if (ag71xx_desc_empty(desc))
            break;

        if ((ring->dirty + ring_size) == ring->curr) {
            WARN_ONCE(1, "RX out of ring");
            break;
        }

        ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

        pktlen = desc->ctrl & pktlen_mask;
        pktlen -= ETH_FCS_LEN;

        dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
                 ag->rx_buf_size, DMA_FROM_DEVICE);

        ndev->stats.rx_packets++;
        ndev->stats.rx_bytes += pktlen;

        skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
        if (!skb) {
            skb_free_frag(ring->buf[i].rx.rx_buf);
            goto next;
        }

        skb_reserve(skb, offset);
        skb_put(skb, pktlen);

        if (err) {
            ndev->stats.rx_dropped++;
            kfree_skb(skb);
        } else {
            skb->dev = ndev;
            skb->ip_summed = CHECKSUM_NONE;
            list_add_tail(&skb->list, &rx_list);
        }

next:
        ring->buf[i].rx.rx_buf = NULL;
        done++;

        ring->curr++;
    }

    ag71xx_ring_rx_refill(ag);

    list_for_each_entry(skb, &rx_list, list)
        skb->protocol = eth_type_trans(skb, ndev);
    netif_receive_skb_list(&rx_list);

    netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
          ring->curr, ring->dirty, done);

    return done;
}

static int ag71xx_poll(struct napi_struct *napi, int limit)
{
    struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
    struct ag71xx_ring *rx_ring = &ag->rx_ring;
    int rx_ring_size = BIT(rx_ring->order);
    struct net_device *ndev = ag->ndev;
    int tx_done, rx_done;
    u32 status;

    tx_done = ag71xx_tx_packets(ag, false, limit);

    netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
    rx_done = ag71xx_rx_packets(ag, limit);

    if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
        goto oom;

    status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
    if (unlikely(status & RX_STATUS_OF)) {
        ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
        ndev->stats.rx_fifo_errors++;

        /* restart RX */
        ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
    }

    if (rx_done < limit) {
        if (status & RX_STATUS_PR)
            goto more;

        status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
        if (status & TX_STATUS_PS)
            goto more;
        netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
              rx_done, tx_done, limit);

        napi_complete(napi);

        /* enable interrupts */
        ag71xx_int_enable(ag, AG71XX_INT_POLL);
        return rx_done;
    }

more:
    netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
          rx_done, tx_done, limit);
    return limit;

oom:
    netif_err(ag, rx_err, ndev, "out of memory\n");

    mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
    napi_complete(napi);
    return 0;
}

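/* Hard interrupt handler. Bus errors are acknowledged and logged here;
 * normal RX/TX work is deferred to NAPI: the poll-mode interrupts are
 * masked and ag71xx_poll() is scheduled, which re-enables them once the
 * rings are drained.
 */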
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
    struct net_device *ndev = dev_id;
    struct ag71xx *ag;
    u32 status;

    ag = netdev_priv(ndev);
    status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);

    if (unlikely(!status))
        return IRQ_NONE;

    if (unlikely(status & AG71XX_INT_ERR)) {
        if (status & AG71XX_INT_TX_BE) {
            ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
            netif_err(ag, intr, ndev, "TX BUS error\n");
        }
        if (status & AG71XX_INT_RX_BE) {
            ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
            netif_err(ag, intr, ndev, "RX BUS error\n");
        }
    }

    if (likely(status & AG71XX_INT_POLL)) {
        ag71xx_int_disable(ag, AG71XX_INT_POLL);
        netif_dbg(ag, intr, ndev, "enable polling mode\n");
        napi_schedule(&ag->napi);
    }

    return IRQ_HANDLED;
}

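/* MTU changes are handled by reprogramming the MAC's maximum frame length
 * register (MFL). The ag71xx_max_frame_len() helper accounts for the
 * Ethernet header, VLAN tag and FCS overhead on top of the MTU, which is
 * also how ndev->max_mtu is derived in ag71xx_probe() below.
 */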
static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
    struct ag71xx *ag = netdev_priv(ndev);

    ndev->mtu = new_mtu;
    ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
          ag71xx_max_frame_len(ndev->mtu));

    return 0;
}

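/* Standard netdev hooks. Note that .ndo_eth_ioctl is serviced entirely by
 * phy_do_ioctl(), which forwards MII ioctls to the attached PHY, and that
 * MAC address handling uses the generic eth_mac_addr() and
 * eth_validate_addr() helpers.
 */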
static const struct net_device_ops ag71xx_netdev_ops = {
    .ndo_open       = ag71xx_open,
    .ndo_stop       = ag71xx_stop,
    .ndo_start_xmit     = ag71xx_hard_start_xmit,
    .ndo_eth_ioctl      = phy_do_ioctl,
    .ndo_tx_timeout     = ag71xx_tx_timeout,
    .ndo_change_mtu     = ag71xx_change_mtu,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
};

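/* Known MMIO base addresses of the two built-in MACs; the index into this
 * table (0 for the MAC at 0x19000000, 1 for the MAC at 0x1a000000) becomes
 * ag->mac_idx in ag71xx_probe(), so a device whose "reg" base matches
 * neither address is rejected.
 */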
static const u32 ar71xx_addr_ar7100[] = {
    0x19000000, 0x1a000000,
};

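/* Probe relies almost entirely on devm/dmam-managed resources (netdev,
 * MMIO mapping, IRQ, reset line, stop descriptor), so the explicit unwind
 * labels at the bottom only need to undo the two unmanaged steps: the
 * MDIO bus registration and clk_prepare_enable() on the eth clock.
 */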
static int ag71xx_probe(struct platform_device *pdev)
{
    struct device_node *np = pdev->dev.of_node;
    const struct ag71xx_dcfg *dcfg;
    struct net_device *ndev;
    struct resource *res;
    int tx_size, err, i;
    struct ag71xx *ag;

    if (!np)
        return -ENODEV;

    ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
    if (!ndev)
        return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
        return -EINVAL;

    dcfg = of_device_get_match_data(&pdev->dev);
    if (!dcfg)
        return -EINVAL;

    ag = netdev_priv(ndev);
    ag->mac_idx = -1;
    for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
        if (ar71xx_addr_ar7100[i] == res->start)
            ag->mac_idx = i;
    }

    if (ag->mac_idx < 0) {
        netif_err(ag, probe, ndev, "unknown mac idx\n");
        return -EINVAL;
    }

    ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
    if (IS_ERR(ag->clk_eth)) {
        netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
        return PTR_ERR(ag->clk_eth);
    }

    SET_NETDEV_DEV(ndev, &pdev->dev);

    ag->pdev = pdev;
    ag->ndev = ndev;
    ag->dcfg = dcfg;
    ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
    memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));

    ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
    if (IS_ERR(ag->mac_reset)) {
        netif_err(ag, probe, ndev, "missing mac reset\n");
        return PTR_ERR(ag->mac_reset);
    }

    ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
    if (!ag->mac_base)
        return -ENOMEM;

    ndev->irq = platform_get_irq(pdev, 0);
    err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
                   0x0, dev_name(&pdev->dev), ndev);
    if (err) {
        netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
              ndev->irq);
        return err;
    }

    ndev->netdev_ops = &ag71xx_netdev_ops;
    ndev->ethtool_ops = &ag71xx_ethtool_ops;

    INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
    timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

    tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
    ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

    ndev->min_mtu = 68;
    ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);

    ag->rx_buf_offset = NET_SKB_PAD;
    if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
        ag->rx_buf_offset += NET_IP_ALIGN;

    if (ag71xx_is(ag, AR7100)) {
        ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
        tx_size *= AG71XX_TX_RING_DS_PER_PKT;
    }
    ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

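    /* The "stop" descriptor is a single self-referencing dummy descriptor
     * (its next pointer holds its own DMA address), used elsewhere in the
     * driver to give the TX DMA engine a safe place to park.
     */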
    ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
                        sizeof(struct ag71xx_desc),
                        &ag->stop_desc_dma, GFP_KERNEL);
    if (!ag->stop_desc)
        return -ENOMEM;

    ag->stop_desc->data = 0;
    ag->stop_desc->ctrl = 0;
    ag->stop_desc->next = (u32)ag->stop_desc_dma;

    err = of_get_ethdev_address(np, ndev);
    if (err) {
        netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
        eth_hw_addr_random(ndev);
    }

    err = of_get_phy_mode(np, &ag->phy_if_mode);
    if (err) {
        netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
        return err;
    }

    netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
                  AG71XX_NAPI_WEIGHT);

    err = clk_prepare_enable(ag->clk_eth);
    if (err) {
        netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
        return err;
    }

    ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

    ag71xx_hw_init(ag);

    err = ag71xx_mdio_probe(ag);
    if (err)
        goto err_put_clk;

    platform_set_drvdata(pdev, ndev);

    err = ag71xx_phylink_setup(ag);
    if (err) {
        netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
        goto err_mdio_remove;
    }

    err = register_netdev(ndev);
    if (err) {
        netif_err(ag, probe, ndev, "unable to register net device\n");
        platform_set_drvdata(pdev, NULL);
        goto err_mdio_remove;
    }

    netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
           (unsigned long)ag->mac_base, ndev->irq,
           phy_modes(ag->phy_if_mode));

    return 0;

err_mdio_remove:
    ag71xx_mdio_remove(ag);
err_put_clk:
    clk_disable_unprepare(ag->clk_eth);
    return err;
}

static int ag71xx_remove(struct platform_device *pdev)
{
    struct net_device *ndev = platform_get_drvdata(pdev);
    struct ag71xx *ag;

    if (!ndev)
        return 0;

    ag = netdev_priv(ndev);
    unregister_netdev(ndev);
    ag71xx_mdio_remove(ag);
    clk_disable_unprepare(ag->clk_eth);
    platform_set_drvdata(pdev, NULL);

    return 0;
}

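/* Per-SoC FIFO configuration words, copied into ag->fifodata in
 * ag71xx_probe() and programmed into the MAC's FIFO configuration
 * registers during hardware setup.
 */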
static const u32 ar71xx_fifo_ar7100[] = {
    0x0fff0000, 0x00001fff, 0x00780fff,
};

static const u32 ar71xx_fifo_ar9130[] = {
    0x0fff0000, 0x00001fff, 0x008001ff,
};

static const u32 ar71xx_fifo_ar9330[] = {
    0x0010ffff, 0x015500aa, 0x01f00140,
};

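/* Per-SoC capabilities: max_frame_len bounds the MTU range advertised to
 * the stack, desc_pktlen_mask selects how many bits of a descriptor's
 * ctrl word carry the packet length (4 KiB - 1 vs. 16 KiB - 1), and
 * tx_hang_workaround enables detection and restart of a stuck TX DMA
 * engine on the SoCs that need it.
 */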
static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
    .type = AR7100,
    .fifodata = ar71xx_fifo_ar7100,
    .max_frame_len = 1540,
    .desc_pktlen_mask = SZ_4K - 1,
    .tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
    .type = AR7240,
    .fifodata = ar71xx_fifo_ar7100,
    .max_frame_len = 1540,
    .desc_pktlen_mask = SZ_4K - 1,
    .tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
    .type = AR9130,
    .fifodata = ar71xx_fifo_ar9130,
    .max_frame_len = 1540,
    .desc_pktlen_mask = SZ_4K - 1,
    .tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
    .type = AR9330,
    .fifodata = ar71xx_fifo_ar9330,
    .max_frame_len = 1540,
    .desc_pktlen_mask = SZ_4K - 1,
    .tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
    .type = AR9340,
    .fifodata = ar71xx_fifo_ar9330,
    .max_frame_len = SZ_16K - 1,
    .desc_pktlen_mask = SZ_16K - 1,
    .tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
    .type = QCA9530,
    .fifodata = ar71xx_fifo_ar9330,
    .max_frame_len = SZ_16K - 1,
    .desc_pktlen_mask = SZ_16K - 1,
    .tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
    .type = QCA9550,
    .fifodata = ar71xx_fifo_ar9330,
    .max_frame_len = 1540,
    .desc_pktlen_mask = SZ_16K - 1,
    .tx_hang_workaround = true,
};

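/* OF match table binding compatible strings to the per-SoC configs above.
 * As a sketch (the addresses, interrupt number, and clock/reset phandles
 * below are illustrative only and board-specific), a device tree node
 * that binds against this driver might look like:
 *
 *   eth0: ethernet@19000000 {
 *           compatible = "qca,ar9330-eth";
 *           reg = <0x19000000 0x200>;
 *           interrupts = <4>;
 *           resets = <&rst 9>;
 *           reset-names = "mac";
 *           clocks = <&pll 1>;
 *           clock-names = "eth";
 *           phy-mode = "mii";
 *   };
 */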
static const struct of_device_id ag71xx_match[] = {
    { .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
    { .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
    { .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
    { .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
    { .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
    { .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
    { .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
    { .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
    { .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
    { .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
    {}
};

static struct platform_driver ag71xx_driver = {
    .probe      = ag71xx_probe,
    .remove     = ag71xx_remove,
    .driver = {
        .name   = "ag71xx",
        .of_match_table = ag71xx_match,
    }
};

module_platform_driver(ag71xx_driver);
MODULE_LICENSE("GPL v2");