// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <linux/clk.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)  \
{                                   \
    u32 reg = readl_relaxed(priv->base + offset + off);     \
    return reg;                         \
}                                   \
static inline void name##_writel(struct bcm_sysport_priv *priv,     \
                  u32 val, u32 off)         \
{                                   \
    writel_relaxed(val, priv->base + offset + off);         \
}                                   \

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
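
/* The macro above uses token pasting (##) to stamp out a matched
 * name##_readl()/name##_writel() pair per register block, each anchored at
 * a fixed offset from priv->base. A minimal user-space sketch of the same
 * pattern, with an array standing in for the MMIO window (all names below
 * are hypothetical, not part of this driver):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	struct fake_priv { uint32_t regs[64]; };
 *
 *	#define FAKE_IO_MACRO(name, offset)				\
 *	static uint32_t name##_readl(struct fake_priv *p, uint32_t off) \
 *	{								\
 *		return p->regs[(offset + off) / 4];			\
 *	}								\
 *	static void name##_writel(struct fake_priv *p, uint32_t val,	\
 *				  uint32_t off)				\
 *	{								\
 *		p->regs[(offset + off) / 4] = val;			\
 *	}
 *
 *	FAKE_IO_MACRO(umac, 0x40)  // generates umac_readl()/umac_writel()
 *
 *	int main(void)
 *	{
 *		struct fake_priv p = { { 0 } };
 *
 *		umac_writel(&p, 0x1234, 0x8);  // touches regs[(0x40 + 0x8) / 4]
 *		printf("0x%x\n", umac_readl(&p, 0x8));  // prints 0x1234
 *		return 0;
 *	}
 */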

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
    if (priv->is_lite && off >= RDMA_STATUS)
        off += 4;
    return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
    if (priv->is_lite && off >= RDMA_STATUS)
        off += 4;
    writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}
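
/* Illustration of the SYSTEMPORT Lite shift handled above: offsets at or
 * beyond RDMA_STATUS are bumped by 4 bytes before hitting the bus. A
 * stand-alone sketch (FAKE_RDMA_STATUS is a stand-in value for
 * illustration; the real RDMA_STATUS lives in bcmsysport.h):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define FAKE_RDMA_STATUS 0x1000
 *
 *	static uint32_t lite_adjust(int is_lite, uint32_t off)
 *	{
 *		if (is_lite && off >= FAKE_RDMA_STATUS)
 *			off += 4;
 *		return off;
 *	}
 *
 *	int main(void)
 *	{
 *		// below the boundary: unchanged; at/above it: shifted by 4
 *		printf("0x%x 0x%x\n", lite_adjust(1, 0xffc),
 *		       lite_adjust(1, 0x1000));  // prints "0xffc 0x1004"
 *		return 0;
 *	}
 */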

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
    if (!priv->is_lite) {
        return BIT(bit);
    } else {
        if (bit >= ACB_ALGO)
            return BIT(bit + 1);
        else
            return BIT(bit);
    }
}

/* L2-interrupt masking/unmasking helpers; they automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot paths.
 */
#define BCM_SYSPORT_INTR_L2(which)  \
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
                        u32 mask)       \
{                                   \
    priv->irq##which##_mask &= ~(mask);             \
    intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
}                                   \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
                        u32 mask)       \
{                                   \
    intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);   \
    priv->irq##which##_mask |= (mask);              \
}                                   \

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
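
/* The helpers generated above mirror every mask update into
 * priv->irqN_mask, so hot paths can consult the cached copy instead of
 * reading INTRL2_CPU_MASK_STATUS back over the register bus. A user-space
 * sketch of that cached-mask idea (hypothetical names, not driver code):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	struct fake_priv { uint32_t hw_mask; uint32_t irq0_mask; };
 *
 *	static void intrl2_0_mask_set(struct fake_priv *p, uint32_t mask)
 *	{
 *		p->hw_mask |= mask;    // stands in for the MASK_SET write
 *		p->irq0_mask |= mask;  // software copy, no readback needed
 *	}
 *
 *	static void intrl2_0_mask_clear(struct fake_priv *p, uint32_t mask)
 *	{
 *		p->irq0_mask &= ~mask;
 *		p->hw_mask &= ~mask;   // stands in for the MASK_CLEAR write
 *	}
 *
 *	int main(void)
 *	{
 *		struct fake_priv p = { 0, 0 };
 *
 *		intrl2_0_mask_set(&p, 0x3);
 *		intrl2_0_mask_clear(&p, 0x1);
 *		printf("0x%x\n", p.irq0_mask);  // prints 0x2
 *		return 0;
 *	}
 */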

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64 bits explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
                     void __iomem *d,
                     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
    writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
             d + DESC_ADDR_HI_STATUS_LEN);
#endif
    writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
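
/* A quick illustration of the hi/lo split done by dma_desc_set_addr():
 * upper_32_bits()/lower_32_bits() carve a 64-bit bus address into the two
 * 32-bit descriptor words. User-space sketch with the same semantics as
 * the kernel helpers:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
 *	static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
 *
 *	int main(void)
 *	{
 *		uint64_t dma = 0x1bffff000ULL;  // a bus address above 4GB
 *
 *		// prints "hi=0x00000001 lo=0xbffff000"
 *		printf("hi=0x%08x lo=0x%08x\n", upper_32(dma), lower_32(dma));
 *		return 0;
 *	}
 */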

/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
                    netdev_features_t wanted)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);
    u32 reg;

    priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
    reg = rxchk_readl(priv, RXCHK_CONTROL);
    /* Clear L2 header checks, which would prevent BPDUs
     * from being received.
     */
    reg &= ~RXCHK_L2_HDR_DIS;
    if (priv->rx_chk_en)
        reg |= RXCHK_EN;
    else
        reg &= ~RXCHK_EN;

    /* If UniMAC forwards CRC, we need to skip over it to get
     * a valid CHK bit to be set in the per-packet status word
     */
    if (priv->rx_chk_en && priv->crc_fwd)
        reg |= RXCHK_SKIP_FCS;
    else
        reg &= ~RXCHK_SKIP_FCS;

    /* If Broadcom tags are enabled (e.g.: using a switch), make
     * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
     * tag after the Ethernet MAC Source Address.
     */
    if (netdev_uses_dsa(dev))
        reg |= RXCHK_BRCM_TAG_EN;
    else
        reg &= ~RXCHK_BRCM_TAG_EN;

    rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
                    netdev_features_t wanted)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);
    u32 reg;

    /* Hardware transmit checksum requires us to enable the Transmit status
     * block prepended to the packet contents
     */
    priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                    NETIF_F_HW_VLAN_CTAG_TX));
    reg = tdma_readl(priv, TDMA_CONTROL);
    if (priv->tsb_en)
        reg |= tdma_control_bit(priv, TSB_EN);
    else
        reg &= ~tdma_control_bit(priv, TSB_EN);
    /* Indicating that software inserts Broadcom tags is needed for the TX
     * checksum to be computed correctly when using VLAN HW acceleration;
     * otherwise it has no effect, so it can always be left enabled.
     */
    if (netdev_uses_dsa(dev))
        reg |= tdma_control_bit(priv, SW_BRCM_TAG);
    else
        reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
    tdma_writel(priv, reg, TDMA_CONTROL);

    /* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
    if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
        tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
}

static int bcm_sysport_set_features(struct net_device *dev,
                    netdev_features_t features)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);
    int ret;

    ret = clk_prepare_enable(priv->clk);
    if (ret)
        return ret;

    /* Read CRC forward */
    if (!priv->is_lite)
        priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
    else
        priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
                  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

    bcm_sysport_set_rx_csum(dev, features);
    bcm_sysport_set_tx_csum(dev, features);

    clk_disable_unprepare(priv->clk);

    return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
    /* general stats */
    STAT_NETDEV64(rx_packets),
    STAT_NETDEV64(tx_packets),
    STAT_NETDEV64(rx_bytes),
    STAT_NETDEV64(tx_bytes),
    STAT_NETDEV(rx_errors),
    STAT_NETDEV(tx_errors),
    STAT_NETDEV(rx_dropped),
    STAT_NETDEV(tx_dropped),
    STAT_NETDEV(multicast),
    /* UniMAC RSV counters */
    STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
    STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
    STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
    STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
    STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
    STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
    STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
    STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
    STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
    STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
    STAT_MIB_RX("rx_pkts", mib.rx.pkt),
    STAT_MIB_RX("rx_bytes", mib.rx.bytes),
    STAT_MIB_RX("rx_multicast", mib.rx.mca),
    STAT_MIB_RX("rx_broadcast", mib.rx.bca),
    STAT_MIB_RX("rx_fcs", mib.rx.fcs),
    STAT_MIB_RX("rx_control", mib.rx.cf),
    STAT_MIB_RX("rx_pause", mib.rx.pf),
    STAT_MIB_RX("rx_unknown", mib.rx.uo),
    STAT_MIB_RX("rx_align", mib.rx.aln),
    STAT_MIB_RX("rx_outrange", mib.rx.flr),
    STAT_MIB_RX("rx_code", mib.rx.cde),
    STAT_MIB_RX("rx_carrier", mib.rx.fcr),
    STAT_MIB_RX("rx_oversize", mib.rx.ovr),
    STAT_MIB_RX("rx_jabber", mib.rx.jbr),
    STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
    STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
    STAT_MIB_RX("rx_unicast", mib.rx.uc),
    STAT_MIB_RX("rx_ppp", mib.rx.ppp),
    STAT_MIB_RX("rx_crc", mib.rx.rcrc),
    /* UniMAC TSV counters */
    STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
    STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
    STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
    STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
    STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
    STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
    STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
    STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
    STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
    STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
    STAT_MIB_TX("tx_pkts", mib.tx.pkts),
    STAT_MIB_TX("tx_multicast", mib.tx.mca),
    STAT_MIB_TX("tx_broadcast", mib.tx.bca),
    STAT_MIB_TX("tx_pause", mib.tx.pf),
    STAT_MIB_TX("tx_control", mib.tx.cf),
    STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
    STAT_MIB_TX("tx_oversize", mib.tx.ovr),
    STAT_MIB_TX("tx_defer", mib.tx.drf),
    STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
    STAT_MIB_TX("tx_single_col", mib.tx.scl),
    STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
    STAT_MIB_TX("tx_late_col", mib.tx.lcl),
    STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
    STAT_MIB_TX("tx_frags", mib.tx.frg),
    STAT_MIB_TX("tx_total_col", mib.tx.ncl),
    STAT_MIB_TX("tx_jabber", mib.tx.jbr),
    STAT_MIB_TX("tx_bytes", mib.tx.bytes),
    STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
    STAT_MIB_TX("tx_unicast", mib.tx.uc),
    /* UniMAC RUNT counters */
    STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
    STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
    STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
    STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
    /* RXCHK misc statistics */
    STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
    STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
           RXCHK_OTHER_DISC_CNTR),
    /* RBUF misc statistics */
    STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
    STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
    STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
    STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
    STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
    STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
    STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
    /* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN   ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
                    struct ethtool_drvinfo *info)
{
    strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
    strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);

    return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);

    priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
    switch (type) {
    case BCM_SYSPORT_STAT_NETDEV:
    case BCM_SYSPORT_STAT_NETDEV64:
    case BCM_SYSPORT_STAT_RXCHK:
    case BCM_SYSPORT_STAT_RBUF:
    case BCM_SYSPORT_STAT_SOFT:
        return true;
    default:
        return false;
    }
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);
    const struct bcm_sysport_stats *s;
    unsigned int i, j;

    switch (string_set) {
    case ETH_SS_STATS:
        for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
            s = &bcm_sysport_gstrings_stats[i];
            if (priv->is_lite &&
                !bcm_sysport_lite_stat_valid(s->type))
                continue;
            j++;
        }
        /* Include per-queue statistics */
        return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
    default:
        return -EOPNOTSUPP;
    }
}

static void bcm_sysport_get_strings(struct net_device *dev,
                    u32 stringset, u8 *data)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);
    const struct bcm_sysport_stats *s;
    char buf[128];
    int i, j;

    switch (stringset) {
    case ETH_SS_STATS:
        for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
            s = &bcm_sysport_gstrings_stats[i];
            if (priv->is_lite &&
                !bcm_sysport_lite_stat_valid(s->type))
                continue;

            memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
                   ETH_GSTRING_LEN);
            j++;
        }

        for (i = 0; i < dev->num_tx_queues; i++) {
            snprintf(buf, sizeof(buf), "txq%d_packets", i);
            memcpy(data + j * ETH_GSTRING_LEN, buf,
                   ETH_GSTRING_LEN);
            j++;

            snprintf(buf, sizeof(buf), "txq%d_bytes", i);
            memcpy(data + j * ETH_GSTRING_LEN, buf,
                   ETH_GSTRING_LEN);
            j++;
        }
        break;
    default:
        break;
    }
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
    int i, j = 0;

    for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
        const struct bcm_sysport_stats *s;
        u8 offset = 0;
        u32 val = 0;
        char *p;

        s = &bcm_sysport_gstrings_stats[i];
        switch (s->type) {
        case BCM_SYSPORT_STAT_NETDEV:
        case BCM_SYSPORT_STAT_NETDEV64:
        case BCM_SYSPORT_STAT_SOFT:
            continue;
        case BCM_SYSPORT_STAT_MIB_RX:
        case BCM_SYSPORT_STAT_MIB_TX:
        case BCM_SYSPORT_STAT_RUNT:
            if (priv->is_lite)
                continue;

            if (s->type != BCM_SYSPORT_STAT_MIB_RX)
                offset = UMAC_MIB_STAT_OFFSET;
            val = umac_readl(priv, UMAC_MIB_START + j + offset);
            break;
        case BCM_SYSPORT_STAT_RXCHK:
            val = rxchk_readl(priv, s->reg_offset);
            if (val == ~0)
                rxchk_writel(priv, 0, s->reg_offset);
            break;
        case BCM_SYSPORT_STAT_RBUF:
            val = rbuf_readl(priv, s->reg_offset);
            if (val == ~0)
                rbuf_writel(priv, 0, s->reg_offset);
            break;
        }

        j += s->stat_sizeof;
        p = (char *)priv + s->stat_offset;
        *(u32 *)p = val;
    }

    netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}
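
/* The loop above walks the UMAC MIB registers by accumulating
 * s->stat_sizeof into j, which is why the entry order in
 * bcm_sysport_gstrings_stats[] must match hardware register order. A
 * sketch of that offset walk (the 4-byte counter size below is an
 * assumption for illustration; the real sizes come from the stats
 * descriptors in bcmsysport.h):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int j = 0, i;
 *		const unsigned int stat_sizeof = 4;  // one 32-bit counter each
 *
 *		for (i = 0; i < 3; i++) {
 *			printf("counter %u at UMAC_MIB_START + 0x%x\n", i, j);
 *			j += stat_sizeof;
 *		}
 *		return 0;
 *	}
 */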

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
                    u64 *tx_bytes, u64 *tx_packets)
{
    struct bcm_sysport_tx_ring *ring;
    u64 bytes = 0, packets = 0;
    unsigned int start;
    unsigned int q;

    for (q = 0; q < priv->netdev->num_tx_queues; q++) {
        ring = &priv->tx_rings[q];
        do {
            start = u64_stats_fetch_begin_irq(&priv->syncp);
            bytes = ring->bytes;
            packets = ring->packets;
        } while (u64_stats_fetch_retry_irq(&priv->syncp, start));

        *tx_bytes += bytes;
        *tx_packets += packets;
    }
}

static void bcm_sysport_get_stats(struct net_device *dev,
                  struct ethtool_stats *stats, u64 *data)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);
    struct bcm_sysport_stats64 *stats64 = &priv->stats64;
    struct u64_stats_sync *syncp = &priv->syncp;
    struct bcm_sysport_tx_ring *ring;
    u64 tx_bytes = 0, tx_packets = 0;
    unsigned int start;
    int i, j;

    if (netif_running(dev)) {
        bcm_sysport_update_mib_counters(priv);
        bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
        stats64->tx_bytes = tx_bytes;
        stats64->tx_packets = tx_packets;
    }

    for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
        const struct bcm_sysport_stats *s;
        char *p;

        s = &bcm_sysport_gstrings_stats[i];
        if (s->type == BCM_SYSPORT_STAT_NETDEV)
            p = (char *)&dev->stats;
        else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
            p = (char *)stats64;
        else
            p = (char *)priv;

        if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
            continue;
        p += s->stat_offset;

        if (s->stat_sizeof == sizeof(u64) &&
            s->type == BCM_SYSPORT_STAT_NETDEV64) {
            do {
                start = u64_stats_fetch_begin_irq(syncp);
                data[i] = *(u64 *)p;
            } while (u64_stats_fetch_retry_irq(syncp, start));
        } else
            data[i] = *(u32 *)p;
        j++;
    }

    /* For SYSTEMPORT Lite, since we have holes in our statistics, j would
     * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
     * needs to point to how many total statistics we have minus the
     * number of per-TX-queue statistics.
     */
    j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
        dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

    for (i = 0; i < dev->num_tx_queues; i++) {
        ring = &priv->tx_rings[i];
        data[j] = ring->packets;
        j++;
        data[j] = ring->bytes;
        j++;
    }
}

static void bcm_sysport_get_wol(struct net_device *dev,
                struct ethtool_wolinfo *wol)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);

    wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
    wol->wolopts = priv->wolopts;

    if (!(priv->wolopts & WAKE_MAGICSECURE))
        return;

    memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
                   struct ethtool_wolinfo *wol)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);
    struct device *kdev = &priv->pdev->dev;
    u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

    if (!device_can_wakeup(kdev))
        return -ENOTSUPP;

    if (wol->wolopts & ~supported)
        return -EINVAL;

    if (wol->wolopts & WAKE_MAGICSECURE)
        memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

    /* Flag the device and relevant IRQ as wakeup capable */
    if (wol->wolopts) {
        device_set_wakeup_enable(kdev, 1);
        if (priv->wol_irq_disabled)
            enable_irq_wake(priv->wol_irq);
        priv->wol_irq_disabled = 0;
    } else {
        device_set_wakeup_enable(kdev, 0);
        /* Avoid unbalanced disable_irq_wake calls */
        if (!priv->wol_irq_disabled)
            disable_irq_wake(priv->wol_irq);
        priv->wol_irq_disabled = 1;
    }

    priv->wolopts = wol->wolopts;

    return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
                    u32 usecs, u32 pkts)
{
    u32 reg;

    reg = rdma_readl(priv, RDMA_MBDONE_INTR);
    reg &= ~(RDMA_INTR_THRESH_MASK |
         RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
    reg |= pkts;
    reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
    rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
                    struct ethtool_coalesce *ec)
{
    struct bcm_sysport_priv *priv = ring->priv;
    u32 reg;

    reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
    reg &= ~(RING_INTR_THRESH_MASK |
         RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
    reg |= ec->tx_max_coalesced_frames;
    reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
                RING_TIMEOUT_SHIFT;
    tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
                    struct ethtool_coalesce *ec,
                    struct kernel_ethtool_coalesce *kernel_coal,
                    struct netlink_ext_ack *extack)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);
    u32 reg;

    reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

    ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
    ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

    reg = rdma_readl(priv, RDMA_MBDONE_INTR);

    ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
    ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
    ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

    return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
                    struct ethtool_coalesce *ec,
                    struct kernel_ethtool_coalesce *kernel_coal,
                    struct netlink_ext_ack *extack)
{
    struct bcm_sysport_priv *priv = netdev_priv(dev);
    struct dim_cq_moder moder;
    u32 usecs, pkts;
    unsigned int i;

    /* Base system clock is 125 MHz, DMA timeout is this reference clock
     * divided by 1024, which yields roughly 8.192 us; our maximum value
     * has to fit in the RING_TIMEOUT_MASK (16 bits).
     */
    if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
        ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
        ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
        ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
        return -EINVAL;

    if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
        (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
        return -EINVAL;

    for (i = 0; i < dev->num_tx_queues; i++)
        bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

    priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
    priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
    usecs = priv->rx_coalesce_usecs;
    pkts = priv->rx_max_coalesced_frames;

    if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
        moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
        usecs = moder.usec;
        pkts = moder.pkts;
    }

    priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

    /* Apply desired coalescing parameters */
    bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

    return 0;
}
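
/* Worked example of the timeout conversion used above: with the 125 MHz
 * reference divided by 1024, one timeout tick is 1024 / 125e6 s = 8.192 us,
 * hence DIV_ROUND_UP(usecs * 1000, 8192). Stand-alone sketch:
 *
 *	#include <stdio.h>
 *
 *	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
 *
 *	int main(void)
 *	{
 *		unsigned int usecs = 100;
 *
 *		// 100 us -> ceil(100000 / 8192) = 13 ticks (~106.5 us actual)
 *		printf("%u ticks\n", DIV_ROUND_UP(usecs * 1000, 8192));
 *		return 0;
 *	}
 */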

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
    dev_consume_skb_any(cb->skb);
    cb->skb = NULL;
    dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
                         struct bcm_sysport_cb *cb)
{
    struct device *kdev = &priv->pdev->dev;
    struct net_device *ndev = priv->netdev;
    struct sk_buff *skb, *rx_skb;
    dma_addr_t mapping;

    /* Allocate a new SKB for a new packet */
    skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
                 GFP_ATOMIC | __GFP_NOWARN);
    if (!skb) {
        priv->mib.alloc_rx_buff_failed++;
        netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
        return NULL;
    }

    mapping = dma_map_single(kdev, skb->data,
                 RX_BUF_LENGTH, DMA_FROM_DEVICE);
    if (dma_mapping_error(kdev, mapping)) {
        priv->mib.rx_dma_failed++;
        dev_kfree_skb_any(skb);
        netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
        return NULL;
    }

    /* Grab the current SKB on the ring */
    rx_skb = cb->skb;
    if (likely(rx_skb))
        dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                 RX_BUF_LENGTH, DMA_FROM_DEVICE);

    /* Put the new SKB on the ring */
    cb->skb = skb;
    dma_unmap_addr_set(cb, dma_addr, mapping);
    dma_desc_set_addr(priv, cb->bd_addr, mapping);

    netif_dbg(priv, rx_status, ndev, "RX refill\n");

    /* Return the current SKB to the caller */
    return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
    struct bcm_sysport_cb *cb;
    struct sk_buff *skb;
    unsigned int i;

    for (i = 0; i < priv->num_rx_bds; i++) {
        cb = &priv->rx_cbs[i];
        skb = bcm_sysport_rx_refill(priv, cb);
        dev_kfree_skb(skb);
        if (!cb->skb)
            return -ENOMEM;
    }

    return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                    unsigned int budget)
{
    struct bcm_sysport_stats64 *stats64 = &priv->stats64;
    struct net_device *ndev = priv->netdev;
    unsigned int processed = 0, to_process;
    unsigned int processed_bytes = 0;
    struct bcm_sysport_cb *cb;
    struct sk_buff *skb;
    unsigned int p_index;
    u16 len, status;
    struct bcm_rsb *rsb;

    /* Clear status before servicing to reduce spurious interrupts */
    intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

    /* Determine how much we should process since last call, SYSTEMPORT Lite
     * groups the producer and consumer indexes into the same 32-bit register,
     * which we access using RDMA_CONS_INDEX
     */
    if (!priv->is_lite)
        p_index = rdma_readl(priv, RDMA_PROD_INDEX);
    else
        p_index = rdma_readl(priv, RDMA_CONS_INDEX);
    p_index &= RDMA_PROD_INDEX_MASK;

    to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

    netif_dbg(priv, rx_status, ndev,
          "p_index=%d rx_c_index=%d to_process=%d\n",
          p_index, priv->rx_c_index, to_process);

    while ((processed < to_process) && (processed < budget)) {
        cb = &priv->rx_cbs[priv->rx_read_ptr];
        skb = bcm_sysport_rx_refill(priv, cb);

        /* We do not have a backing SKB, so we do not have a corresponding
         * DMA mapping for this incoming packet since
         * bcm_sysport_rx_refill always either has both skb and mapping
         * or none.
         */
        if (unlikely(!skb)) {
            netif_err(priv, rx_err, ndev, "out of memory!\n");
            ndev->stats.rx_dropped++;
            ndev->stats.rx_errors++;
            goto next;
        }

        /* Extract the Receive Status Block prepended */
        rsb = (struct bcm_rsb *)skb->data;
        len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
        status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
              DESC_STATUS_MASK;

        netif_dbg(priv, rx_status, ndev,
              "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
              p_index, priv->rx_c_index, priv->rx_read_ptr,
              len, status);

        if (unlikely(len > RX_BUF_LENGTH)) {
            netif_err(priv, rx_status, ndev, "oversized packet\n");
            ndev->stats.rx_length_errors++;
            ndev->stats.rx_errors++;
            dev_kfree_skb_any(skb);
            goto next;
        }

        if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
            netif_err(priv, rx_status, ndev, "fragmented packet!\n");
            ndev->stats.rx_dropped++;
            ndev->stats.rx_errors++;
            dev_kfree_skb_any(skb);
            goto next;
        }

        if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
            netif_err(priv, rx_err, ndev, "error packet\n");
            if (status & RX_STATUS_OVFLOW)
                ndev->stats.rx_over_errors++;
            ndev->stats.rx_dropped++;
            ndev->stats.rx_errors++;
            dev_kfree_skb_any(skb);
            goto next;
        }

        skb_put(skb, len);

        /* Hardware validated our checksum */
        if (likely(status & DESC_L4_CSUM))
            skb->ip_summed = CHECKSUM_UNNECESSARY;

        /* Hardware prepends packets with 2 bytes before the Ethernet
         * header, plus we have the Receive Status Block; strip off all
         * of this from the SKB.
         */
        skb_pull(skb, sizeof(*rsb) + 2);
        len -= (sizeof(*rsb) + 2);
        processed_bytes += len;

        /* UniMAC may forward CRC */
        if (priv->crc_fwd) {
            skb_trim(skb, len - ETH_FCS_LEN);
            len -= ETH_FCS_LEN;
        }

        skb->protocol = eth_type_trans(skb, ndev);
        ndev->stats.rx_packets++;
        ndev->stats.rx_bytes += len;
        u64_stats_update_begin(&priv->syncp);
        stats64->rx_packets++;
        stats64->rx_bytes += len;
        u64_stats_update_end(&priv->syncp);

        napi_gro_receive(&priv->napi, skb);
next:
        processed++;
        priv->rx_read_ptr++;

        if (priv->rx_read_ptr == priv->num_rx_bds)
            priv->rx_read_ptr = 0;
    }

    priv->dim.packets = processed;
    priv->dim.bytes = processed_bytes;

    return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
                       struct bcm_sysport_cb *cb,
                       unsigned int *bytes_compl,
                       unsigned int *pkts_compl)
{
    struct bcm_sysport_priv *priv = ring->priv;
    struct device *kdev = &priv->pdev->dev;

    if (cb->skb) {
        *bytes_compl += cb->skb->len;
        dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                 dma_unmap_len(cb, dma_len),
                 DMA_TO_DEVICE);
        (*pkts_compl)++;
        bcm_sysport_free_cb(cb);
    /* SKB fragment */
    } else if (dma_unmap_addr(cb, dma_addr)) {
        *bytes_compl += dma_unmap_len(cb, dma_len);
        dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
                   dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
        dma_unmap_addr_set(cb, dma_addr, 0);
    }
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                         struct bcm_sysport_tx_ring *ring)
{
    unsigned int pkts_compl = 0, bytes_compl = 0;
    struct net_device *ndev = priv->netdev;
    unsigned int txbds_processed = 0;
    struct bcm_sysport_cb *cb;
    unsigned int txbds_ready;
    unsigned int c_index;
    u32 hw_ind;

    /* Clear status before servicing to reduce spurious interrupts */
    if (!ring->priv->is_lite)
        intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
    else
        intrl2_0_writel(ring->priv, BIT(ring->index +
                INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

    /* Compute how many descriptors have been processed since last call */
    hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
    c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
    txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

    netif_dbg(priv, tx_done, ndev,
          "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
          ring->index, ring->c_index, c_index, txbds_ready);

    while (txbds_processed < txbds_ready) {
        cb = &ring->cbs[ring->clean_index];
        bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

        ring->desc_count++;
        txbds_processed++;

        if (likely(ring->clean_index < ring->size - 1))
            ring->clean_index++;
        else
            ring->clean_index = 0;
    }

    u64_stats_update_begin(&priv->syncp);
    ring->packets += pkts_compl;
    ring->bytes += bytes_compl;
    u64_stats_update_end(&priv->syncp);

    ring->c_index = c_index;

    netif_dbg(priv, tx_done, ndev,
          "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
          ring->index, ring->c_index, pkts_compl, bytes_compl);

    return pkts_compl;
}
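
/* The consumer-index math above is wrap-safe: with unsigned arithmetic and
 * a power-of-two mask, (hw - sw) & RING_CONS_INDEX_MASK yields the number
 * of completed descriptors even after the 16-bit hardware counter wraps.
 * A stand-alone sketch (the 0xffff mask below mirrors the driver's
 * definition, but is restated here only for illustration):
 *
 *	#include <stdio.h>
 *
 *	#define FAKE_CONS_INDEX_MASK 0xffff
 *
 *	int main(void)
 *	{
 *		unsigned int hw = 0x0005, sw = 0xfffe;  // hw wrapped past sw
 *
 *		// prints "ready=7"
 *		printf("ready=%u\n", (hw - sw) & FAKE_CONS_INDEX_MASK);
 *		return 0;
 *	}
 */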

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                       struct bcm_sysport_tx_ring *ring)
{
    struct netdev_queue *txq;
    unsigned int released;
    unsigned long flags;

    txq = netdev_get_tx_queue(priv->netdev, ring->index);

    spin_lock_irqsave(&ring->lock, flags);
    released = __bcm_sysport_tx_reclaim(priv, ring);
    if (released)
        netif_tx_wake_queue(txq);

    spin_unlock_irqrestore(&ring->lock, flags);

    return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
                 struct bcm_sysport_tx_ring *ring)
{
    unsigned long flags;

    spin_lock_irqsave(&ring->lock, flags);
    __bcm_sysport_tx_reclaim(priv, ring);
    spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
    struct bcm_sysport_tx_ring *ring =
        container_of(napi, struct bcm_sysport_tx_ring, napi);
    unsigned int work_done = 0;

    work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

    if (work_done == 0) {
        napi_complete(napi);
        /* re-enable TX interrupt */
        if (!ring->priv->is_lite)
            intrl2_1_mask_clear(ring->priv, BIT(ring->index));
        else
            intrl2_0_mask_clear(ring->priv, BIT(ring->index +
                        INTRL2_0_TDMA_MBDONE_SHIFT));

        return 0;
    }

    return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
    unsigned int q;

    for (q = 0; q < priv->netdev->num_tx_queues; q++)
        bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
    struct bcm_sysport_priv *priv =
        container_of(napi, struct bcm_sysport_priv, napi);
    struct dim_sample dim_sample = {};
    unsigned int work_done = 0;

    work_done = bcm_sysport_desc_rx(priv, budget);

    priv->rx_c_index += work_done;
    priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

    /* SYSTEMPORT Lite groups the producer/consumer index, producer is
     * maintained by HW, but writes to it will be ignored while RDMA
     * is active
     */
    if (!priv->is_lite)
        rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
    else
        rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

    if (work_done < budget) {
        napi_complete_done(napi, work_done);
        /* re-enable RX interrupts */
        intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
    }

    if (priv->dim.use_dim) {
        dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
                  priv->dim.bytes, &dim_sample);
        net_dim(&priv->dim.dim, dim_sample);
    }

    return work_done;
}

static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
    u32 reg, bit;

    reg = umac_readl(priv, UMAC_MPD_CTRL);
    if (enable)
        reg |= MPD_EN;
    else
        reg &= ~MPD_EN;
    umac_writel(priv, reg, UMAC_MPD_CTRL);

    if (priv->is_lite)
        bit = RBUF_ACPI_EN_LITE;
    else
        bit = RBUF_ACPI_EN;

    reg = rbuf_readl(priv, RBUF_CONTROL);
    if (enable)
        reg |= bit;
    else
        reg &= ~bit;
    rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
    unsigned int index;
    u32 reg;

    /* Disable RXCHK, active filters and Broadcom tag matching */
    reg = rxchk_readl(priv, RXCHK_CONTROL);
    reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
         RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
    rxchk_writel(priv, reg, RXCHK_CONTROL);

    /* Make sure we restore the correct CID index in case HW lost
     * its context during deep idle state
     */
1089     for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
1090         rxchk_writel(priv, priv->filters_loc[index] <<
1091                  RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
1092         rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
1093     }
1094 
1095     /* Clear the MagicPacket detection logic */
1096     mpd_enable_set(priv, false);
1097 
1098     reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
1099     if (reg & INTRL2_0_MPD)
1100         netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
1101 
1102     if (reg & INTRL2_0_BRCM_MATCH_TAG) {
1103         reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
1104                   RXCHK_BRCM_TAG_MATCH_MASK;
1105         netdev_info(priv->netdev,
1106                 "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
1107     }
1108 
1109     netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
1110 }
1111 
1112 static void bcm_sysport_dim_work(struct work_struct *work)
1113 {
1114     struct dim *dim = container_of(work, struct dim, work);
1115     struct bcm_sysport_net_dim *ndim =
1116             container_of(dim, struct bcm_sysport_net_dim, dim);
1117     struct bcm_sysport_priv *priv =
1118             container_of(ndim, struct bcm_sysport_priv, dim);
1119     struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
1120                                     dim->profile_ix);
1121 
1122     bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
1123     dim->state = DIM_START_MEASURE;
1124 }
1125 
1126 /* RX and misc interrupt routine */
1127 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
1128 {
1129     struct net_device *dev = dev_id;
1130     struct bcm_sysport_priv *priv = netdev_priv(dev);
1131     struct bcm_sysport_tx_ring *txr;
1132     unsigned int ring, ring_bit;
1133 
1134     priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
1135               ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
1136     intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1137 
1138     if (unlikely(priv->irq0_stat == 0)) {
1139         netdev_warn(priv->netdev, "spurious RX interrupt\n");
1140         return IRQ_NONE;
1141     }
1142 
1143     if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
1144         priv->dim.event_ctr++;
1145         if (likely(napi_schedule_prep(&priv->napi))) {
1146             /* disable RX interrupts */
1147             intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
1148             __napi_schedule_irqoff(&priv->napi);
1149         }
1150     }
1151 
1152     /* TX ring is full, perform a full reclaim since we do not know
1153      * which one would trigger this interrupt
1154      */
1155     if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
1156         bcm_sysport_tx_reclaim_all(priv);
1157 
1158     if (!priv->is_lite)
1159         goto out;
1160 
1161     for (ring = 0; ring < dev->num_tx_queues; ring++) {
1162         ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
1163         if (!(priv->irq0_stat & ring_bit))
1164             continue;
1165 
1166         txr = &priv->tx_rings[ring];
1167 
1168         if (likely(napi_schedule_prep(&txr->napi))) {
1169             intrl2_0_mask_set(priv, ring_bit);
1170             __napi_schedule(&txr->napi);
1171         }
1172     }
1173 out:
1174     return IRQ_HANDLED;
1175 }
1176 
1177 /* TX interrupt service routine */
1178 static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
1179 {
1180     struct net_device *dev = dev_id;
1181     struct bcm_sysport_priv *priv = netdev_priv(dev);
1182     struct bcm_sysport_tx_ring *txr;
1183     unsigned int ring;
1184 
1185     priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
1186                 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
1187     intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1188 
1189     if (unlikely(priv->irq1_stat == 0)) {
1190         netdev_warn(priv->netdev, "spurious TX interrupt\n");
1191         return IRQ_NONE;
1192     }
1193 
1194     for (ring = 0; ring < dev->num_tx_queues; ring++) {
1195         if (!(priv->irq1_stat & BIT(ring)))
1196             continue;
1197 
1198         txr = &priv->tx_rings[ring];
1199 
1200         if (likely(napi_schedule_prep(&txr->napi))) {
1201             intrl2_1_mask_set(priv, BIT(ring));
1202             __napi_schedule_irqoff(&txr->napi);
1203         }
1204     }
1205 
1206     return IRQ_HANDLED;
1207 }
1208 
1209 static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
1210 {
1211     struct bcm_sysport_priv *priv = dev_id;
1212 
1213     pm_wakeup_event(&priv->pdev->dev, 0);
1214 
1215     return IRQ_HANDLED;
1216 }
1217 
1218 #ifdef CONFIG_NET_POLL_CONTROLLER
1219 static void bcm_sysport_poll_controller(struct net_device *dev)
1220 {
1221     struct bcm_sysport_priv *priv = netdev_priv(dev);
1222 
1223     disable_irq(priv->irq0);
1224     bcm_sysport_rx_isr(priv->irq0, priv);
1225     enable_irq(priv->irq0);
1226 
1227     if (!priv->is_lite) {
1228         disable_irq(priv->irq1);
1229         bcm_sysport_tx_isr(priv->irq1, priv);
1230         enable_irq(priv->irq1);
1231     }
1232 }
1233 #endif
1234 
1235 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
1236                           struct net_device *dev)
1237 {
1238     struct bcm_sysport_priv *priv = netdev_priv(dev);
1239     struct sk_buff *nskb;
1240     struct bcm_tsb *tsb;
1241     u32 csum_info;
1242     u8 ip_proto;
1243     u16 csum_start;
1244     __be16 ip_ver;
1245 
1246     /* Re-allocate SKB if needed */
1247     if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
1248         nskb = skb_realloc_headroom(skb, sizeof(*tsb));
1249         if (!nskb) {
1250             dev_kfree_skb_any(skb);
1251             priv->mib.tx_realloc_tsb_failed++;
1252             dev->stats.tx_errors++;
1253             dev->stats.tx_dropped++;
1254             return NULL;
1255         }
1256         dev_consume_skb_any(skb);
1257         skb = nskb;
1258         priv->mib.tx_realloc_tsb++;
1259     }
1260 
1261     tsb = skb_push(skb, sizeof(*tsb));
1262     /* Zero-out TSB by default */
1263     memset(tsb, 0, sizeof(*tsb));
1264 
1265     if (skb_vlan_tag_present(skb)) {
1266         tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
1267         tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
1268     }
1269 
1270     if (skb->ip_summed == CHECKSUM_PARTIAL) {
1271         ip_ver = skb->protocol;
1272         switch (ip_ver) {
1273         case htons(ETH_P_IP):
1274             ip_proto = ip_hdr(skb)->protocol;
1275             break;
1276         case htons(ETH_P_IPV6):
1277             ip_proto = ipv6_hdr(skb)->nexthdr;
1278             break;
1279         default:
1280             return skb;
1281         }
1282 
1283         /* Get the checksum offset and the L4 (transport) offset */
1284         csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
1285         /* Account for the HW inserted VLAN tag */
1286         if (skb_vlan_tag_present(skb))
1287             csum_start += VLAN_HLEN;
1288         csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
1289         csum_info |= (csum_start << L4_PTR_SHIFT);
1290 
1291         if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1292             csum_info |= L4_LENGTH_VALID;
1293             if (ip_proto == IPPROTO_UDP &&
1294                 ip_ver == htons(ETH_P_IP))
1295                 csum_info |= L4_UDP;
1296         } else {
1297             csum_info = 0;
1298         }
1299 
1300         tsb->l4_ptr_dest_map = csum_info;
1301     }
1302 
1303     return skb;
1304 }
1305 
1306 static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
1307                     struct net_device *dev)
1308 {
1309     struct bcm_sysport_priv *priv = netdev_priv(dev);
1310     struct device *kdev = &priv->pdev->dev;
1311     struct bcm_sysport_tx_ring *ring;
1312     unsigned long flags, desc_flags;
1313     struct bcm_sysport_cb *cb;
1314     struct netdev_queue *txq;
1315     u32 len_status, addr_lo;
1316     unsigned int skb_len;
1317     dma_addr_t mapping;
1318     u16 queue;
1319     int ret;
1320 
1321     queue = skb_get_queue_mapping(skb);
1322     txq = netdev_get_tx_queue(dev, queue);
1323     ring = &priv->tx_rings[queue];
1324 
1325     /* lock against tx reclaim in BH context and TX ring full interrupt */
1326     spin_lock_irqsave(&ring->lock, flags);
1327     if (unlikely(ring->desc_count == 0)) {
1328         netif_tx_stop_queue(txq);
1329         netdev_err(dev, "queue %d awake and ring full!\n", queue);
1330         ret = NETDEV_TX_BUSY;
1331         goto out;
1332     }
1333 
1334     /* Insert TSB and checksum infos */
1335     if (priv->tsb_en) {
1336         skb = bcm_sysport_insert_tsb(skb, dev);
1337         if (!skb) {
1338             ret = NETDEV_TX_OK;
1339             goto out;
1340         }
1341     }
1342 
1343     skb_len = skb->len;
1344 
1345     mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1346     if (dma_mapping_error(kdev, mapping)) {
1347         priv->mib.tx_dma_failed++;
1348         netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
1349               skb->data, skb_len);
1350         ret = NETDEV_TX_OK;
1351         goto out;
1352     }
1353 
1354     /* Remember the SKB for future freeing */
1355     cb = &ring->cbs[ring->curr_desc];
1356     cb->skb = skb;
1357     dma_unmap_addr_set(cb, dma_addr, mapping);
1358     dma_unmap_len_set(cb, dma_len, skb_len);
1359 
1360     addr_lo = lower_32_bits(mapping);
1361     len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
1362     len_status |= (skb_len << DESC_LEN_SHIFT);
1363     len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
1364                DESC_STATUS_SHIFT;
1365     if (skb->ip_summed == CHECKSUM_PARTIAL)
1366         len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
1367     if (skb_vlan_tag_present(skb))
1368         len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);
1369 
1370     ring->curr_desc++;
1371     if (ring->curr_desc == ring->size)
1372         ring->curr_desc = 0;
1373     ring->desc_count--;
1374 
1375     /* Ports are latched, so write upper address first */
1376     spin_lock_irqsave(&priv->desc_lock, desc_flags);
1377     tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
1378     tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
1379     spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
1380 
1381     /* Check ring space and update SW control flow */
1382     if (ring->desc_count == 0)
1383         netif_tx_stop_queue(txq);
1384 
1385     netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
1386           ring->index, ring->desc_count, ring->curr_desc);
1387 
1388     ret = NETDEV_TX_OK;
1389 out:
1390     spin_unlock_irqrestore(&ring->lock, flags);
1391     return ret;
1392 }
1393 
1394 static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
1395 {
1396     netdev_warn(dev, "transmit timeout!\n");
1397 
1398     netif_trans_update(dev);
1399     dev->stats.tx_errors++;
1400 
1401     netif_tx_wake_all_queues(dev);
1402 }
1403 
1404 /* phylib adjust link callback */
1405 static void bcm_sysport_adj_link(struct net_device *dev)
1406 {
1407     struct bcm_sysport_priv *priv = netdev_priv(dev);
1408     struct phy_device *phydev = dev->phydev;
1409     unsigned int changed = 0;
1410     u32 cmd_bits = 0, reg;
1411 
1412     if (priv->old_link != phydev->link) {
1413         changed = 1;
1414         priv->old_link = phydev->link;
1415     }
1416 
1417     if (priv->old_duplex != phydev->duplex) {
1418         changed = 1;
1419         priv->old_duplex = phydev->duplex;
1420     }
1421 
1422     if (priv->is_lite)
1423         goto out;
1424 
1425     switch (phydev->speed) {
1426     case SPEED_2500:
1427         cmd_bits = CMD_SPEED_2500;
1428         break;
1429     case SPEED_1000:
1430         cmd_bits = CMD_SPEED_1000;
1431         break;
1432     case SPEED_100:
1433         cmd_bits = CMD_SPEED_100;
1434         break;
1435     case SPEED_10:
1436         cmd_bits = CMD_SPEED_10;
1437         break;
1438     default:
1439         break;
1440     }
1441     cmd_bits <<= CMD_SPEED_SHIFT;
1442 
1443     if (phydev->duplex == DUPLEX_HALF)
1444         cmd_bits |= CMD_HD_EN;
1445 
1446     if (priv->old_pause != phydev->pause) {
1447         changed = 1;
1448         priv->old_pause = phydev->pause;
1449     }
1450 
1451     if (!phydev->pause)
1452         cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
1453 
1454     if (!changed)
1455         return;
1456 
1457     if (phydev->link) {
1458         reg = umac_readl(priv, UMAC_CMD);
1459         reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
1460             CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
1461             CMD_TX_PAUSE_IGNORE);
1462         reg |= cmd_bits;
1463         umac_writel(priv, reg, UMAC_CMD);
1464     }
1465 out:
1466     if (changed)
1467         phy_print_status(phydev);
1468 }
1469 
1470 static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
1471                  void (*cb)(struct work_struct *work))
1472 {
1473     struct bcm_sysport_net_dim *dim = &priv->dim;
1474 
1475     INIT_WORK(&dim->dim.work, cb);
1476     dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1477     dim->event_ctr = 0;
1478     dim->packets = 0;
1479     dim->bytes = 0;
1480 }
1481 
1482 static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
1483 {
1484     struct bcm_sysport_net_dim *dim = &priv->dim;
1485     struct dim_cq_moder moder;
1486     u32 usecs, pkts;
1487 
1488     usecs = priv->rx_coalesce_usecs;
1489     pkts = priv->rx_max_coalesced_frames;
1490 
1491     /* If DIM was enabled, re-apply default parameters */
1492     if (dim->use_dim) {
1493         moder = net_dim_get_def_rx_moderation(dim->dim.mode);
1494         usecs = moder.usec;
1495         pkts = moder.pkts;
1496     }
1497 
1498     bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
1499 }
1500 
1501 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1502                     unsigned int index)
1503 {
1504     struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1505     size_t size;
1506     u32 reg;
1507 
1508     /* Simple descriptors partitioning for now */
1509     size = 256;
1510 
1511     ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1512     if (!ring->cbs) {
1513         netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1514         return -ENOMEM;
1515     }
1516 
1517     /* Initialize SW view of the ring */
1518     spin_lock_init(&ring->lock);
1519     ring->priv = priv;
1520     netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
1521     ring->index = index;
1522     ring->size = size;
1523     ring->clean_index = 0;
1524     ring->alloc_size = ring->size;
1525     ring->desc_count = ring->size;
1526     ring->curr_desc = 0;
1527 
1528     /* Initialize HW ring */
1529     tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1530     tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1531     tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1532     tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1533 
1534     /* Configure QID and port mapping */
1535     reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
1536     reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
1537     if (ring->inspect) {
1538         reg |= ring->switch_queue & RING_QID_MASK;
1539         reg |= ring->switch_port << RING_PORT_ID_SHIFT;
1540     } else {
1541         reg |= RING_IGNORE_STATUS;
1542     }
1543     tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
1544     reg = 0;
1545     /* Adjust the packet size calculations if SYSTEMPORT is responsible
1546      * for HW insertion of VLAN tags
1547      */
1548     if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
1549         reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
1550     tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));
1551 
1552     /* Enable ACB algorithm 2 */
1553     reg = tdma_readl(priv, TDMA_CONTROL);
1554     reg |= tdma_control_bit(priv, ACB_ALGO);
1555     tdma_writel(priv, reg, TDMA_CONTROL);
1556 
1557     /* Do not use tdma_control_bit() here because TSB_SWAP1 collides
1558      * with the original definition of ACB_ALGO
1559      */
1560     reg = tdma_readl(priv, TDMA_CONTROL);
1561     if (priv->is_lite)
1562         reg &= ~BIT(TSB_SWAP1);
1563     /* Set a correct TSB format based on host endian */
1564     if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1565         reg |= tdma_control_bit(priv, TSB_SWAP0);
1566     else
1567         reg &= ~tdma_control_bit(priv, TSB_SWAP0);
1568     tdma_writel(priv, reg, TDMA_CONTROL);
1569 
1570     /* Program the number of descriptors as MAX_THRESHOLD and a
1571      * hysteresis trigger threshold of one descriptor
1572      */
1573     tdma_writel(priv, ring->size |
1574             1 << RING_HYST_THRESH_SHIFT,
1575             TDMA_DESC_RING_MAX_HYST(index));
1576 
1577     /* Enable the ring queue in the arbiter */
1578     reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1579     reg |= (1 << index);
1580     tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1581 
1582     napi_enable(&ring->napi);
1583 
1584     netif_dbg(priv, hw, priv->netdev,
1585           "TDMA cfg, size=%d, switch q=%d,port=%d\n",
1586           ring->size, ring->switch_queue,
1587           ring->switch_port);
1588 
1589     return 0;
1590 }
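
/* Worked example for the packet size adjustment above: with TX VLAN
 * offload enabled, RING_PKT_SIZE_ADJ is programmed with VLAN_HLEN
 * (4 bytes) so that the ring's packet size accounting includes the tag
 * the hardware will insert.
 */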
1591 
1592 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1593                      unsigned int index)
1594 {
1595     struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1596     u32 reg;
1597 
1598     /* Caller should stop the TDMA engine */
1599     reg = tdma_readl(priv, TDMA_STATUS);
1600     if (!(reg & TDMA_DISABLED))
1601         netdev_warn(priv->netdev, "TDMA not stopped!\n");
1602 
1603     /* ring->cbs is the last part of bcm_sysport_init_tx_ring that could
1604      * fail, so checking this pointer tells us whether the TX ring was
1605      * fully initialized or not.
1606      */
1607     if (!ring->cbs)
1608         return;
1609 
1610     napi_disable(&ring->napi);
1611     netif_napi_del(&ring->napi);
1612 
1613     bcm_sysport_tx_clean(priv, ring);
1614 
1615     kfree(ring->cbs);
1616     ring->cbs = NULL;
1617     ring->size = 0;
1618     ring->alloc_size = 0;
1619 
1620     netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1621 }
1622 
1623 /* RDMA helper */
1624 static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1625                   unsigned int enable)
1626 {
1627     unsigned int timeout = 1000;
1628     u32 reg;
1629 
1630     reg = rdma_readl(priv, RDMA_CONTROL);
1631     if (enable)
1632         reg |= RDMA_EN;
1633     else
1634         reg &= ~RDMA_EN;
1635     rdma_writel(priv, reg, RDMA_CONTROL);
1636 
1637     /* Poll for RDMA enabling/disabling completion */
1638     do {
1639         reg = rdma_readl(priv, RDMA_STATUS);
1640         if (!!(reg & RDMA_DISABLED) == !enable)
1641             return 0;
1642         usleep_range(1000, 2000);
1643     } while (timeout-- > 0);
1644 
1645     netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1646 
1647     return -ETIMEDOUT;
1648 }
1649 
1650 /* TDMA helper */
1651 static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1652                   unsigned int enable)
1653 {
1654     unsigned int timeout = 1000;
1655     u32 reg;
1656 
1657     reg = tdma_readl(priv, TDMA_CONTROL);
1658     if (enable)
1659         reg |= tdma_control_bit(priv, TDMA_EN);
1660     else
1661         reg &= ~tdma_control_bit(priv, TDMA_EN);
1662     tdma_writel(priv, reg, TDMA_CONTROL);
1663 
1664     /* Poll for TDMA enabling/disabling completion */
1665     do {
1666         reg = tdma_readl(priv, TDMA_STATUS);
1667         if (!!(reg & TDMA_DISABLED) == !enable)
1668             return 0;
1669 
1670         usleep_range(1000, 2000);
1671     } while (timeout-- > 0);
1672 
1673     netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1674 
1675     return -ETIMEDOUT;
1676 }
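
/* Both poll loops above could equivalently use the generic helper from
 * <linux/iopoll.h>; a sketch for the TDMA case, assuming a comparable
 * poll interval and an overall budget of roughly one second:
 */
static int example_tdma_wait(struct bcm_sysport_priv *priv,
			     unsigned int enable)
{
	u32 reg;

	return read_poll_timeout(tdma_readl, reg,
				 !!(reg & TDMA_DISABLED) == !enable,
				 1000, 1000 * 1000, false,
				 priv, TDMA_STATUS);
}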
1677 
1678 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1679 {
1680     struct bcm_sysport_cb *cb;
1681     u32 reg;
1682     int ret;
1683     int i;
1684 
1685     /* Initialize SW view of the RX ring */
1686     priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
1687     priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1688     priv->rx_c_index = 0;
1689     priv->rx_read_ptr = 0;
1690     priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1691                 GFP_KERNEL);
1692     if (!priv->rx_cbs) {
1693         netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1694         return -ENOMEM;
1695     }
1696 
1697     for (i = 0; i < priv->num_rx_bds; i++) {
1698         cb = priv->rx_cbs + i;
1699         cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1700     }
1701 
1702     ret = bcm_sysport_alloc_rx_bufs(priv);
1703     if (ret) {
1704         netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1705         return ret;
1706     }
1707 
1708     /* Initialize HW, ensure RDMA is disabled */
1709     reg = rdma_readl(priv, RDMA_STATUS);
1710     if (!(reg & RDMA_DISABLED))
1711         rdma_enable_set(priv, 0);
1712 
1713     rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1714     rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1715     rdma_writel(priv, 0, RDMA_PROD_INDEX);
1716     rdma_writel(priv, 0, RDMA_CONS_INDEX);
1717     rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1718               RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1719     /* Operate the queue in ring mode */
1720     rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1721     rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1722     rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1723     rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
1724 
1725     netif_dbg(priv, hw, priv->netdev,
1726           "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1727           priv->num_rx_bds, priv->rx_bds);
1728 
1729     return 0;
1730 }
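
/* Sizing example for the ring above, assuming the bcmsysport.h values of
 * WORDS_PER_DESC = 2 and SP_NUM_HW_RX_DESC_WORDS = 1024: a full
 * SYSTEMPORT exposes 512 RX descriptors. Note that the descriptors live
 * in on-chip memory behind the RDMA register window (priv->base +
 * SYS_PORT_RDMA_OFFSET), which is why no DMA-coherent allocation is made
 * for them here.
 */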
1731 
1732 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1733 {
1734     struct bcm_sysport_cb *cb;
1735     unsigned int i;
1736     u32 reg;
1737 
1738     /* Caller should ensure RDMA is disabled */
1739     reg = rdma_readl(priv, RDMA_STATUS);
1740     if (!(reg & RDMA_DISABLED))
1741         netdev_warn(priv->netdev, "RDMA not stopped!\n");
1742 
1743     for (i = 0; i < priv->num_rx_bds; i++) {
1744         cb = &priv->rx_cbs[i];
1745         if (dma_unmap_addr(cb, dma_addr))
1746             dma_unmap_single(&priv->pdev->dev,
1747                      dma_unmap_addr(cb, dma_addr),
1748                      RX_BUF_LENGTH, DMA_FROM_DEVICE);
1749         bcm_sysport_free_cb(cb);
1750     }
1751 
1752     kfree(priv->rx_cbs);
1753     priv->rx_cbs = NULL;
1754 
1755     netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1756 }
1757 
1758 static void bcm_sysport_set_rx_mode(struct net_device *dev)
1759 {
1760     struct bcm_sysport_priv *priv = netdev_priv(dev);
1761     u32 reg;
1762 
1763     if (priv->is_lite)
1764         return;
1765 
1766     reg = umac_readl(priv, UMAC_CMD);
1767     if (dev->flags & IFF_PROMISC)
1768         reg |= CMD_PROMISC;
1769     else
1770         reg &= ~CMD_PROMISC;
1771     umac_writel(priv, reg, UMAC_CMD);
1772 
1773     /* No support for ALLMULTI */
1774     if (dev->flags & IFF_ALLMULTI)
1775         return;
1776 }
1777 
1778 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1779                    u32 mask, unsigned int enable)
1780 {
1781     u32 reg;
1782 
1783     if (!priv->is_lite) {
1784         reg = umac_readl(priv, UMAC_CMD);
1785         if (enable)
1786             reg |= mask;
1787         else
1788             reg &= ~mask;
1789         umac_writel(priv, reg, UMAC_CMD);
1790     } else {
1791         reg = gib_readl(priv, GIB_CONTROL);
1792         if (enable)
1793             reg |= mask;
1794         else
1795             reg &= ~mask;
1796         gib_writel(priv, reg, GIB_CONTROL);
1797     }
1798 
1799     /* UniMAC stops on a packet boundary; wait for a full-sized packet
1800      * to be processed (1 msec).
1801      */
1802     if (enable == 0)
1803         usleep_range(1000, 2000);
1804 }
1805 
1806 static inline void umac_reset(struct bcm_sysport_priv *priv)
1807 {
1808     u32 reg;
1809 
1810     if (priv->is_lite)
1811         return;
1812 
1813     reg = umac_readl(priv, UMAC_CMD);
1814     reg |= CMD_SW_RESET;
1815     umac_writel(priv, reg, UMAC_CMD);
1816     udelay(10);
1817     reg = umac_readl(priv, UMAC_CMD);
1818     reg &= ~CMD_SW_RESET;
1819     umac_writel(priv, reg, UMAC_CMD);
1820 }
1821 
1822 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1823                  const unsigned char *addr)
1824 {
1825     u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
1826             addr[3];
1827     u32 mac1 = (addr[4] << 8) | addr[5];
1828 
1829     if (!priv->is_lite) {
1830         umac_writel(priv, mac0, UMAC_MAC0);
1831         umac_writel(priv, mac1, UMAC_MAC1);
1832     } else {
1833         gib_writel(priv, mac0, GIB_MAC0);
1834         gib_writel(priv, mac1, GIB_MAC1);
1835     }
1836 }
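
/* Worked example of the packing above: for the address 00:0a:f7:12:34:56,
 * mac0 = 0x000af712 (first four octets) and mac1 = 0x00003456 (last two
 * octets in the low 16 bits).
 */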
1837 
1838 static void topctrl_flush(struct bcm_sysport_priv *priv)
1839 {
1840     topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1841     topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1842     mdelay(1);
1843     topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1844     topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1845 }
1846 
1847 static int bcm_sysport_change_mac(struct net_device *dev, void *p)
1848 {
1849     struct bcm_sysport_priv *priv = netdev_priv(dev);
1850     struct sockaddr *addr = p;
1851 
1852     if (!is_valid_ether_addr(addr->sa_data))
1853         return -EINVAL;
1854 
1855     eth_hw_addr_set(dev, addr->sa_data);
1856 
1857     /* The interface is disabled; changes to the MAC will take effect on
1858      * the next open call
1859      */
1860     if (!netif_running(dev))
1861         return 0;
1862 
1863     umac_set_hw_addr(priv, dev->dev_addr);
1864 
1865     return 0;
1866 }
1867 
1868 static void bcm_sysport_get_stats64(struct net_device *dev,
1869                     struct rtnl_link_stats64 *stats)
1870 {
1871     struct bcm_sysport_priv *priv = netdev_priv(dev);
1872     struct bcm_sysport_stats64 *stats64 = &priv->stats64;
1873     unsigned int start;
1874 
1875     netdev_stats_to_stats64(stats, &dev->stats);
1876 
1877     bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
1878                     &stats->tx_packets);
1879 
1880     do {
1881         start = u64_stats_fetch_begin_irq(&priv->syncp);
1882         stats->rx_packets = stats64->rx_packets;
1883         stats->rx_bytes = stats64->rx_bytes;
1884     } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
1885 }
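
/* The fetch/retry loop above pairs with writer-side critical sections in
 * the RX path; a minimal sketch of that side (illustrative, matching the
 * fields read above):
 */
static void example_rx_stats_writer(struct bcm_sysport_priv *priv,
				    unsigned int len)
{
	u64_stats_update_begin(&priv->syncp);
	priv->stats64.rx_packets++;
	priv->stats64.rx_bytes += len;
	u64_stats_update_end(&priv->syncp);
}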
1886 
1887 static void bcm_sysport_netif_start(struct net_device *dev)
1888 {
1889     struct bcm_sysport_priv *priv = netdev_priv(dev);
1890 
1891     /* Enable NAPI */
1892     bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
1893     bcm_sysport_init_rx_coalesce(priv);
1894     napi_enable(&priv->napi);
1895 
1896     /* Enable RX interrupt and TX ring full interrupt */
1897     intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1898 
1899     phy_start(dev->phydev);
1900 
1901     /* Enable TX interrupts for the TXQs */
1902     if (!priv->is_lite)
1903         intrl2_1_mask_clear(priv, 0xffffffff);
1904     else
1905         intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
1906 }
1907 
1908 static void rbuf_init(struct bcm_sysport_priv *priv)
1909 {
1910     u32 reg;
1911 
1912     reg = rbuf_readl(priv, RBUF_CONTROL);
1913     reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
1914     /* Set a correct RSB format on SYSTEMPORT Lite */
1915     if (priv->is_lite)
1916         reg &= ~RBUF_RSB_SWAP1;
1917 
1918     /* Set a correct RSB format based on host endian */
1919     if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1920         reg |= RBUF_RSB_SWAP0;
1921     else
1922         reg &= ~RBUF_RSB_SWAP0;
1923     rbuf_writel(priv, reg, RBUF_CONTROL);
1924 }
1925 
1926 static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1927 {
1928     intrl2_0_mask_set(priv, 0xffffffff);
1929     intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1930     if (!priv->is_lite) {
1931         intrl2_1_mask_set(priv, 0xffffffff);
1932         intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1933     }
1934 }
1935 
1936 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1937 {
1938     u32 reg;
1939 
1940     reg = gib_readl(priv, GIB_CONTROL);
1941     /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
1942     if (netdev_uses_dsa(priv->netdev)) {
1943         reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
1944         reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
1945     }
1946     reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
1947     reg |= 12 << GIB_IPG_LEN_SHIFT;
1948     gib_writel(priv, reg, GIB_CONTROL);
1949 }
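
/* The IPG_LENGTH value of 12 programmed above corresponds to the standard
 * Ethernet inter-packet gap of 12 byte times (96 bit times).
 */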
1950 
1951 static int bcm_sysport_open(struct net_device *dev)
1952 {
1953     struct bcm_sysport_priv *priv = netdev_priv(dev);
1954     struct phy_device *phydev;
1955     unsigned int i;
1956     int ret;
1957 
1958     clk_prepare_enable(priv->clk);
1959 
1960     /* Reset UniMAC */
1961     umac_reset(priv);
1962 
1963     /* Flush TX and RX FIFOs at TOPCTRL level */
1964     topctrl_flush(priv);
1965 
1966     /* Disable the UniMAC RX/TX */
1967     umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
1968 
1969     /* Enable RBUF 4-byte alignment and the Receive Status Block */
1970     rbuf_init(priv);
1971 
1972     /* Set maximum frame length */
1973     if (!priv->is_lite)
1974         umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1975     else
1976         gib_set_pad_extension(priv);
1977 
1978     /* Apply features again in case we changed them while interface was
1979      * down
1980      */
1981     bcm_sysport_set_features(dev, dev->features);
1982 
1983     /* Set MAC address */
1984     umac_set_hw_addr(priv, dev->dev_addr);
1985 
1986     phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1987                 0, priv->phy_interface);
1988     if (!phydev) {
1989         netdev_err(dev, "could not attach to PHY\n");
1990         ret = -ENODEV;
1991         goto out_clk_disable;
1992     }
1993 
1994     /* Reset housekeeping link status */
1995     priv->old_duplex = -1;
1996     priv->old_link = -1;
1997     priv->old_pause = -1;
1998 
1999     /* mask all interrupts and request them */
2000     bcm_sysport_mask_all_intrs(priv);
2001 
2002     ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
2003     if (ret) {
2004         netdev_err(dev, "failed to request RX interrupt\n");
2005         goto out_phy_disconnect;
2006     }
2007 
2008     if (!priv->is_lite) {
2009         ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
2010                   dev->name, dev);
2011         if (ret) {
2012             netdev_err(dev, "failed to request TX interrupt\n");
2013             goto out_free_irq0;
2014         }
2015     }
2016 
2017     /* Initialize both hardware and software ring */
2018     spin_lock_init(&priv->desc_lock);
2019     for (i = 0; i < dev->num_tx_queues; i++) {
2020         ret = bcm_sysport_init_tx_ring(priv, i);
2021         if (ret) {
2022             netdev_err(dev, "failed to initialize TX ring %d\n",
2023                    i);
2024             goto out_free_tx_ring;
2025         }
2026     }
2027 
2028     /* Initialize linked-list */
2029     tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2030 
2031     /* Initialize RX ring */
2032     ret = bcm_sysport_init_rx_ring(priv);
2033     if (ret) {
2034         netdev_err(dev, "failed to initialize RX ring\n");
2035         goto out_free_rx_ring;
2036     }
2037 
2038     /* Turn on RDMA */
2039     ret = rdma_enable_set(priv, 1);
2040     if (ret)
2041         goto out_free_rx_ring;
2042 
2043     /* Turn on TDMA */
2044     ret = tdma_enable_set(priv, 1);
2045     if (ret)
2046         goto out_clear_rx_int;
2047 
2048     /* Turn on UniMAC TX/RX */
2049     umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
2050 
2051     bcm_sysport_netif_start(dev);
2052 
2053     netif_tx_start_all_queues(dev);
2054 
2055     return 0;
2056 
2057 out_clear_rx_int:
2058     intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
2059 out_free_rx_ring:
2060     bcm_sysport_fini_rx_ring(priv);
2061 out_free_tx_ring:
2062     for (i = 0; i < dev->num_tx_queues; i++)
2063         bcm_sysport_fini_tx_ring(priv, i);
2064     if (!priv->is_lite)
2065         free_irq(priv->irq1, dev);
2066 out_free_irq0:
2067     free_irq(priv->irq0, dev);
2068 out_phy_disconnect:
2069     phy_disconnect(phydev);
2070 out_clk_disable:
2071     clk_disable_unprepare(priv->clk);
2072     return ret;
2073 }
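
/* Note on the unwind above: the error labels release resources in strict
 * reverse order of acquisition, so each failure path frees only what was
 * actually set up before the failing step (bcm_sysport_fini_tx_ring's
 * ring->cbs check covers partially initialized rings).
 */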
2074 
2075 static void bcm_sysport_netif_stop(struct net_device *dev)
2076 {
2077     struct bcm_sysport_priv *priv = netdev_priv(dev);
2078 
2079     /* stop all software from updating hardware */
2080     netif_tx_disable(dev);
2081     napi_disable(&priv->napi);
2082     cancel_work_sync(&priv->dim.dim.work);
2083     phy_stop(dev->phydev);
2084 
2085     /* mask all interrupts */
2086     bcm_sysport_mask_all_intrs(priv);
2087 }
2088 
2089 static int bcm_sysport_stop(struct net_device *dev)
2090 {
2091     struct bcm_sysport_priv *priv = netdev_priv(dev);
2092     unsigned int i;
2093     int ret;
2094 
2095     bcm_sysport_netif_stop(dev);
2096 
2097     /* Disable UniMAC RX */
2098     umac_enable_set(priv, CMD_RX_EN, 0);
2099 
2100     ret = tdma_enable_set(priv, 0);
2101     if (ret) {
2102         netdev_err(dev, "timeout disabling TDMA\n");
2103         return ret;
2104     }
2105 
2106     /* Wait for a maximum-sized packet to be drained */
2107     usleep_range(2000, 3000);
2108 
2109     ret = rdma_enable_set(priv, 0);
2110     if (ret) {
2111         netdev_err(dev, "timeout disabling RDMA\n");
2112         return ret;
2113     }
2114 
2115     /* Disable UniMAC TX */
2116     umac_enable_set(priv, CMD_TX_EN, 0);
2117 
2118     /* Free RX/TX rings SW structures */
2119     for (i = 0; i < dev->num_tx_queues; i++)
2120         bcm_sysport_fini_tx_ring(priv, i);
2121     bcm_sysport_fini_rx_ring(priv);
2122 
2123     free_irq(priv->irq0, dev);
2124     if (!priv->is_lite)
2125         free_irq(priv->irq1, dev);
2126 
2127     /* Disconnect from PHY */
2128     phy_disconnect(dev->phydev);
2129 
2130     clk_disable_unprepare(priv->clk);
2131 
2132     return 0;
2133 }
2134 
2135 static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
2136                  u64 location)
2137 {
2138     unsigned int index;
2139     u32 reg;
2140 
2141     for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2142         reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2143         reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
2144         reg &= RXCHK_BRCM_TAG_CID_MASK;
2145         if (reg == location)
2146             return index;
2147     }
2148 
2149     return -EINVAL;
2150 }
2151 
2152 static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
2153                 struct ethtool_rxnfc *nfc)
2154 {
2155     int index;
2156 
2157     /* This is not a rule that we know about */
2158     index = bcm_sysport_rule_find(priv, nfc->fs.location);
2159     if (index < 0)
2160         return -EOPNOTSUPP;
2161 
2162     nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
2163 
2164     return 0;
2165 }
2166 
2167 static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
2168                 struct ethtool_rxnfc *nfc)
2169 {
2170     unsigned int index;
2171     u32 reg;
2172 
2173     /* We cannot match locations greater than what the classification ID
2174      * permits (256 entries)
2175      */
2176     if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
2177         return -E2BIG;
2178 
2179     /* We cannot support flows that are not destined for a wake-up */
2180     if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
2181         return -EOPNOTSUPP;
2182 
2183     index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
2184     if (index >= RXCHK_BRCM_TAG_MAX)
2185         /* All filters are already in use, we cannot match more rules */
2186         return -ENOSPC;
2187 
2188     /* Location is the classification ID, and index is the position
2189      * within one of our 8 possible filters to be programmed
2190      */
2191     reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2192     reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
2193     reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
2194     rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
2195     rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
2196 
2197     priv->filters_loc[index] = nfc->fs.location;
2198     set_bit(index, priv->filters);
2199 
2200     return 0;
2201 }
2202 
2203 static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
2204                 u64 location)
2205 {
2206     int index;
2207 
2208     /* This is not a rule that we know about */
2209     index = bcm_sysport_rule_find(priv, location);
2210     if (index < 0)
2211         return -EOPNOTSUPP;
2212 
2213     /* No need to disable this filter if it was enabled; this is
2214      * taken care of at suspend time by bcm_sysport_suspend_to_wol
2215      */
2216     clear_bit(index, priv->filters);
2217     priv->filters_loc[index] = 0;
2218 
2219     return 0;
2220 }
2221 
2222 static int bcm_sysport_get_rxnfc(struct net_device *dev,
2223                  struct ethtool_rxnfc *nfc, u32 *rule_locs)
2224 {
2225     struct bcm_sysport_priv *priv = netdev_priv(dev);
2226     int ret = -EOPNOTSUPP;
2227 
2228     switch (nfc->cmd) {
2229     case ETHTOOL_GRXCLSRULE:
2230         ret = bcm_sysport_rule_get(priv, nfc);
2231         break;
2232     default:
2233         break;
2234     }
2235 
2236     return ret;
2237 }
2238 
2239 static int bcm_sysport_set_rxnfc(struct net_device *dev,
2240                  struct ethtool_rxnfc *nfc)
2241 {
2242     struct bcm_sysport_priv *priv = netdev_priv(dev);
2243     int ret = -EOPNOTSUPP;
2244 
2245     switch (nfc->cmd) {
2246     case ETHTOOL_SRXCLSRLINS:
2247         ret = bcm_sysport_rule_set(priv, nfc);
2248         break;
2249     case ETHTOOL_SRXCLSRLDEL:
2250         ret = bcm_sysport_rule_del(priv, nfc->fs.location);
2251         break;
2252     default:
2253         break;
2254     }
2255 
2256     return ret;
2257 }
2258 
2259 static const struct ethtool_ops bcm_sysport_ethtool_ops = {
2260     .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2261                      ETHTOOL_COALESCE_MAX_FRAMES |
2262                      ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2263     .get_drvinfo        = bcm_sysport_get_drvinfo,
2264     .get_msglevel       = bcm_sysport_get_msglvl,
2265     .set_msglevel       = bcm_sysport_set_msglvl,
2266     .get_link       = ethtool_op_get_link,
2267     .get_strings        = bcm_sysport_get_strings,
2268     .get_ethtool_stats  = bcm_sysport_get_stats,
2269     .get_sset_count     = bcm_sysport_get_sset_count,
2270     .get_wol        = bcm_sysport_get_wol,
2271     .set_wol        = bcm_sysport_set_wol,
2272     .get_coalesce       = bcm_sysport_get_coalesce,
2273     .set_coalesce       = bcm_sysport_set_coalesce,
2274     .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2275     .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2276     .get_rxnfc      = bcm_sysport_get_rxnfc,
2277     .set_rxnfc      = bcm_sysport_set_rxnfc,
2278 };
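
/* The coalescing parameters advertised above map onto the standard
 * ethtool knobs; typical usage from userspace (illustrative):
 *
 *	ethtool -C eth0 adaptive-rx on
 *	ethtool -C eth0 adaptive-rx off rx-usecs 50 rx-frames 16
 */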
2279 
2280 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
2281                     struct net_device *sb_dev)
2282 {
2283     struct bcm_sysport_priv *priv = netdev_priv(dev);
2284     u16 queue = skb_get_queue_mapping(skb);
2285     struct bcm_sysport_tx_ring *tx_ring;
2286     unsigned int q, port;
2287 
2288     if (!netdev_uses_dsa(dev))
2289         return netdev_pick_tx(dev, skb, NULL);
2290 
2291     /* DSA tagging layer will have configured the correct queue */
2292     q = BRCM_TAG_GET_QUEUE(queue);
2293     port = BRCM_TAG_GET_PORT(queue);
2294     tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
2295 
2296     if (unlikely(!tx_ring))
2297         return netdev_pick_tx(dev, skb, NULL);
2298 
2299     return tx_ring->index;
2300 }
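
/* Worked example of the lookup above, assuming the <linux/dsa/brcm.h>
 * helpers pack queue_mapping as (port << 8) | queue: with 4 TX queues
 * per switch port, a mapping of 0x0201 decodes to switch port 2, queue 1
 * and selects priv->ring_map[1 + 2 * 4], i.e. slot 9.
 */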
2301 
2302 static const struct net_device_ops bcm_sysport_netdev_ops = {
2303     .ndo_start_xmit     = bcm_sysport_xmit,
2304     .ndo_tx_timeout     = bcm_sysport_tx_timeout,
2305     .ndo_open       = bcm_sysport_open,
2306     .ndo_stop       = bcm_sysport_stop,
2307     .ndo_set_features   = bcm_sysport_set_features,
2308     .ndo_set_rx_mode    = bcm_sysport_set_rx_mode,
2309     .ndo_set_mac_address    = bcm_sysport_change_mac,
2310 #ifdef CONFIG_NET_POLL_CONTROLLER
2311     .ndo_poll_controller    = bcm_sysport_poll_controller,
2312 #endif
2313     .ndo_get_stats64    = bcm_sysport_get_stats64,
2314     .ndo_select_queue   = bcm_sysport_select_queue,
2315 };
2316 
2317 static int bcm_sysport_map_queues(struct net_device *dev,
2318                   struct net_device *slave_dev)
2319 {
2320     struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
2321     struct bcm_sysport_priv *priv = netdev_priv(dev);
2322     struct bcm_sysport_tx_ring *ring;
2323     unsigned int num_tx_queues;
2324     unsigned int q, qp, port;
2325 
2326     /* We can't set up queue inspection for switches that are not
2327      * directly attached
2328      */
2329     if (dp->ds->index)
2330         return 0;
2331 
2332     port = dp->index;
2333 
2334     /* On SYSTEMPORT Lite we have half as many queues, so a 1:1 mapping is
2335      * not possible; we can only do a 2:1 mapping. Halving the number of
2336      * queues on each per-port (slave_dev) network device achieves just
2337      * that. This needs to happen before any slave network device is used,
2338      * so that it accurately reflects the real number of TX queues.
2339      */
2340     if (priv->is_lite)
2341         netif_set_real_num_tx_queues(slave_dev,
2342                          slave_dev->num_tx_queues / 2);
2343 
2344     num_tx_queues = slave_dev->real_num_tx_queues;
2345 
2346     if (priv->per_port_num_tx_queues &&
2347         priv->per_port_num_tx_queues != num_tx_queues)
2348         netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
2349 
2350     priv->per_port_num_tx_queues = num_tx_queues;
2351 
2352     for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
2353          q++) {
2354         ring = &priv->tx_rings[q];
2355 
2356         if (ring->inspect)
2357             continue;
2358 
2359         /* Just remember the mapping; the actual programming is
2360          * done during bcm_sysport_init_tx_ring
2361          */
2362         ring->switch_queue = qp;
2363         ring->switch_port = port;
2364         ring->inspect = true;
2365         priv->ring_map[qp + port * num_tx_queues] = ring;
2366         qp++;
2367     }
2368 
2369     return 0;
2370 }
2371 
2372 static int bcm_sysport_unmap_queues(struct net_device *dev,
2373                     struct net_device *slave_dev)
2374 {
2375     struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
2376     struct bcm_sysport_priv *priv = netdev_priv(dev);
2377     struct bcm_sysport_tx_ring *ring;
2378     unsigned int num_tx_queues;
2379     unsigned int q, qp, port;
2380 
2381     port = dp->index;
2382 
2383     num_tx_queues = slave_dev->real_num_tx_queues;
2384 
2385     for (q = 0; q < dev->num_tx_queues; q++) {
2386         ring = &priv->tx_rings[q];
2387 
2388         if (ring->switch_port != port)
2389             continue;
2390 
2391         if (!ring->inspect)
2392             continue;
2393 
2394         ring->inspect = false;
2395         qp = ring->switch_queue;
2396         priv->ring_map[qp + port * num_tx_queues] = NULL;
2397     }
2398 
2399     return 0;
2400 }
2401 
2402 static int bcm_sysport_netdevice_event(struct notifier_block *nb,
2403                        unsigned long event, void *ptr)
2404 {
2405     struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2406     struct netdev_notifier_changeupper_info *info = ptr;
2407     struct bcm_sysport_priv *priv;
2408     int ret = 0;
2409 
2410     priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
2411     if (priv->netdev != dev)
2412         return NOTIFY_DONE;
2413 
2414     switch (event) {
2415     case NETDEV_CHANGEUPPER:
2416         if (dev->netdev_ops != &bcm_sysport_netdev_ops)
2417             return NOTIFY_DONE;
2418 
2419         if (!dsa_slave_dev_check(info->upper_dev))
2420             return NOTIFY_DONE;
2421 
2422         if (info->linking)
2423             ret = bcm_sysport_map_queues(dev, info->upper_dev);
2424         else
2425             ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
2426         break;
2427     }
2428 
2429     return notifier_from_errno(ret);
2430 }
2431 
2432 #define REV_FMT "v%2x.%02x"
2433 
2434 static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
2435     [SYSTEMPORT] = {
2436         .is_lite = false,
2437         .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
2438     },
2439     [SYSTEMPORT_LITE] = {
2440         .is_lite = true,
2441         .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
2442     },
2443 };
2444 
2445 static const struct of_device_id bcm_sysport_of_match[] = {
2446     { .compatible = "brcm,systemportlite-v1.00",
2447       .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
2448     { .compatible = "brcm,systemport-v1.00",
2449       .data = &bcm_sysport_params[SYSTEMPORT] },
2450     { .compatible = "brcm,systemport",
2451       .data = &bcm_sysport_params[SYSTEMPORT] },
2452     { /* sentinel */ }
2453 };
2454 MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
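
/* A matching device tree node might look like the following (illustrative
 * only; the unit address, register size, interrupt specifiers and queue
 * counts are platform specific):
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0 22 0>, <0 23 0>, <0 24 0>;
 *		systemport,num-txq = <32>;
 *		systemport,num-rxq = <1>;
 *	};
 */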
2455 
2456 static int bcm_sysport_probe(struct platform_device *pdev)
2457 {
2458     const struct bcm_sysport_hw_params *params;
2459     const struct of_device_id *of_id = NULL;
2460     struct bcm_sysport_priv *priv;
2461     struct device_node *dn;
2462     struct net_device *dev;
2463     u32 txq, rxq;
2464     int ret;
2465 
2466     dn = pdev->dev.of_node;
2467     of_id = of_match_node(bcm_sysport_of_match, dn);
2468     if (!of_id || !of_id->data)
2469         return -EINVAL;
2470 
2471     ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
2472     if (ret)
2473         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2474     if (ret) {
2475         dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
2476         return ret;
2477     }
2478 
2479     /* Fairly quickly we need to know the type of adapter we have */
2480     params = of_id->data;
2481 
2482     /* Read the Transmit/Receive Queue properties */
2483     if (of_property_read_u32(dn, "systemport,num-txq", &txq))
2484         txq = TDMA_NUM_RINGS;
2485     if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
2486         rxq = 1;
2487 
2488     /* Sanity check the number of transmit queues */
2489     if (!txq || txq > TDMA_NUM_RINGS)
2490         return -EINVAL;
2491 
2492     dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2493     if (!dev)
2494         return -ENOMEM;
2495 
2496     /* Initialize private members */
2497     priv = netdev_priv(dev);
2498 
2499     priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
2500     if (IS_ERR(priv->clk)) {
2501         ret = PTR_ERR(priv->clk);
2502         goto err_free_netdev;
2503     }
2504 
2505     /* Allocate number of TX rings */
2506     priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2507                       sizeof(struct bcm_sysport_tx_ring),
2508                       GFP_KERNEL);
2509     if (!priv->tx_rings) {
2510         ret = -ENOMEM;
2511         goto err_free_netdev;
2512     }
2513 
2514     priv->is_lite = params->is_lite;
2515     priv->num_rx_desc_words = params->num_rx_desc_words;
2516 
2517     priv->irq0 = platform_get_irq(pdev, 0);
2518     if (!priv->is_lite) {
2519         priv->irq1 = platform_get_irq(pdev, 1);
2520         priv->wol_irq = platform_get_irq(pdev, 2);
2521     } else {
2522         priv->wol_irq = platform_get_irq(pdev, 1);
2523     }
2524     if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2525         ret = -EINVAL;
2526         goto err_free_netdev;
2527     }
2528 
2529     priv->base = devm_platform_ioremap_resource(pdev, 0);
2530     if (IS_ERR(priv->base)) {
2531         ret = PTR_ERR(priv->base);
2532         goto err_free_netdev;
2533     }
2534 
2535     priv->netdev = dev;
2536     priv->pdev = pdev;
2537 
2538     ret = of_get_phy_mode(dn, &priv->phy_interface);
2539     /* Default to GMII interface mode */
2540     if (ret)
2541         priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2542 
2543     /* In the case of a fixed PHY, the DT node associated
2544      * with the PHY is the Ethernet MAC DT node.
2545      */
2546     if (of_phy_is_fixed_link(dn)) {
2547         ret = of_phy_register_fixed_link(dn);
2548         if (ret) {
2549             dev_err(&pdev->dev, "failed to register fixed PHY\n");
2550             goto err_free_netdev;
2551         }
2552 
2553         priv->phy_dn = dn;
2554     }
2555 
2556     /* Initialize netdevice members */
2557     ret = of_get_ethdev_address(dn, dev);
2558     if (ret) {
2559         dev_warn(&pdev->dev, "using random Ethernet MAC\n");
2560         eth_hw_addr_random(dev);
2561     }
2562 
2563     SET_NETDEV_DEV(dev, &pdev->dev);
2564     dev_set_drvdata(&pdev->dev, dev);
2565     dev->ethtool_ops = &bcm_sysport_ethtool_ops;
2566     dev->netdev_ops = &bcm_sysport_netdev_ops;
2567     netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
2568 
2569     dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
2570              NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2571              NETIF_F_HW_VLAN_CTAG_TX;
2572     dev->hw_features |= dev->features;
2573     dev->vlan_features |= dev->features;
2574     dev->max_mtu = UMAC_MAX_MTU_SIZE;
2575 
2576     /* Request the WOL interrupt and advertise suspend if available */
2577     priv->wol_irq_disabled = 1;
2578     ret = devm_request_irq(&pdev->dev, priv->wol_irq,
2579                    bcm_sysport_wol_isr, 0, dev->name, priv);
2580     if (!ret)
2581         device_set_wakeup_capable(&pdev->dev, 1);
2582 
2583     priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
2584     if (IS_ERR(priv->wol_clk)) {
2585         ret = PTR_ERR(priv->wol_clk);
2586         goto err_deregister_fixed_link;
2587     }
2588 
2589     /* Set the needed headroom once and for all */
2590     BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
2591     dev->needed_headroom += sizeof(struct bcm_tsb);
2592 
2593     /* libphy will adjust the link state accordingly */
2594     netif_carrier_off(dev);
2595 
2596     priv->rx_max_coalesced_frames = 1;
2597     u64_stats_init(&priv->syncp);
2598 
2599     priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
2600 
2601     ret = register_netdevice_notifier(&priv->netdev_notifier);
2602     if (ret) {
2603         dev_err(&pdev->dev, "failed to register DSA notifier\n");
2604         goto err_deregister_fixed_link;
2605     }
2606 
2607     ret = register_netdev(dev);
2608     if (ret) {
2609         dev_err(&pdev->dev, "failed to register net_device\n");
2610         goto err_deregister_notifier;
2611     }
2612 
2613     clk_prepare_enable(priv->clk);
2614 
2615     priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
2616     dev_info(&pdev->dev,
2617          "Broadcom SYSTEMPORT%s " REV_FMT
2618          " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
2619          priv->is_lite ? " Lite" : "",
2620          (priv->rev >> 8) & 0xff, priv->rev & 0xff,
2621          priv->irq0, priv->irq1, txq, rxq);
2622 
2623     clk_disable_unprepare(priv->clk);
2624 
2625     return 0;
2626 
2627 err_deregister_notifier:
2628     unregister_netdevice_notifier(&priv->netdev_notifier);
2629 err_deregister_fixed_link:
2630     if (of_phy_is_fixed_link(dn))
2631         of_phy_deregister_fixed_link(dn);
2632 err_free_netdev:
2633     free_netdev(dev);
2634     return ret;
2635 }
2636 
2637 static int bcm_sysport_remove(struct platform_device *pdev)
2638 {
2639     struct net_device *dev = dev_get_drvdata(&pdev->dev);
2640     struct bcm_sysport_priv *priv = netdev_priv(dev);
2641     struct device_node *dn = pdev->dev.of_node;
2642 
2643     /* Not much to do, ndo_close has been called
2644      * and we use managed allocations
2645      */
2646     unregister_netdevice_notifier(&priv->netdev_notifier);
2647     unregister_netdev(dev);
2648     if (of_phy_is_fixed_link(dn))
2649         of_phy_deregister_fixed_link(dn);
2650     free_netdev(dev);
2651     dev_set_drvdata(&pdev->dev, NULL);
2652 
2653     return 0;
2654 }
2655 
2656 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2657 {
2658     struct net_device *ndev = priv->netdev;
2659     unsigned int timeout = 1000;
2660     unsigned int index, i = 0;
2661     u32 reg;
2662 
2663     reg = umac_readl(priv, UMAC_MPD_CTRL);
2664     if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
2665         reg |= MPD_EN;
2666     reg &= ~PSW_EN;
2667     if (priv->wolopts & WAKE_MAGICSECURE) {
2668         /* Program the SecureOn password */
2669         umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
2670                 UMAC_PSW_MS);
2671         umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
2672                 UMAC_PSW_LS);
2673         reg |= PSW_EN;
2674     }
2675     umac_writel(priv, reg, UMAC_MPD_CTRL);
2676 
2677     if (priv->wolopts & WAKE_FILTER) {
2678         /* Turn on ACPI matching to steal packets from RBUF */
2679         reg = rbuf_readl(priv, RBUF_CONTROL);
2680         if (priv->is_lite)
2681             reg |= RBUF_ACPI_EN_LITE;
2682         else
2683             reg |= RBUF_ACPI_EN;
2684         rbuf_writel(priv, reg, RBUF_CONTROL);
2685 
2686         /* Enable RXCHK, active filters and Broadcom tag matching */
2687         reg = rxchk_readl(priv, RXCHK_CONTROL);
2688         reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
2689              RXCHK_BRCM_TAG_MATCH_SHIFT);
2690         for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2691             reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
2692             i++;
2693         }
2694         reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
2695         rxchk_writel(priv, reg, RXCHK_CONTROL);
2696     }
2697 
2698     /* Make sure RBUF entered WoL mode as a result */
2699     do {
2700         reg = rbuf_readl(priv, RBUF_STATUS);
2701         if (reg & RBUF_WOL_MODE)
2702             break;
2703 
2704         udelay(10);
2705     } while (timeout-- > 0);
2706 
2707     /* Do not leave the UniMAC RBUF matching only MPD packets */
2708     if (!timeout) {
2709         mpd_enable_set(priv, false);
2710         netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
2711         return -ETIMEDOUT;
2712     }
2713 
2714     /* UniMAC receive needs to be turned on */
2715     umac_enable_set(priv, CMD_RX_EN, 1);
2716 
2717     netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2718 
2719     return 0;
2720 }
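
/* Packing example for the SecureOn password above: for a password of
 * aa:bb:cc:dd:ee:ff, get_unaligned_be16(&sopass[0]) yields 0xaabb for
 * UMAC_PSW_MS and get_unaligned_be32(&sopass[2]) yields 0xccddeeff for
 * UMAC_PSW_LS.
 */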
2721 
2722 static int __maybe_unused bcm_sysport_suspend(struct device *d)
2723 {
2724     struct net_device *dev = dev_get_drvdata(d);
2725     struct bcm_sysport_priv *priv = netdev_priv(dev);
2726     unsigned int i;
2727     int ret = 0;
2728     u32 reg;
2729 
2730     if (!netif_running(dev))
2731         return 0;
2732 
2733     netif_device_detach(dev);
2734 
2735     bcm_sysport_netif_stop(dev);
2736 
2737     phy_suspend(dev->phydev);
2738 
2739     /* Disable UniMAC RX */
2740     umac_enable_set(priv, CMD_RX_EN, 0);
2741 
2742     ret = rdma_enable_set(priv, 0);
2743     if (ret) {
2744         netdev_err(dev, "RDMA timeout!\n");
2745         return ret;
2746     }
2747 
2748     /* Disable RXCHK if enabled */
2749     if (priv->rx_chk_en) {
2750         reg = rxchk_readl(priv, RXCHK_CONTROL);
2751         reg &= ~RXCHK_EN;
2752         rxchk_writel(priv, reg, RXCHK_CONTROL);
2753     }
2754 
2755     /* Flush RX pipe */
2756     if (!priv->wolopts)
2757         topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
2758 
2759     ret = tdma_enable_set(priv, 0);
2760     if (ret) {
2761         netdev_err(dev, "TDMA timeout!\n");
2762         return ret;
2763     }
2764 
2765     /* Wait for a packet boundary */
2766     usleep_range(2000, 3000);
2767 
2768     umac_enable_set(priv, CMD_TX_EN, 0);
2769 
2770     topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
2771 
2772     /* Free RX/TX rings SW structures */
2773     for (i = 0; i < dev->num_tx_queues; i++)
2774         bcm_sysport_fini_tx_ring(priv, i);
2775     bcm_sysport_fini_rx_ring(priv);
2776 
2777     /* Get prepared for Wake-on-LAN */
2778     if (device_may_wakeup(d) && priv->wolopts) {
2779         clk_prepare_enable(priv->wol_clk);
2780         ret = bcm_sysport_suspend_to_wol(priv);
2781     }
2782 
2783     clk_disable_unprepare(priv->clk);
2784 
2785     return ret;
2786 }
2787 
2788 static int __maybe_unused bcm_sysport_resume(struct device *d)
2789 {
2790     struct net_device *dev = dev_get_drvdata(d);
2791     struct bcm_sysport_priv *priv = netdev_priv(dev);
2792     unsigned int i;
2793     int ret;
2794 
2795     if (!netif_running(dev))
2796         return 0;
2797 
2798     clk_prepare_enable(priv->clk);
2799     if (priv->wolopts)
2800         clk_disable_unprepare(priv->wol_clk);
2801 
2802     umac_reset(priv);
2803 
2804     /* Disable the UniMAC RX/TX */
2805     umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
2806 
2807     /* We may have been suspended and never received a WOL event that
2808      * would turn off MPD detection; take care of that now
2809      */
2810     bcm_sysport_resume_from_wol(priv);
2811 
2812     /* Initialize both hardware and software ring */
2813     for (i = 0; i < dev->num_tx_queues; i++) {
2814         ret = bcm_sysport_init_tx_ring(priv, i);
2815         if (ret) {
2816             netdev_err(dev, "failed to initialize TX ring %d\n",
2817                    i);
2818             goto out_free_tx_rings;
2819         }
2820     }
2821 
2822     /* Initialize linked-list */
2823     tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2824 
2825     /* Initialize RX ring */
2826     ret = bcm_sysport_init_rx_ring(priv);
2827     if (ret) {
2828         netdev_err(dev, "failed to initialize RX ring\n");
2829         goto out_free_rx_ring;
2830     }
2831 
2832     /* RX pipe enable */
2833     topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2834 
2835     ret = rdma_enable_set(priv, 1);
2836     if (ret) {
2837         netdev_err(dev, "failed to enable RDMA\n");
2838         goto out_free_rx_ring;
2839     }
2840 
2841     /* Restore enabled features */
2842     bcm_sysport_set_features(dev, dev->features);
2843 
2844     rbuf_init(priv);
2845 
2846     /* Set maximum frame length */
2847     if (!priv->is_lite)
2848         umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2849     else
2850         gib_set_pad_extension(priv);
2851 
2852     /* Set MAC address */
2853     umac_set_hw_addr(priv, dev->dev_addr);
2854 
2855     umac_enable_set(priv, CMD_RX_EN, 1);
2856 
2857     /* TX pipe enable */
2858     topctrl_writel(priv, 0, TX_FLUSH_CNTL);
2859 
2860     umac_enable_set(priv, CMD_TX_EN, 1);
2861 
2862     ret = tdma_enable_set(priv, 1);
2863     if (ret) {
2864         netdev_err(dev, "TDMA timeout!\n");
2865         goto out_free_rx_ring;
2866     }
2867 
2868     phy_resume(dev->phydev);
2869 
2870     bcm_sysport_netif_start(dev);
2871 
2872     netif_device_attach(dev);
2873 
2874     return 0;
2875 
2876 out_free_rx_ring:
2877     bcm_sysport_fini_rx_ring(priv);
2878 out_free_tx_rings:
2879     for (i = 0; i < dev->num_tx_queues; i++)
2880         bcm_sysport_fini_tx_ring(priv, i);
2881     clk_disable_unprepare(priv->clk);
2882     return ret;
2883 }
2884 
2885 static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
2886         bcm_sysport_suspend, bcm_sysport_resume);
2887 
2888 static struct platform_driver bcm_sysport_driver = {
2889     .probe  = bcm_sysport_probe,
2890     .remove = bcm_sysport_remove,
2891     .driver =  {
2892         .name = "brcm-systemport",
2893         .of_match_table = bcm_sysport_of_match,
2894         .pm = &bcm_sysport_pm_ops,
2895     },
2896 };
2897 module_platform_driver(bcm_sysport_driver);
2898 
2899 MODULE_AUTHOR("Broadcom Corporation");
2900 MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
2901 MODULE_ALIAS("platform:brcm-systemport");
2902 MODULE_LICENSE("GPL");