// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
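
/*
 * Usage note: with a permission argument of 0, module_param() creates
 * no /sys/module/ entry, so copybreak can only be set at load time,
 * e.g.:
 *
 *   modprobe bcm63xx_enet copybreak=256
 *
 * Frames shorter than this threshold are copied into a fresh skb so
 * the original rx buffer can be rearmed in place (see
 * bcm_enet_receive_queue()).
 */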

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
    return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
                   u32 val, u32 off)
{
    bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
    return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
                 u32 val, u32 off)
{
    bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
    return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
                 u16 val, u32 off)
{
    bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
    return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
                 u8 val, u32 off)
{
    bcm_writeb(val, priv->base + off);
}


/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
    return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
                       u32 val, u32 off)
{
    bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
    return bcm_readl(bcm_enet_shared_base[1] +
        bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
                       u32 val, u32 off, int chan)
{
    bcm_writel(val, bcm_enet_shared_base[1] +
        bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
    return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
                       u32 val, u32 off, int chan)
{
    bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}
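
/*
 * Layout of the three shared register blocks, as used by the helpers
 * above:
 *   bcm_enet_shared_base[0] - global DMA configuration (enet_dma_*)
 *   bcm_enet_shared_base[1] - per-channel control registers, one
 *                             dma_chan_width-sized window per channel
 *                             (enet_dmac_*)
 *   bcm_enet_shared_base[2] - per-channel state RAM (enet_dmas_*)
 */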

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
    int limit;

    /* make sure mii interrupt status is cleared */
    enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

    enet_writel(priv, data, ENET_MIIDATA_REG);
    wmb();

    /* busy wait on mii interrupt bit, with timeout */
    limit = 1000;
    do {
        if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
            break;
        udelay(1);
    } while (limit-- > 0);

    return (limit < 0) ? 1 : 0;
}
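
/*
 * The word the callers below write to ENET_MIIDATA_REG encodes an
 * IEEE 802.3 clause 22 MDIO frame: the register number, a 0x2
 * turnaround pattern, the PHY address and a read/write opcode (plus
 * the 16 data bits for writes). do_mdio_op() then just fires the
 * transaction and polls the MII interrupt bit until it completes.
 */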

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
                  int regnum)
{
    u32 tmp, val;

    tmp = regnum << ENET_MIIDATA_REG_SHIFT;
    tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
    tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
    tmp |= ENET_MIIDATA_OP_READ_MASK;

    if (do_mdio_op(priv, tmp))
        return -1;

    val = enet_readl(priv, ENET_MIIDATA_REG);
    val &= 0xffff;
    return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
                   int regnum, u16 value)
{
    u32 tmp;

    tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
    tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
    tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
    tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
    tmp |= ENET_MIIDATA_OP_WRITE_MASK;

    (void)do_mdio_op(priv, tmp);
    return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
                     int regnum)
{
    return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
                      int regnum, u16 value)
{
    return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
                  int regnum)
{
    return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
                    int regnum, int value)
{
    bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
    struct bcm_enet_priv *priv;

    priv = netdev_priv(dev);

    while (priv->rx_desc_count < priv->rx_ring_size) {
        struct bcm_enet_desc *desc;
        int desc_idx;
        u32 len_stat;

        desc_idx = priv->rx_dirty_desc;
        desc = &priv->rx_desc_cpu[desc_idx];

        if (!priv->rx_buf[desc_idx]) {
            void *buf;

            if (likely(napi_mode))
                buf = napi_alloc_frag(priv->rx_frag_size);
            else
                buf = netdev_alloc_frag(priv->rx_frag_size);
            if (unlikely(!buf))
                break;
            priv->rx_buf[desc_idx] = buf;
            desc->address = dma_map_single(&priv->pdev->dev,
                               buf + priv->rx_buf_offset,
                               priv->rx_buf_size,
                               DMA_FROM_DEVICE);
        }

        len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
        len_stat |= DMADESC_OWNER_MASK;
        if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
            len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
            priv->rx_dirty_desc = 0;
        } else {
            priv->rx_dirty_desc++;
        }
        wmb();
        desc->len_stat = len_stat;

        priv->rx_desc_count++;

        /* tell dma engine we allocated one buffer */
        if (priv->dma_has_sram)
            enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
        else
            enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
    }

    /* If rx ring is still empty, set a timer to try allocating
     * again at a later time. */
    if (priv->rx_desc_count == 0 && netif_running(dev)) {
        dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
        priv->rx_timeout.expires = jiffies + HZ;
        add_timer(&priv->rx_timeout);
    }

    return 0;
}
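
/*
 * Refill invariants worth keeping in mind: rx_dirty_desc always points
 * at the next descriptor to (re)arm, the last ring entry carries the
 * WRAP flag so the DMA engine loops back to entry 0, and the wmb()
 * guarantees the buffer address is visible before the OWNER bit hands
 * the descriptor to hardware. The final BUFALLOC write credits the
 * engine with one more buffer for its internal accounting.
 */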

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
    struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
    struct net_device *dev = priv->net_dev;

    spin_lock(&priv->rx_lock);
    bcm_enet_refill_rx(dev, false);
    spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
    struct bcm_enet_priv *priv;
    struct list_head rx_list;
    struct device *kdev;
    int processed;

    priv = netdev_priv(dev);
    INIT_LIST_HEAD(&rx_list);
    kdev = &priv->pdev->dev;
    processed = 0;

    /* don't scan ring further than the number of refilled
     * descriptors */
    if (budget > priv->rx_desc_count)
        budget = priv->rx_desc_count;

    do {
        struct bcm_enet_desc *desc;
        struct sk_buff *skb;
        int desc_idx;
        u32 len_stat;
        unsigned int len;
        void *buf;

        desc_idx = priv->rx_curr_desc;
        desc = &priv->rx_desc_cpu[desc_idx];

        /* make sure we actually read the descriptor status on
         * each loop iteration */
        rmb();

        len_stat = desc->len_stat;

        /* break if dma ownership belongs to hw */
        if (len_stat & DMADESC_OWNER_MASK)
            break;

        processed++;
        priv->rx_curr_desc++;
        if (priv->rx_curr_desc == priv->rx_ring_size)
            priv->rx_curr_desc = 0;

        /* if the packet does not have start of packet _and_
         * end of packet flags set, then just recycle it */
        if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
            (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
            dev->stats.rx_dropped++;
            continue;
        }

        /* recycle packet if it's marked as bad */
        if (!priv->enet_is_sw &&
            unlikely(len_stat & DMADESC_ERR_MASK)) {
            dev->stats.rx_errors++;

            if (len_stat & DMADESC_OVSIZE_MASK)
                dev->stats.rx_length_errors++;
            if (len_stat & DMADESC_CRC_MASK)
                dev->stats.rx_crc_errors++;
            if (len_stat & DMADESC_UNDER_MASK)
                dev->stats.rx_frame_errors++;
            if (len_stat & DMADESC_OV_MASK)
                dev->stats.rx_fifo_errors++;
            continue;
        }

        /* valid packet */
        buf = priv->rx_buf[desc_idx];
        len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
        /* don't include FCS */
        len -= 4;

        if (len < copybreak) {
            skb = napi_alloc_skb(&priv->napi, len);
            if (unlikely(!skb)) {
                /* forget packet, just rearm desc */
                dev->stats.rx_dropped++;
                continue;
            }

            dma_sync_single_for_cpu(kdev, desc->address,
                        len, DMA_FROM_DEVICE);
            memcpy(skb->data, buf + priv->rx_buf_offset, len);
            dma_sync_single_for_device(kdev, desc->address,
                           len, DMA_FROM_DEVICE);
        } else {
            dma_unmap_single(kdev, desc->address,
                     priv->rx_buf_size, DMA_FROM_DEVICE);
            priv->rx_buf[desc_idx] = NULL;

            skb = napi_build_skb(buf, priv->rx_frag_size);
            if (unlikely(!skb)) {
                skb_free_frag(buf);
                dev->stats.rx_dropped++;
                continue;
            }
            skb_reserve(skb, priv->rx_buf_offset);
        }

        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, dev);
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += len;
        list_add_tail(&skb->list, &rx_list);

    } while (processed < budget);

    netif_receive_skb_list(&rx_list);
    priv->rx_desc_count -= processed;

    if (processed || !priv->rx_desc_count) {
        bcm_enet_refill_rx(dev, true);

        /* kick rx dma */
        enet_dmac_writel(priv, priv->dma_chan_en_mask,
                     ENETDMAC_CHANCFG, priv->rx_chan);
    }

    return processed;
}
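
/*
 * The copybreak split above trades a memcpy for buffer churn: short
 * frames (< copybreak bytes) are copied into a small skb and the DMA
 * buffer stays mapped and is rearmed in place, while larger frames
 * are unmapped and handed to the stack zero-copy via napi_build_skb(),
 * forcing a fresh buffer allocation on the next refill pass.
 */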


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
    struct bcm_enet_priv *priv;
    unsigned int bytes;
    int released;

    priv = netdev_priv(dev);
    bytes = 0;
    released = 0;

    while (priv->tx_desc_count < priv->tx_ring_size) {
        struct bcm_enet_desc *desc;
        struct sk_buff *skb;

        /* We run in a bh and fight against start_xmit, which
         * is called with bh disabled */
        spin_lock(&priv->tx_lock);

        desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

        if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
            spin_unlock(&priv->tx_lock);
            break;
        }

        /* ensure other fields of the descriptor were not read
         * before we checked ownership */
        rmb();

        skb = priv->tx_skb[priv->tx_dirty_desc];
        priv->tx_skb[priv->tx_dirty_desc] = NULL;
        dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
                 DMA_TO_DEVICE);

        priv->tx_dirty_desc++;
        if (priv->tx_dirty_desc == priv->tx_ring_size)
            priv->tx_dirty_desc = 0;
        priv->tx_desc_count++;

        spin_unlock(&priv->tx_lock);

        if (desc->len_stat & DMADESC_UNDER_MASK)
            dev->stats.tx_errors++;

        bytes += skb->len;
        napi_consume_skb(skb, budget);
        released++;
    }

    netdev_completed_queue(dev, released, bytes);

    if (netif_queue_stopped(dev) && released)
        netif_wake_queue(dev);

    return released;
}
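
/*
 * The two callers use the force flag differently: the NAPI poll loop
 * calls this with force == 0 and stops at the first descriptor that
 * hardware still owns, while bcm_enet_stop() forces reclaim of
 * everything after the DMA channels are disabled, so no skb is leaked
 * on ifdown.
 */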

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
    struct bcm_enet_priv *priv;
    struct net_device *dev;
    int rx_work_done;

    priv = container_of(napi, struct bcm_enet_priv, napi);
    dev = priv->net_dev;

    /* ack interrupts */
    enet_dmac_writel(priv, priv->dma_chan_int_mask,
             ENETDMAC_IR, priv->rx_chan);
    enet_dmac_writel(priv, priv->dma_chan_int_mask,
             ENETDMAC_IR, priv->tx_chan);

    /* reclaim sent skb */
    bcm_enet_tx_reclaim(dev, 0, budget);

    spin_lock(&priv->rx_lock);
    rx_work_done = bcm_enet_receive_queue(dev, budget);
    spin_unlock(&priv->rx_lock);

    if (rx_work_done >= budget) {
        /* rx queue is not yet empty/clean */
        return rx_work_done;
    }

    /* no more packets in rx/tx queue, remove device from poll
     * queue */
    napi_complete_done(napi, rx_work_done);

    /* restore rx/tx interrupt */
    enet_dmac_writel(priv, priv->dma_chan_int_mask,
             ENETDMAC_IRMASK, priv->rx_chan);
    enet_dmac_writel(priv, priv->dma_chan_int_mask,
             ENETDMAC_IRMASK, priv->tx_chan);

    return rx_work_done;
}
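
/*
 * This follows the standard NAPI contract: returning the full budget
 * keeps the device on the poll list with interrupts still masked,
 * whereas napi_complete_done() plus the IRMASK writes re-enable the
 * rx/tx interrupts that bcm_enet_isr_dma() masked off, closing the
 * interrupt -> poll -> interrupt cycle.
 */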

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
    struct net_device *dev;
    struct bcm_enet_priv *priv;
    u32 stat;

    dev = dev_id;
    priv = netdev_priv(dev);

    stat = enet_readl(priv, ENET_IR_REG);
    if (!(stat & ENET_IR_MIB))
        return IRQ_NONE;

    /* clear & mask interrupt */
    enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
    enet_writel(priv, 0, ENET_IRMASK_REG);

    /* read mib registers in workqueue */
    schedule_work(&priv->mib_update_task);

    return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
    struct net_device *dev;
    struct bcm_enet_priv *priv;

    dev = dev_id;
    priv = netdev_priv(dev);

    /* mask rx/tx interrupts */
    enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
    enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

    napi_schedule(&priv->napi);

    return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct bcm_enet_priv *priv;
    struct bcm_enet_desc *desc;
    u32 len_stat;
    netdev_tx_t ret;

    priv = netdev_priv(dev);

    /* lock against tx reclaim */
    spin_lock(&priv->tx_lock);

    /* make sure the tx hw queue is not full, should not happen
     * since we stop the queue before it becomes full */
    if (unlikely(!priv->tx_desc_count)) {
        netif_stop_queue(dev);
        dev_err(&priv->pdev->dev,
            "xmit called with no tx desc available?\n");
        ret = NETDEV_TX_BUSY;
        goto out_unlock;
    }

    /* pad small packets sent on a switch device */
    if (priv->enet_is_sw && skb->len < 64) {
        int needed = 64 - skb->len;

        if (unlikely(skb_tailroom(skb) < needed)) {
            struct sk_buff *nskb;

            nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
            if (!nskb) {
                ret = NETDEV_TX_BUSY;
                goto out_unlock;
            }
            dev_kfree_skb(skb);
            skb = nskb;
        }
        skb_put_zero(skb, needed);
    }

    /* point to the next available desc */
    desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
    priv->tx_skb[priv->tx_curr_desc] = skb;

    /* fill descriptor */
    desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                       DMA_TO_DEVICE);

    len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
    len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
        DMADESC_APPEND_CRC |
        DMADESC_OWNER_MASK;

    priv->tx_curr_desc++;
    if (priv->tx_curr_desc == priv->tx_ring_size) {
        priv->tx_curr_desc = 0;
        len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
    }
    priv->tx_desc_count--;

    /* dma might be already polling, make sure we update desc
     * fields in correct order */
    wmb();
    desc->len_stat = len_stat;
    wmb();

    netdev_sent_queue(dev, skb->len);

    /* kick tx dma */
    if (!netdev_xmit_more() || !priv->tx_desc_count)
        enet_dmac_writel(priv, priv->dma_chan_en_mask,
                 ENETDMAC_CHANCFG, priv->tx_chan);

    /* stop queue if no more desc available */
    if (!priv->tx_desc_count)
        netif_stop_queue(dev);

    dev->stats.tx_bytes += skb->len;
    dev->stats.tx_packets++;
    ret = NETDEV_TX_OK;

out_unlock:
    spin_unlock(&priv->tx_lock);
    return ret;
}
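
/*
 * Note the tx locking scheme: start_xmit runs with bottom halves
 * disabled and takes tx_lock against bcm_enet_tx_reclaim(), which runs
 * from NAPI context. The CHANCFG kick is also batched: with
 * netdev_xmit_more() the doorbell write is skipped until the last skb
 * of a train, unless the ring just ran out of descriptors.
 */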

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
    struct bcm_enet_priv *priv;
    struct sockaddr *addr = p;
    u32 val;

    priv = netdev_priv(dev);
    eth_hw_addr_set(dev, addr->sa_data);

    /* use perfect match register 0 to store my mac address */
    val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
        (dev->dev_addr[4] << 8) | dev->dev_addr[5];
    enet_writel(priv, val, ENET_PML_REG(0));

    val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
    val |= ENET_PMH_DATAVALID_MASK;
    enet_writel(priv, val, ENET_PMH_REG(0));

    return 0;
}
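
/*
 * Each perfect match filter is split across a register pair: PML holds
 * the low four bytes of the MAC address and PMH the top two, with
 * ENET_PMH_DATAVALID_MASK arming the entry. Slot 0 is reserved for the
 * interface's own address; slots 1-3 are managed by
 * bcm_enet_set_multicast_list() below.
 */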

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
    struct bcm_enet_priv *priv;
    struct netdev_hw_addr *ha;
    u32 val;
    int i;

    priv = netdev_priv(dev);

    val = enet_readl(priv, ENET_RXCFG_REG);

    if (dev->flags & IFF_PROMISC)
        val |= ENET_RXCFG_PROMISC_MASK;
    else
        val &= ~ENET_RXCFG_PROMISC_MASK;

    /* only 3 perfect match registers left, first one is used for
     * own mac address */
    if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
        val |= ENET_RXCFG_ALLMCAST_MASK;
    else
        val &= ~ENET_RXCFG_ALLMCAST_MASK;

    /* no need to set perfect match registers if we catch all
     * multicast */
    if (val & ENET_RXCFG_ALLMCAST_MASK) {
        enet_writel(priv, val, ENET_RXCFG_REG);
        return;
    }

    i = 0;
    netdev_for_each_mc_addr(ha, dev) {
        u8 *dmi_addr;
        u32 tmp;

        if (i == 3)
            break;
        /* update perfect match registers */
        dmi_addr = ha->addr;
        tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
            (dmi_addr[4] << 8) | dmi_addr[5];
        enet_writel(priv, tmp, ENET_PML_REG(i + 1));

        tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
        tmp |= ENET_PMH_DATAVALID_MASK;
        enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
    }

    for (; i < 3; i++) {
        enet_writel(priv, 0, ENET_PML_REG(i + 1));
        enet_writel(priv, 0, ENET_PMH_REG(i + 1));
    }

    enet_writel(priv, val, ENET_RXCFG_REG);
}
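
/*
 * Filtering policy in short: promiscuous mode and allmulti map
 * straight to RXCFG bits; up to three multicast addresses get exact
 * matches in slots 1-3, and anything beyond that falls back to
 * accepting all multicast. Unused slots are cleared so stale entries
 * cannot keep matching.
 */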

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
    u32 val;

    val = enet_readl(priv, ENET_TXCTL_REG);
    if (fullduplex)
        val |= ENET_TXCTL_FD_MASK;
    else
        val &= ~ENET_TXCTL_FD_MASK;
    enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
    u32 val;

    /* rx flow control (pause frame handling) */
    val = enet_readl(priv, ENET_RXCFG_REG);
    if (rx_en)
        val |= ENET_RXCFG_ENFLOW_MASK;
    else
        val &= ~ENET_RXCFG_ENFLOW_MASK;
    enet_writel(priv, val, ENET_RXCFG_REG);

    if (!priv->dma_has_sram)
        return;

    /* tx flow control (pause frame generation) */
    val = enet_dma_readl(priv, ENETDMA_CFG_REG);
    if (tx_en)
        val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
    else
        val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
    enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
    struct bcm_enet_priv *priv;
    struct phy_device *phydev;
    int status_changed;

    priv = netdev_priv(dev);
    phydev = dev->phydev;
    status_changed = 0;

    if (priv->old_link != phydev->link) {
        status_changed = 1;
        priv->old_link = phydev->link;
    }

    /* reflect duplex change in mac configuration */
    if (phydev->link && phydev->duplex != priv->old_duplex) {
        bcm_enet_set_duplex(priv,
                    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
        status_changed = 1;
        priv->old_duplex = phydev->duplex;
    }

    /* enable flow control if remote advertises it (trust phylib to
     * check that duplex is full) */
    if (phydev->link && phydev->pause != priv->old_pause) {
        int rx_pause_en, tx_pause_en;

        if (phydev->pause) {
            /* pause was advertised by lpa and us */
            rx_pause_en = 1;
            tx_pause_en = 1;
        } else if (!priv->pause_auto) {
            /* pause setting overridden by user */
            rx_pause_en = priv->pause_rx;
            tx_pause_en = priv->pause_tx;
        } else {
            rx_pause_en = 0;
            tx_pause_en = 0;
        }

        bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
        status_changed = 1;
        priv->old_pause = phydev->pause;
    }

    if (status_changed) {
        pr_info("%s: link %s", dev->name, phydev->link ?
            "UP" : "DOWN");
        if (phydev->link)
            pr_cont(" - %d/%s - flow control %s", phydev->speed,
                   DUPLEX_FULL == phydev->duplex ? "full" : "half",
                   phydev->pause == 1 ? "rx&tx" : "off");

        pr_cont("\n");
    }
}
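
/*
 * Pause resolution above: if the PHY reports that both link partners
 * negotiated pause, flow control is switched on in both directions;
 * otherwise the user's ethtool -A override applies when pause
 * autonegotiation was disabled, and flow control stays off when it
 * was left automatic.
 */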

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
    struct bcm_enet_priv *priv;

    priv = netdev_priv(dev);
    bcm_enet_set_duplex(priv, priv->force_duplex_full);
    bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
    netif_carrier_on(dev);

    pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
        dev->name,
        priv->force_speed_100 ? 100 : 10,
        priv->force_duplex_full ? "full" : "half",
        priv->pause_rx ? "rx" : "off",
        priv->pause_tx ? "tx" : "off");
}

static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
{
    int i;

    for (i = 0; i < priv->rx_ring_size; i++) {
        struct bcm_enet_desc *desc;

        if (!priv->rx_buf[i])
            continue;

        desc = &priv->rx_desc_cpu[i];
        dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
                 DMA_FROM_DEVICE);
        skb_free_frag(priv->rx_buf[i]);
    }
    kfree(priv->rx_buf);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
    struct bcm_enet_priv *priv;
    struct sockaddr addr;
    struct device *kdev;
    struct phy_device *phydev;
    int i, ret;
    unsigned int size;
    char phy_id[MII_BUS_ID_SIZE + 3];
    void *p;
    u32 val;

    priv = netdev_priv(dev);
    kdev = &priv->pdev->dev;

    if (priv->has_phy) {
        /* connect to PHY */
        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
             priv->mii_bus->id, priv->phy_id);

        phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
                     PHY_INTERFACE_MODE_MII);

        if (IS_ERR(phydev)) {
            dev_err(kdev, "could not attach to PHY\n");
            return PTR_ERR(phydev);
        }

        /* mask with MAC supported features */
        phy_support_sym_pause(phydev);
        phy_set_max_speed(phydev, SPEED_100);
        phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
                  priv->pause_auto);

        phy_attached_info(phydev);

        priv->old_link = 0;
        priv->old_duplex = -1;
        priv->old_pause = -1;
    } else {
        phydev = NULL;
    }

    /* mask all interrupts and request them */
    enet_writel(priv, 0, ENET_IRMASK_REG);
    enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
    enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

    ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
    if (ret)
        goto out_phy_disconnect;

    ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
              dev->name, dev);
    if (ret)
        goto out_freeirq;

    ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
              0, dev->name, dev);
    if (ret)
        goto out_freeirq_rx;

    /* initialize perfect match registers */
    for (i = 0; i < 4; i++) {
        enet_writel(priv, 0, ENET_PML_REG(i));
        enet_writel(priv, 0, ENET_PMH_REG(i));
    }

    /* write device mac address */
    memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
    bcm_enet_set_mac_address(dev, &addr);

    /* allocate rx dma ring */
    size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
    p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
    if (!p) {
        ret = -ENOMEM;
        goto out_freeirq_tx;
    }

    priv->rx_desc_alloc_size = size;
    priv->rx_desc_cpu = p;

    /* allocate tx dma ring */
    size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
    p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
    if (!p) {
        ret = -ENOMEM;
        goto out_free_rx_ring;
    }

    priv->tx_desc_alloc_size = size;
    priv->tx_desc_cpu = p;

    priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
                   GFP_KERNEL);
    if (!priv->tx_skb) {
        ret = -ENOMEM;
        goto out_free_tx_ring;
    }

    priv->tx_desc_count = priv->tx_ring_size;
    priv->tx_dirty_desc = 0;
    priv->tx_curr_desc = 0;
    spin_lock_init(&priv->tx_lock);

    /* init & fill rx ring with buffers */
    priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
                   GFP_KERNEL);
    if (!priv->rx_buf) {
        ret = -ENOMEM;
        goto out_free_tx_skb;
    }

    priv->rx_desc_count = 0;
    priv->rx_dirty_desc = 0;
    priv->rx_curr_desc = 0;

    /* initialize flow control buffer allocation */
    if (priv->dma_has_sram)
        enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                ENETDMA_BUFALLOC_REG(priv->rx_chan));
    else
        enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
                ENETDMAC_BUFALLOC, priv->rx_chan);

    if (bcm_enet_refill_rx(dev, false)) {
        dev_err(kdev, "cannot allocate rx buffer queue\n");
        ret = -ENOMEM;
        goto out;
    }

    /* write rx & tx ring addresses */
    if (priv->dma_has_sram) {
        enet_dmas_writel(priv, priv->rx_desc_dma,
                 ENETDMAS_RSTART_REG, priv->rx_chan);
        enet_dmas_writel(priv, priv->tx_desc_dma,
             ENETDMAS_RSTART_REG, priv->tx_chan);
    } else {
        enet_dmac_writel(priv, priv->rx_desc_dma,
                ENETDMAC_RSTART, priv->rx_chan);
        enet_dmac_writel(priv, priv->tx_desc_dma,
                ENETDMAC_RSTART, priv->tx_chan);
    }

    /* clear remaining state ram for rx & tx channel */
    if (priv->dma_has_sram) {
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
        enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
    } else {
        enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
        enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
    }

    /* set max rx/tx length */
    enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
    enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

    /* set dma maximum burst len */
    enet_dmac_writel(priv, priv->dma_maxburst,
             ENETDMAC_MAXBURST, priv->rx_chan);
    enet_dmac_writel(priv, priv->dma_maxburst,
             ENETDMAC_MAXBURST, priv->tx_chan);

    /* set correct transmit fifo watermark */
    enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

    /* set flow control low/high threshold to 1/3 / 2/3 */
    if (priv->dma_has_sram) {
        val = priv->rx_ring_size / 3;
        enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
        val = (priv->rx_ring_size * 2) / 3;
        enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
    } else {
        enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
        enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
        enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
    }

    /* all set, enable mac and interrupts, start dma engine and
     * kick rx dma channel */
    wmb();
    val = enet_readl(priv, ENET_CTL_REG);
    val |= ENET_CTL_ENABLE_MASK;
    enet_writel(priv, val, ENET_CTL_REG);
    if (priv->dma_has_sram)
        enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
    enet_dmac_writel(priv, priv->dma_chan_en_mask,
             ENETDMAC_CHANCFG, priv->rx_chan);

    /* watch "mib counters about to overflow" interrupt */
    enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
    enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

    /* watch "packet transferred" interrupt in rx and tx */
    enet_dmac_writel(priv, priv->dma_chan_int_mask,
             ENETDMAC_IR, priv->rx_chan);
    enet_dmac_writel(priv, priv->dma_chan_int_mask,
             ENETDMAC_IR, priv->tx_chan);

    /* make sure we enable napi before rx interrupt */
    napi_enable(&priv->napi);

    enet_dmac_writel(priv, priv->dma_chan_int_mask,
             ENETDMAC_IRMASK, priv->rx_chan);
    enet_dmac_writel(priv, priv->dma_chan_int_mask,
             ENETDMAC_IRMASK, priv->tx_chan);

    if (phydev)
        phy_start(phydev);
    else
        bcm_enet_adjust_link(dev);

    netif_start_queue(dev);
    return 0;

out:
    bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
    kfree(priv->tx_skb);

out_free_tx_ring:
    dma_free_coherent(kdev, priv->tx_desc_alloc_size,
              priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
    dma_free_coherent(kdev, priv->rx_desc_alloc_size,
              priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
    free_irq(priv->irq_tx, dev);

out_freeirq_rx:
    free_irq(priv->irq_rx, dev);

out_freeirq:
    free_irq(dev->irq, dev);

out_phy_disconnect:
    if (phydev)
        phy_disconnect(phydev);

    return ret;
}
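
/*
 * Bring-up order in bcm_enet_open() matters: interrupts are masked
 * before they are requested, rings and buffers exist before the ring
 * start addresses are written, the MAC and DMA engine are enabled only
 * once everything is in place, and NAPI is enabled before the rx/tx
 * interrupts are unmasked so the first interrupt always finds a live
 * poller. The error path unwinds in exactly the reverse order.
 */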

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
    int limit;
    u32 val;

    val = enet_readl(priv, ENET_CTL_REG);
    val |= ENET_CTL_DISABLE_MASK;
    enet_writel(priv, val, ENET_CTL_REG);

    limit = 1000;
    do {
        u32 val;

        val = enet_readl(priv, ENET_CTL_REG);
        if (!(val & ENET_CTL_DISABLE_MASK))
            break;
        udelay(1);
    } while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
    int limit;

    enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

    limit = 1000;
    do {
        u32 val;

        val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
        if (!(val & ENETDMAC_CHANCFG_EN_MASK))
            break;
        udelay(1);
    } while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
    struct bcm_enet_priv *priv;
    struct device *kdev;

    priv = netdev_priv(dev);
    kdev = &priv->pdev->dev;

    netif_stop_queue(dev);
    napi_disable(&priv->napi);
    if (priv->has_phy)
        phy_stop(dev->phydev);
    del_timer_sync(&priv->rx_timeout);

    /* mask all interrupts */
    enet_writel(priv, 0, ENET_IRMASK_REG);
    enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
    enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

    /* make sure no mib update is scheduled */
    cancel_work_sync(&priv->mib_update_task);

    /* disable dma & mac */
    bcm_enet_disable_dma(priv, priv->tx_chan);
    bcm_enet_disable_dma(priv, priv->rx_chan);
    bcm_enet_disable_mac(priv);

    /* force reclaim of all tx buffers */
    bcm_enet_tx_reclaim(dev, 1, 0);

    /* free the rx buffer ring */
    bcm_enet_free_rx_buf_ring(kdev, priv);

    /* free remaining allocated memory */
    kfree(priv->tx_skb);
    dma_free_coherent(kdev, priv->rx_desc_alloc_size,
              priv->rx_desc_cpu, priv->rx_desc_dma);
    dma_free_coherent(kdev, priv->tx_desc_alloc_size,
              priv->tx_desc_cpu, priv->tx_desc_dma);
    free_irq(priv->irq_tx, dev);
    free_irq(priv->irq_rx, dev);
    free_irq(dev->irq, dev);

    /* release phy */
    if (priv->has_phy)
        phy_disconnect(dev->phydev);

    /* reset BQL after forced tx reclaim to prevent kernel panic */
    netdev_reset_queue(dev);

    return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
    char stat_string[ETH_GSTRING_LEN];
    int sizeof_stat;
    int stat_offset;
    int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),     \
             offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),      \
             offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
    { "rx_packets", DEV_STAT(rx_packets), -1 },
    { "tx_packets", DEV_STAT(tx_packets), -1 },
    { "rx_bytes", DEV_STAT(rx_bytes), -1 },
    { "tx_bytes", DEV_STAT(tx_bytes), -1 },
    { "rx_errors", DEV_STAT(rx_errors), -1 },
    { "tx_errors", DEV_STAT(tx_errors), -1 },
    { "rx_dropped", DEV_STAT(rx_dropped), -1 },
    { "tx_dropped", DEV_STAT(tx_dropped), -1 },

    { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
    { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
    { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
    { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
    { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
    { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
    { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
    { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
    { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
    { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
    { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
    { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
    { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
    { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
    { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
    { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
    { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
    { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
    { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
    { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
    { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

    { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
    { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
    { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
    { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
    { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
    { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
    { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
    { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
    { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
    { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
    { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
    { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
    { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
    { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
    { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
    { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
    { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
    { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
    { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
    { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
    { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
    { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN  ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
    ETH_MIB_TX_ALL_OCTETS,
    ETH_MIB_TX_ALL_PKTS,
    ETH_MIB_RX_ALL_OCTETS,
    ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
                 struct ethtool_drvinfo *drvinfo)
{
    strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
    strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
                    int string_set)
{
    switch (string_set) {
    case ETH_SS_STATS:
        return BCM_ENET_STATS_LEN;
    default:
        return -EINVAL;
    }
}

static void bcm_enet_get_strings(struct net_device *netdev,
                 u32 stringset, u8 *data)
{
    int i;

    switch (stringset) {
    case ETH_SS_STATS:
        for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
            memcpy(data + i * ETH_GSTRING_LEN,
                   bcm_enet_gstrings_stats[i].stat_string,
                   ETH_GSTRING_LEN);
        }
        break;
    }
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
    int i;

    for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
        const struct bcm_enet_stats *s;
        u32 val;
        char *p;

        s = &bcm_enet_gstrings_stats[i];
        if (s->mib_reg == -1)
            continue;

        val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
        p = (char *)priv + s->stat_offset;

        if (s->sizeof_stat == sizeof(u64))
            *(u64 *)p += val;
        else
            *(u32 *)p += val;
    }

    /* also empty unused mib counters to make sure mib counter
     * overflow interrupt is cleared */
    for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
        (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}
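
/*
 * The MIB block is configured read-to-clear in bcm_enet_hw_preinit(),
 * so every readl above folds the hardware delta into the software
 * accumulators. The trailing reads of the unused counters matter too:
 * a counter that is never read would keep the "about to overflow"
 * interrupt condition asserted.
 */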

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
    struct bcm_enet_priv *priv;

    priv = container_of(t, struct bcm_enet_priv, mib_update_task);
    mutex_lock(&priv->mib_update_lock);
    update_mib_counters(priv);
    mutex_unlock(&priv->mib_update_lock);

    /* reenable mib interrupt */
    if (netif_running(priv->net_dev))
        enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
                       struct ethtool_stats *stats,
                       u64 *data)
{
    struct bcm_enet_priv *priv;
    int i;

    priv = netdev_priv(netdev);

    mutex_lock(&priv->mib_update_lock);
    update_mib_counters(priv);

    for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
        const struct bcm_enet_stats *s;
        char *p;

        s = &bcm_enet_gstrings_stats[i];
        if (s->mib_reg == -1)
            p = (char *)&netdev->stats;
        else
            p = (char *)priv;
        p += s->stat_offset;
        data[i] = (s->sizeof_stat == sizeof(u64)) ?
            *(u64 *)p : *(u32 *)p;
    }
    mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
    struct bcm_enet_priv *priv;

    priv = netdev_priv(dev);
    if (priv->has_phy)
        return phy_ethtool_nway_reset(dev);

    return -EOPNOTSUPP;
}

static int bcm_enet_get_link_ksettings(struct net_device *dev,
                       struct ethtool_link_ksettings *cmd)
{
    struct bcm_enet_priv *priv;
    u32 supported, advertising;

    priv = netdev_priv(dev);

    if (priv->has_phy) {
        if (!dev->phydev)
            return -ENODEV;

        phy_ethtool_ksettings_get(dev->phydev, cmd);

        return 0;
    } else {
        cmd->base.autoneg = 0;
        cmd->base.speed = (priv->force_speed_100) ?
            SPEED_100 : SPEED_10;
        cmd->base.duplex = (priv->force_duplex_full) ?
            DUPLEX_FULL : DUPLEX_HALF;
        supported = ADVERTISED_10baseT_Half |
            ADVERTISED_10baseT_Full |
            ADVERTISED_100baseT_Half |
            ADVERTISED_100baseT_Full;
        advertising = 0;
        ethtool_convert_legacy_u32_to_link_mode(
            cmd->link_modes.supported, supported);
        ethtool_convert_legacy_u32_to_link_mode(
            cmd->link_modes.advertising, advertising);
        cmd->base.port = PORT_MII;
    }
    return 0;
}

static int bcm_enet_set_link_ksettings(struct net_device *dev,
                       const struct ethtool_link_ksettings *cmd)
{
    struct bcm_enet_priv *priv;

    priv = netdev_priv(dev);
    if (priv->has_phy) {
        if (!dev->phydev)
            return -ENODEV;
        return phy_ethtool_ksettings_set(dev->phydev, cmd);
    } else {

        if (cmd->base.autoneg ||
            (cmd->base.speed != SPEED_100 &&
             cmd->base.speed != SPEED_10) ||
            cmd->base.port != PORT_MII)
            return -EINVAL;

        priv->force_speed_100 =
            (cmd->base.speed == SPEED_100) ? 1 : 0;
        priv->force_duplex_full =
            (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

        if (netif_running(dev))
            bcm_enet_adjust_link(dev);
        return 0;
    }
}

static void
bcm_enet_get_ringparam(struct net_device *dev,
               struct ethtool_ringparam *ering,
               struct kernel_ethtool_ringparam *kernel_ering,
               struct netlink_ext_ack *extack)
{
    struct bcm_enet_priv *priv;

    priv = netdev_priv(dev);

    /* rx/tx ring is actually only limited by memory */
    ering->rx_max_pending = 8192;
    ering->tx_max_pending = 8192;
    ering->rx_pending = priv->rx_ring_size;
    ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
                  struct ethtool_ringparam *ering,
                  struct kernel_ethtool_ringparam *kernel_ering,
                  struct netlink_ext_ack *extack)
{
    struct bcm_enet_priv *priv;
    int was_running;

    priv = netdev_priv(dev);

    was_running = 0;
    if (netif_running(dev)) {
        bcm_enet_stop(dev);
        was_running = 1;
    }

    priv->rx_ring_size = ering->rx_pending;
    priv->tx_ring_size = ering->tx_pending;

    if (was_running) {
        int err;

        err = bcm_enet_open(dev);
        if (err)
            dev_close(dev);
        else
            bcm_enet_set_multicast_list(dev);
    }
    return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
                    struct ethtool_pauseparam *ecmd)
{
    struct bcm_enet_priv *priv;

    priv = netdev_priv(dev);
    ecmd->autoneg = priv->pause_auto;
    ecmd->rx_pause = priv->pause_rx;
    ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
                   struct ethtool_pauseparam *ecmd)
{
    struct bcm_enet_priv *priv;

    priv = netdev_priv(dev);

    if (priv->has_phy) {
        if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
            /* asymmetric pause mode not supported; it would
             * actually be possible, but the integrated PHY
             * has a read-only asym_pause bit */
            return -EINVAL;
        }
    } else {
        /* no pause autoneg on direct mii connection */
        if (ecmd->autoneg)
            return -EINVAL;
    }

    priv->pause_auto = ecmd->autoneg;
    priv->pause_rx = ecmd->rx_pause;
    priv->pause_tx = ecmd->tx_pause;

    return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
    .get_strings        = bcm_enet_get_strings,
    .get_sset_count     = bcm_enet_get_sset_count,
    .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
    .nway_reset     = bcm_enet_nway_reset,
    .get_drvinfo        = bcm_enet_get_drvinfo,
    .get_link       = ethtool_op_get_link,
    .get_ringparam      = bcm_enet_get_ringparam,
    .set_ringparam      = bcm_enet_set_ringparam,
    .get_pauseparam     = bcm_enet_get_pauseparam,
    .set_pauseparam     = bcm_enet_set_pauseparam,
    .get_link_ksettings = bcm_enet_get_link_ksettings,
    .set_link_ksettings = bcm_enet_set_link_ksettings,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct bcm_enet_priv *priv;

    priv = netdev_priv(dev);
    if (priv->has_phy) {
        if (!dev->phydev)
            return -ENODEV;
        return phy_mii_ioctl(dev->phydev, rq, cmd);
    } else {
        struct mii_if_info mii;

        mii.dev = dev;
        mii.mdio_read = bcm_enet_mdio_read_mii;
        mii.mdio_write = bcm_enet_mdio_write_mii;
        mii.phy_id = 0;
        mii.phy_id_mask = 0x3f;
        mii.reg_num_mask = 0x1f;
        return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
    }
}

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
    struct bcm_enet_priv *priv = netdev_priv(dev);
    int actual_mtu = new_mtu;

    if (netif_running(dev))
        return -EBUSY;

    /* add ethernet header + vlan tag size */
    actual_mtu += VLAN_ETH_HLEN;

    /*
     * setup maximum size before we get overflow mark in
     * descriptor, note that this will not prevent reception of
     * big frames, they will be split into multiple buffers
     * anyway
     */
    priv->hw_mtu = actual_mtu;

    /*
     * align rx buffer size to dma burst len, account for FCS
     * since it's appended
     */
    priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
                  priv->dma_maxburst * 4);

    priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

    dev->mtu = new_mtu;
    return 0;
}
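
/*
 * Worked example for the sizing above, assuming the default 1500 byte
 * MTU: hw_mtu = 1500 + VLAN_ETH_HLEN (18) = 1518, and the rx buffer
 * must hold 1518 + ETH_FCS_LEN (4) = 1522 bytes, rounded up to a
 * multiple of four times the DMA burst length (e.g. with a
 * dma_maxburst of 16 words, ALIGN(1522, 64) = 1536). rx_frag_size
 * additionally reserves rx_buf_offset bytes of headroom and the
 * skb_shared_info footprint so the buffer can later be turned into an
 * skb without copying.
 */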

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
    u32 val;
    int limit;

    /* make sure mac is disabled */
    bcm_enet_disable_mac(priv);

    /* soft reset mac */
    val = ENET_CTL_SRESET_MASK;
    enet_writel(priv, val, ENET_CTL_REG);
    wmb();

    limit = 1000;
    do {
        val = enet_readl(priv, ENET_CTL_REG);
        if (!(val & ENET_CTL_SRESET_MASK))
            break;
        udelay(1);
    } while (limit--);

    /* select correct mii interface */
    val = enet_readl(priv, ENET_CTL_REG);
    if (priv->use_external_mii)
        val |= ENET_CTL_EPHYSEL_MASK;
    else
        val &= ~ENET_CTL_EPHYSEL_MASK;
    enet_writel(priv, val, ENET_CTL_REG);

    /* turn on mdc clock */
    enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
            ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

    /* set mib counters to self-clear when read */
    val = enet_readl(priv, ENET_MIBCTL_REG);
    val |= ENET_MIBCTL_RDCLEAR_MASK;
    enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
    .ndo_open       = bcm_enet_open,
    .ndo_stop       = bcm_enet_stop,
    .ndo_start_xmit     = bcm_enet_start_xmit,
    .ndo_set_mac_address    = bcm_enet_set_mac_address,
    .ndo_set_rx_mode    = bcm_enet_set_multicast_list,
    .ndo_eth_ioctl      = bcm_enet_ioctl,
    .ndo_change_mtu     = bcm_enet_change_mtu,
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
    struct bcm_enet_priv *priv;
    struct net_device *dev;
    struct bcm63xx_enet_platform_data *pd;
    int irq, irq_rx, irq_tx;
    struct mii_bus *bus;
    int i, ret;

    if (!bcm_enet_shared_base[0])
        return -EPROBE_DEFER;

    irq = platform_get_irq(pdev, 0);
    irq_rx = platform_get_irq(pdev, 1);
    irq_tx = platform_get_irq(pdev, 2);
    if (irq < 0 || irq_rx < 0 || irq_tx < 0)
        return -ENODEV;

    dev = alloc_etherdev(sizeof(*priv));
    if (!dev)
        return -ENOMEM;
    priv = netdev_priv(dev);

    priv->enet_is_sw = false;
    priv->dma_maxburst = BCMENET_DMA_MAXBURST;
    priv->rx_buf_offset = NET_SKB_PAD;

    ret = bcm_enet_change_mtu(dev, dev->mtu);
    if (ret)
        goto out;

    priv->base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(priv->base)) {
        ret = PTR_ERR(priv->base);
        goto out;
    }

    dev->irq = priv->irq = irq;
    priv->irq_rx = irq_rx;
    priv->irq_tx = irq_tx;

    priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
    if (IS_ERR(priv->mac_clk)) {
        ret = PTR_ERR(priv->mac_clk);
        goto out;
    }
    ret = clk_prepare_enable(priv->mac_clk);
    if (ret)
        goto out;

    /* initialize default and fetch platform data */
    priv->rx_ring_size = BCMENET_DEF_RX_DESC;
    priv->tx_ring_size = BCMENET_DEF_TX_DESC;

    pd = dev_get_platdata(&pdev->dev);
    if (pd) {
        eth_hw_addr_set(dev, pd->mac_addr);
        priv->has_phy = pd->has_phy;
        priv->phy_id = pd->phy_id;
        priv->has_phy_interrupt = pd->has_phy_interrupt;
        priv->phy_interrupt = pd->phy_interrupt;
        priv->use_external_mii = !pd->use_internal_phy;
        priv->pause_auto = pd->pause_auto;
        priv->pause_rx = pd->pause_rx;
        priv->pause_tx = pd->pause_tx;
        priv->force_duplex_full = pd->force_duplex_full;
        priv->force_speed_100 = pd->force_speed_100;
        priv->dma_chan_en_mask = pd->dma_chan_en_mask;
        priv->dma_chan_int_mask = pd->dma_chan_int_mask;
        priv->dma_chan_width = pd->dma_chan_width;
        priv->dma_has_sram = pd->dma_has_sram;
        priv->dma_desc_shift = pd->dma_desc_shift;
        priv->rx_chan = pd->rx_chan;
        priv->tx_chan = pd->tx_chan;
    }

    if (priv->has_phy && !priv->use_external_mii) {
        /* using internal PHY, enable clock */
        priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
        if (IS_ERR(priv->phy_clk)) {
            ret = PTR_ERR(priv->phy_clk);
            priv->phy_clk = NULL;
            goto out_disable_clk_mac;
        }
        ret = clk_prepare_enable(priv->phy_clk);
        if (ret)
1800             goto out_disable_clk_mac;
1801     }
1802 
1803     /* do minimal hardware init to be able to probe mii bus */
1804     bcm_enet_hw_preinit(priv);
1805 
1806     /* MII bus registration */
1807     if (priv->has_phy) {
1809         priv->mii_bus = mdiobus_alloc();
1810         if (!priv->mii_bus) {
1811             ret = -ENOMEM;
1812             goto out_uninit_hw;
1813         }
1814 
1815         bus = priv->mii_bus;
1816         bus->name = "bcm63xx_enet MII bus";
1817         bus->parent = &pdev->dev;
1818         bus->priv = priv;
1819         bus->read = bcm_enet_mdio_read_phylib;
1820         bus->write = bcm_enet_mdio_write_phylib;
1821         snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
1822 
1823         /* only probe the bus address where we think the PHY is,
1824          * because the mdio read operation returns 0 instead of
1825          * 0xffff if no device is present on the bus */
1826         bus->phy_mask = ~(1 << priv->phy_id);
1827 
1828         if (priv->has_phy_interrupt)
1829             bus->irq[priv->phy_id] = priv->phy_interrupt;
1830 
1831         ret = mdiobus_register(bus);
1832         if (ret) {
1833             dev_err(&pdev->dev, "unable to register mdio bus\n");
1834             goto out_free_mdio;
1835         }
1836     } else {
1837         /* run platform code to initialize PHY device */
1838         if (pd && pd->mii_config &&
1839             pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1840                    bcm_enet_mdio_write_mii)) {
1841             dev_err(&pdev->dev, "unable to configure mdio bus\n");
1842             ret = -ENODEV;
1843             goto out_uninit_hw;
1844         }
1845     }
1846 
1847     spin_lock_init(&priv->rx_lock);
1848 
1849     /* init rx timeout (used for oom) */
1850     timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
1851 
1852     /* init the mib update lock&work */
1853     mutex_init(&priv->mib_update_lock);
1854     INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1855 
1856     /* zero mib counters */
1857     for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1858         enet_writel(priv, 0, ENET_MIB_REG(i));
1859 
1860     /* register netdevice */
1861     dev->netdev_ops = &bcm_enet_ops;
1862     netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
1863 
1864     dev->ethtool_ops = &bcm_enet_ethtool_ops;
1865     /* MTU range: 46 - 2028 */
1866     dev->min_mtu = ETH_ZLEN - ETH_HLEN;
1867     dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
1868     SET_NETDEV_DEV(dev, &pdev->dev);
1869 
1870     ret = register_netdev(dev);
1871     if (ret)
1872         goto out_unregister_mdio;
1873 
1874     netif_carrier_off(dev);
1875     platform_set_drvdata(pdev, dev);
1876     priv->pdev = pdev;
1877     priv->net_dev = dev;
1878 
1879     return 0;
1880 
1881 out_unregister_mdio:
1882     if (priv->mii_bus)
1883         mdiobus_unregister(priv->mii_bus);
1884 
1885 out_free_mdio:
1886     if (priv->mii_bus)
1887         mdiobus_free(priv->mii_bus);
1888 
1889 out_uninit_hw:
1890     /* turn off mdc clock */
1891     enet_writel(priv, 0, ENET_MIISC_REG);
1892     clk_disable_unprepare(priv->phy_clk);
1893 
1894 out_disable_clk_mac:
1895     clk_disable_unprepare(priv->mac_clk);
1896 out:
1897     free_netdev(dev);
1898     return ret;
1899 }
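/* Editor's note: a minimal board-code sketch of how a platform might
 * hand this driver its configuration (hypothetical values; only fields
 * that bcm_enet_probe() above actually reads are shown):
 *
 *	static struct bcm63xx_enet_platform_data enet0_pd = {
 *		.has_phy	  = 1,
 *		.phy_id		  = 1,
 *		.use_internal_phy = 1,
 *		.pause_auto	  = 1,
 *	};
 */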
1900 
1902 /*
1903  * exit function: stop hardware and unregister the netdevice
1904  */
1905 static int bcm_enet_remove(struct platform_device *pdev)
1906 {
1907     struct bcm_enet_priv *priv;
1908     struct net_device *dev;
1909 
1910     /* stop netdevice */
1911     dev = platform_get_drvdata(pdev);
1912     priv = netdev_priv(dev);
1913     unregister_netdev(dev);
1914 
1915     /* turn off mdc clock */
1916     enet_writel(priv, 0, ENET_MIISC_REG);
1917 
1918     if (priv->has_phy) {
1919         mdiobus_unregister(priv->mii_bus);
1920         mdiobus_free(priv->mii_bus);
1921     } else {
1922         struct bcm63xx_enet_platform_data *pd;
1923 
1924         pd = dev_get_platdata(&pdev->dev);
1925         if (pd && pd->mii_config)
1926             pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1927                        bcm_enet_mdio_write_mii);
1928     }
1929 
1930     /* disable hw block clocks */
1931     clk_disable_unprepare(priv->phy_clk);
1932     clk_disable_unprepare(priv->mac_clk);
1933 
1934     free_netdev(dev);
1935     return 0;
1936 }
1937 
1938 static struct platform_driver bcm63xx_enet_driver = {
1939     .probe  = bcm_enet_probe,
1940     .remove = bcm_enet_remove,
1941     .driver = {
1942         .name   = "bcm63xx_enet",
1943         .owner  = THIS_MODULE,
1944     },
1945 };
1946 
1947 /*
1948  * switch mii access callbacks
1949  */
1950 static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1951                 int ext, int phy_id, int location)
1952 {
1953     u32 reg;
1954     int ret;
1955 
1956     spin_lock_bh(&priv->enetsw_mdio_lock);
1957     enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1958 
1959     reg = ENETSW_MDIOC_RD_MASK |
1960         (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1961         (location << ENETSW_MDIOC_REG_SHIFT);
1962 
1963     if (ext)
1964         reg |= ENETSW_MDIOC_EXT_MASK;
1965 
1966     enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1967     udelay(50);
1968     ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
1969     spin_unlock_bh(&priv->enetsw_mdio_lock);
1970     return ret;
1971 }
1972 
1973 static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
1974                  int ext, int phy_id, int location,
1975                  uint16_t data)
1976 {
1977     u32 reg;
1978 
1979     spin_lock_bh(&priv->enetsw_mdio_lock);
1980     enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1981 
1982     reg = ENETSW_MDIOC_WR_MASK |
1983         (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1984         (location << ENETSW_MDIOC_REG_SHIFT);
1985 
1986     if (ext)
1987         reg |= ENETSW_MDIOC_EXT_MASK;
1988 
1989     reg |= data;
1990 
1991     enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1992     udelay(50);
1993     spin_unlock_bh(&priv->enetsw_mdio_lock);
1994 }
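/* Editor's note: the two helpers above serialize raw MDIO transactions
 * under enetsw_mdio_lock and simply give the switch a fixed 50us to
 * complete each one (there is no completion poll).  Typical usage, as
 * in swphy_poll_timer() below:
 *
 *	val = bcmenet_sw_mdio_read(priv, external_phy,
 *				   port->phy_id, MII_BMSR);
 */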
1995 
1996 static inline int bcm_enet_port_is_rgmii(int portid)
1997 {
1998     return portid >= ENETSW_RGMII_PORT0;
1999 }
2000 
2001 /*
2002  * enet sw PHY polling
2003  */
2004 static void swphy_poll_timer(struct timer_list *t)
2005 {
2006     struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
2007     unsigned int i;
2008 
2009     for (i = 0; i < priv->num_ports; i++) {
2010         struct bcm63xx_enetsw_port *port;
2011         int val, j, up, advertise, lpa, speed, duplex, media;
2012         int external_phy = bcm_enet_port_is_rgmii(i);
2013         u8 override;
2014 
2015         port = &priv->used_ports[i];
2016         if (!port->used)
2017             continue;
2018 
2019         if (port->bypass_link)
2020             continue;
2021 
2022         /* BMSR link status is latched; read twice to get the current state */
2023         for (j = 0; j < 2; j++)
2024             val = bcmenet_sw_mdio_read(priv, external_phy,
2025                            port->phy_id, MII_BMSR);
2026 
2027         if (val == 0xffff)
2028             continue;
2029 
2030         up = (val & BMSR_LSTATUS) ? 1 : 0;
2031         if (!(up ^ priv->sw_port_link[i]))
2032             continue;
2033 
2034         priv->sw_port_link[i] = up;
2035 
2036         /* link changed */
2037         if (!up) {
2038             dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2039                  port->name);
2040             enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2041                       ENETSW_PORTOV_REG(i));
2042             enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2043                       ENETSW_PTCTRL_TXDIS_MASK,
2044                       ENETSW_PTCTRL_REG(i));
2045             continue;
2046         }
2047 
2048         advertise = bcmenet_sw_mdio_read(priv, external_phy,
2049                          port->phy_id, MII_ADVERTISE);
2050 
2051         lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2052                        MII_LPA);
2053 
2054         /* figure out media and duplex from advertise and LPA values */
2055         media = mii_nway_result(lpa & advertise);
2056         duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2057 
2058         if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2059             speed = 100;
2060         else
2061             speed = 10;
2062 
2063         if (val & BMSR_ESTATEN) {
2064             advertise = bcmenet_sw_mdio_read(priv, external_phy,
2065                         port->phy_id, MII_CTRL1000);
2066 
2067             lpa = bcmenet_sw_mdio_read(priv, external_phy,
2068                         port->phy_id, MII_STAT1000);
2069 
2070             if ((advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
2071                 (lpa & (LPA_1000FULL | LPA_1000HALF))) {
2072                 speed = 1000;
2073                 duplex = (lpa & LPA_1000FULL);
2074             }
2075         }
2076 
2077         dev_info(&priv->pdev->dev,
2078              "link UP on %s, %dMbps, %s-duplex\n",
2079              port->name, speed, duplex ? "full" : "half");
2080 
2081         override = ENETSW_PORTOV_ENABLE_MASK |
2082             ENETSW_PORTOV_LINKUP_MASK;
2083 
2084         if (speed == 1000)
2085             override |= ENETSW_IMPOV_1000_MASK;
2086         else if (speed == 100)
2087             override |= ENETSW_IMPOV_100_MASK;
2088         if (duplex)
2089             override |= ENETSW_IMPOV_FDX_MASK;
2090 
2091         enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2092         enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2093     }
2094 
2095     priv->swphy_poll.expires = jiffies + HZ;
2096     add_timer(&priv->swphy_poll);
2097 }
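/* Editor's note: a worked example of the link resolution above, with
 * hypothetical but typical register values (illustration only):
 *
 *	advertise = 0x01e1	(CSMA | 10H | 10F | 100H | 100F)
 *	lpa	  = 0x45e1	(partner: same modes, plus pause and ack)
 *
 * mii_nway_result(0x01e1 & 0x45e1) returns 0x0100 (the 100FULL bit), so
 * speed resolves to 100 and duplex to full, and the poller programs a
 * 100 Mbit/s full-duplex override on the port.
 */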
2098 
2099 /*
2100  * open callback, allocate dma rings & buffers and start rx operation
2101  */
2102 static int bcm_enetsw_open(struct net_device *dev)
2103 {
2104     struct bcm_enet_priv *priv;
2105     struct device *kdev;
2106     int i, ret;
2107     unsigned int size;
2108     void *p;
2109     u32 val;
2110 
2111     priv = netdev_priv(dev);
2112     kdev = &priv->pdev->dev;
2113 
2114     /* mask all interrupts and request them */
2115     enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2116     enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2117 
2118     ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2119               0, dev->name, dev);
2120     if (ret)
2121         goto out_freeirq;
2122 
2123     if (priv->irq_tx != -1) {
2124         ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2125                   0, dev->name, dev);
2126         if (ret)
2127             goto out_freeirq_rx;
2128     }
2129 
2130     /* allocate rx dma ring */
2131     size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2132     p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2133     if (!p) {
2134         dev_err(kdev, "cannot allocate rx ring %u\n", size);
2135         ret = -ENOMEM;
2136         goto out_freeirq_tx;
2137     }
2138 
2139     priv->rx_desc_alloc_size = size;
2140     priv->rx_desc_cpu = p;
2141 
2142     /* allocate tx dma ring */
2143     size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2144     p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2145     if (!p) {
2146         dev_err(kdev, "cannot allocate tx ring\n");
2147         ret = -ENOMEM;
2148         goto out_free_rx_ring;
2149     }
2150 
2151     priv->tx_desc_alloc_size = size;
2152     priv->tx_desc_cpu = p;
2153 
2154     priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
2155                    GFP_KERNEL);
2156     if (!priv->tx_skb) {
2157         dev_err(kdev, "cannot allocate tx skb queue\n");
2158         ret = -ENOMEM;
2159         goto out_free_tx_ring;
2160     }
2161 
2162     priv->tx_desc_count = priv->tx_ring_size;
2163     priv->tx_dirty_desc = 0;
2164     priv->tx_curr_desc = 0;
2165     spin_lock_init(&priv->tx_lock);
2166 
2167     /* init & fill rx ring with buffers */
2168     priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
2169                    GFP_KERNEL);
2170     if (!priv->rx_buf) {
2171         dev_err(kdev, "cannot allocate rx buffer queue\n");
2172         ret = -ENOMEM;
2173         goto out_free_tx_skb;
2174     }
2175 
2176     priv->rx_desc_count = 0;
2177     priv->rx_dirty_desc = 0;
2178     priv->rx_curr_desc = 0;
2179 
2180     /* disable all ports */
2181     for (i = 0; i < priv->num_ports; i++) {
2182         enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2183                   ENETSW_PORTOV_REG(i));
2184         enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2185                   ENETSW_PTCTRL_TXDIS_MASK,
2186                   ENETSW_PTCTRL_REG(i));
2187 
2188         priv->sw_port_link[i] = 0;
2189     }
2190 
2191     /* reset mib */
2192     val = enetsw_readb(priv, ENETSW_GMCR_REG);
2193     val |= ENETSW_GMCR_RST_MIB_MASK;
2194     enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2195     mdelay(1);
2196     val &= ~ENETSW_GMCR_RST_MIB_MASK;
2197     enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2198     mdelay(1);
2199 
2200     /* force CPU port state */
2201     val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2202     val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2203     enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2204 
2205     /* enable switch forward engine */
2206     val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2207     val |= ENETSW_SWMODE_FWD_EN_MASK;
2208     enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2209 
2210     /* enable jumbo on all ports */
2211     enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2212     enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2213 
2214     /* initialize flow control buffer allocation */
2215     enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2216             ENETDMA_BUFALLOC_REG(priv->rx_chan));
2217 
2218     if (bcm_enet_refill_rx(dev, false)) {
2219         dev_err(kdev, "cannot allocate rx buffers\n");
2220         ret = -ENOMEM;
2221         goto out;
2222     }
2223 
2224     /* write rx & tx ring addresses */
2225     enet_dmas_writel(priv, priv->rx_desc_dma,
2226              ENETDMAS_RSTART_REG, priv->rx_chan);
2227     enet_dmas_writel(priv, priv->tx_desc_dma,
2228              ENETDMAS_RSTART_REG, priv->tx_chan);
2229 
2230     /* clear remaining state ram for rx & tx channel */
2231     enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2232     enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2233     enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2234     enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2235     enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2236     enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2237 
2238     /* set dma maximum burst len */
2239     enet_dmac_writel(priv, priv->dma_maxburst,
2240              ENETDMAC_MAXBURST, priv->rx_chan);
2241     enet_dmac_writel(priv, priv->dma_maxburst,
2242              ENETDMAC_MAXBURST, priv->tx_chan);
2243 
2244     /* set flow control low/high thresholds to 1/3 and 2/3 of the rx ring */
2245     val = priv->rx_ring_size / 3;
2246     enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2247     val = (priv->rx_ring_size * 2) / 3;
2248     enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
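    /* Editor's note: with the default rx ring size (assumed 64
     * descriptors here), this programs a low watermark of 21 and a
     * high watermark of 42 descriptors (integer division). */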
2249 
2250     /* all set, enable mac and interrupts, start dma engine and
2251      * kick rx dma channel
2252      */
2253     wmb();
2254     enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2255     enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2256              ENETDMAC_CHANCFG, priv->rx_chan);
2257 
2258     /* watch "packet transferred" interrupt in rx and tx */
2259     enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2260              ENETDMAC_IR, priv->rx_chan);
2261     enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2262              ENETDMAC_IR, priv->tx_chan);
2263 
2264     /* make sure we enable napi before rx interrupt  */
2265     napi_enable(&priv->napi);
2266 
2267     enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2268              ENETDMAC_IRMASK, priv->rx_chan);
2269     enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2270              ENETDMAC_IRMASK, priv->tx_chan);
2271 
2272     netif_carrier_on(dev);
2273     netif_start_queue(dev);
2274 
2275     /* apply override config for bypass_link ports here. */
2276     for (i = 0; i < priv->num_ports; i++) {
2277         struct bcm63xx_enetsw_port *port;
2278         u8 override;
2279         port = &priv->used_ports[i];
2280         if (!port->used)
2281             continue;
2282 
2283         if (!port->bypass_link)
2284             continue;
2285 
2286         override = ENETSW_PORTOV_ENABLE_MASK |
2287             ENETSW_PORTOV_LINKUP_MASK;
2288 
2289         switch (port->force_speed) {
2290         case 1000:
2291             override |= ENETSW_IMPOV_1000_MASK;
2292             break;
2293         case 100:
2294             override |= ENETSW_IMPOV_100_MASK;
2295             break;
2296         case 10:
2297             break;
2298         default:
2299             pr_warn("invalid forced speed on port %s: assume 10\n",
2300                    port->name);
2301             break;
2302         }
2303 
2304         if (port->force_duplex_full)
2305             override |= ENETSW_IMPOV_FDX_MASK;
2306 
2308         enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2309         enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2310     }
2311 
2312     /* start phy polling timer */
2313     timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
2314     mod_timer(&priv->swphy_poll, jiffies);
2315     return 0;
2316 
2317 out:
2318     bcm_enet_free_rx_buf_ring(kdev, priv);
2319 
2320 out_free_tx_skb:
2321     kfree(priv->tx_skb);
2322 
2323 out_free_tx_ring:
2324     dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2325               priv->tx_desc_cpu, priv->tx_desc_dma);
2326 
2327 out_free_rx_ring:
2328     dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2329               priv->rx_desc_cpu, priv->rx_desc_dma);
2330 
2331 out_freeirq_tx:
2332     if (priv->irq_tx != -1)
2333         free_irq(priv->irq_tx, dev);
2334 
2335 out_freeirq_rx:
2336     free_irq(priv->irq_rx, dev);
2337 
2338 out_freeirq:
2339     return ret;
2340 }
2341 
2342 /* stop callback */
2343 static int bcm_enetsw_stop(struct net_device *dev)
2344 {
2345     struct bcm_enet_priv *priv;
2346     struct device *kdev;
2347 
2348     priv = netdev_priv(dev);
2349     kdev = &priv->pdev->dev;
2350 
2351     del_timer_sync(&priv->swphy_poll);
2352     netif_stop_queue(dev);
2353     napi_disable(&priv->napi);
2354     del_timer_sync(&priv->rx_timeout);
2355 
2356     /* mask all interrupts */
2357     enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2358     enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2359 
2360     /* disable dma & mac */
2361     bcm_enet_disable_dma(priv, priv->tx_chan);
2362     bcm_enet_disable_dma(priv, priv->rx_chan);
2363 
2364     /* force reclaim of all tx buffers */
2365     bcm_enet_tx_reclaim(dev, 1, 0);
2366 
2367     /* free the rx buffer ring */
2368     bcm_enet_free_rx_buf_ring(kdev, priv);
2369 
2370     /* free remaining allocated memory */
2371     kfree(priv->tx_skb);
2372     dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2373               priv->rx_desc_cpu, priv->rx_desc_dma);
2374     dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2375               priv->tx_desc_cpu, priv->tx_desc_dma);
2376     if (priv->irq_tx != -1)
2377         free_irq(priv->irq_tx, dev);
2378     free_irq(priv->irq_rx, dev);
2379 
2380     /* reset BQL after forced tx reclaim to prevent kernel panic */
2381     netdev_reset_queue(dev);
2382 
2383     return 0;
2384 }
2385 
2386 /* try to sort out the phy external status by walking the used_ports
2387  * array in the bcm_enet_priv structure. in case the phy address is not
2388  * assigned to any physical port on the switch, assume it is external
2389  * (and yell at the user).
2390  */
2391 static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2392 {
2393     int i;
2394 
2395     for (i = 0; i < priv->num_ports; ++i) {
2396         if (!priv->used_ports[i].used)
2397             continue;
2398         if (priv->used_ports[i].phy_id == phy_id)
2399             return bcm_enet_port_is_rgmii(i);
2400     }
2401 
2402     printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
2403             phy_id);
2404     return 1;
2405 }
2406 
2407 /* can't use bcmenet_sw_mdio_read directly as we need to sort out
2408  * external/internal status of the given phy_id first.
2409  */
2410 static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
2411                     int location)
2412 {
2413     struct bcm_enet_priv *priv;
2414 
2415     priv = netdev_priv(dev);
2416     return bcmenet_sw_mdio_read(priv,
2417                     bcm_enetsw_phy_is_external(priv, phy_id),
2418                     phy_id, location);
2419 }
2420 
2421 /* can't use bcmenet_sw_mdio_write directly as we need to sort out
2422  * external/internal status of the given phy_id first.
2423  */
2424 static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
2425                       int location,
2426                       int val)
2427 {
2428     struct bcm_enet_priv *priv;
2429 
2430     priv = netdev_priv(dev);
2431     bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2432                   phy_id, location, val);
2433 }
2434 
2435 static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2436 {
2437     struct mii_if_info mii;
2438 
2439     mii.dev = dev;
2440     mii.mdio_read = bcm_enetsw_mii_mdio_read;
2441     mii.mdio_write = bcm_enetsw_mii_mdio_write;
2442     mii.phy_id = 0;
2443     mii.phy_id_mask = 0x3f;
2444     mii.reg_num_mask = 0x1f;
2445     return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
2446 }
2448 
2449 static const struct net_device_ops bcm_enetsw_ops = {
2450     .ndo_open       = bcm_enetsw_open,
2451     .ndo_stop       = bcm_enetsw_stop,
2452     .ndo_start_xmit     = bcm_enet_start_xmit,
2453     .ndo_change_mtu     = bcm_enet_change_mtu,
2454     .ndo_eth_ioctl      = bcm_enetsw_ioctl,
2455 };
2456 
2458 static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
2459     { "rx_packets", DEV_STAT(rx_packets), -1 },
2460     { "tx_packets", DEV_STAT(tx_packets), -1 },
2461     { "rx_bytes", DEV_STAT(rx_bytes), -1 },
2462     { "tx_bytes", DEV_STAT(tx_bytes), -1 },
2463     { "rx_errors", DEV_STAT(rx_errors), -1 },
2464     { "tx_errors", DEV_STAT(tx_errors), -1 },
2465     { "rx_dropped", DEV_STAT(rx_dropped), -1 },
2466     { "tx_dropped", DEV_STAT(tx_dropped), -1 },
2467 
2468     { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
2469     { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
2470     { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
2471     { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
2472     { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
2473     { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
2474     { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
2475     { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
2476     { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
2477     { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
2478       ETHSW_MIB_RX_1024_1522 },
2479     { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
2480       ETHSW_MIB_RX_1523_2047 },
2481     { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
2482       ETHSW_MIB_RX_2048_4095 },
2483     { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
2484       ETHSW_MIB_RX_4096_8191 },
2485     { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
2486       ETHSW_MIB_RX_8192_9728 },
2487     { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
2488     { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
2489     { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
2490     { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
2491     { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
2492 
2493     { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
2494     { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
2495     { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
2496     { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
2497     { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
2498     { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
2500 };
2501 
2502 #define BCM_ENETSW_STATS_LEN    \
2503     ARRAY_SIZE(bcm_enetsw_gstrings_stats)
2504 
2505 static void bcm_enetsw_get_strings(struct net_device *netdev,
2506                    u32 stringset, u8 *data)
2507 {
2508     int i;
2509 
2510     switch (stringset) {
2511     case ETH_SS_STATS:
2512         for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2513             memcpy(data + i * ETH_GSTRING_LEN,
2514                    bcm_enetsw_gstrings_stats[i].stat_string,
2515                    ETH_GSTRING_LEN);
2516         }
2517         break;
2518     }
2519 }
2520 
2521 static int bcm_enetsw_get_sset_count(struct net_device *netdev,
2522                      int string_set)
2523 {
2524     switch (string_set) {
2525     case ETH_SS_STATS:
2526         return BCM_ENETSW_STATS_LEN;
2527     default:
2528         return -EINVAL;
2529     }
2530 }
2531 
2532 static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
2533                    struct ethtool_drvinfo *drvinfo)
2534 {
2535     strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
2536     strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
2537 }
2538 
2539 static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
2540                      struct ethtool_stats *stats,
2541                      u64 *data)
2542 {
2543     struct bcm_enet_priv *priv;
2544     int i;
2545 
2546     priv = netdev_priv(netdev);
2547 
2548     for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2549         const struct bcm_enet_stats *s;
2550         u32 lo, hi;
2551         char *p;
2552         int reg;
2553 
2554         s = &bcm_enetsw_gstrings_stats[i];
2555 
2556         reg = s->mib_reg;
2557         if (reg == -1)
2558             continue;
2559 
2560         lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2561         p = (char *)priv + s->stat_offset;
2562 
2563         if (s->sizeof_stat == sizeof(u64)) {
2564             hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2565             *(u64 *)p = ((u64)hi << 32 | lo);
2566         } else {
2567             *(u32 *)p = lo;
2568         }
2569     }
2570 
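    /* second pass: copy each counter out of either netdev->stats or the
     * private mib snapshot (refreshed by the loop above) into the array
     * handed back to ethtool */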
2571     for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
2572         const struct bcm_enet_stats *s;
2573         char *p;
2574 
2575         s = &bcm_enetsw_gstrings_stats[i];
2576 
2577         if (s->mib_reg == -1)
2578             p = (char *)&netdev->stats + s->stat_offset;
2579         else
2580             p = (char *)priv + s->stat_offset;
2581 
2582         data[i] = (s->sizeof_stat == sizeof(u64)) ?
2583             *(u64 *)p : *(u32 *)p;
2584     }
2585 }
2586 
2587 static void
2588 bcm_enetsw_get_ringparam(struct net_device *dev,
2589              struct ethtool_ringparam *ering,
2590              struct kernel_ethtool_ringparam *kernel_ering,
2591              struct netlink_ext_ack *extack)
2592 {
2593     struct bcm_enet_priv *priv;
2594 
2595     priv = netdev_priv(dev);
2596 
2597     /* rx/tx ring is actually only limited by memory */
2598     ering->rx_max_pending = 8192;
2599     ering->tx_max_pending = 8192;
2600     ering->rx_mini_max_pending = 0;
2601     ering->rx_jumbo_max_pending = 0;
2602     ering->rx_pending = priv->rx_ring_size;
2603     ering->tx_pending = priv->tx_ring_size;
2604 }
2605 
2606 static int
2607 bcm_enetsw_set_ringparam(struct net_device *dev,
2608              struct ethtool_ringparam *ering,
2609              struct kernel_ethtool_ringparam *kernel_ering,
2610              struct netlink_ext_ack *extack)
2611 {
2612     struct bcm_enet_priv *priv;
2613     int was_running;
2614 
2615     priv = netdev_priv(dev);
2616 
2617     was_running = 0;
2618     if (netif_running(dev)) {
2619         bcm_enetsw_stop(dev);
2620         was_running = 1;
2621     }
2622 
2623     priv->rx_ring_size = ering->rx_pending;
2624     priv->tx_ring_size = ering->tx_pending;
2625 
2626     if (was_running) {
2627         int err;
2628 
2629         err = bcm_enetsw_open(dev);
2630         if (err)
2631             dev_close(dev);
2632     }
2633     return 0;
2634 }
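/* Editor's note: the two handlers above back the standard ethtool ring
 * interface, e.g. (illustrative shell commands):
 *
 *	ethtool -g eth0			query ring sizes
 *	ethtool -G eth0 rx 128 tx 128	resize both rings
 *
 * set_ringparam stops and reopens a running interface, so expect the
 * link to bounce.
 */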
2635 
2636 static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
2637     .get_strings        = bcm_enetsw_get_strings,
2638     .get_sset_count     = bcm_enetsw_get_sset_count,
2639     .get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
2640     .get_drvinfo        = bcm_enetsw_get_drvinfo,
2641     .get_ringparam      = bcm_enetsw_get_ringparam,
2642     .set_ringparam      = bcm_enetsw_set_ringparam,
2643 };
2644 
2645 /* allocate netdevice, request register memory and register device. */
2646 static int bcm_enetsw_probe(struct platform_device *pdev)
2647 {
2648     struct bcm_enet_priv *priv;
2649     struct net_device *dev;
2650     struct bcm63xx_enetsw_platform_data *pd;
2651     struct resource *res_mem;
2652     int ret, irq_rx, irq_tx;
2653 
2654     if (!bcm_enet_shared_base[0])
2655         return -EPROBE_DEFER;
2656 
2657     res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2658     irq_rx = platform_get_irq(pdev, 0);
2659     irq_tx = platform_get_irq(pdev, 1);
2660     if (!res_mem || irq_rx < 0)
2661         return -ENODEV;
2662 
2663     dev = alloc_etherdev(sizeof(*priv));
2664     if (!dev)
2665         return -ENOMEM;
2666     priv = netdev_priv(dev);
2667 
2668     /* initialize default and fetch platform data */
2669     priv->enet_is_sw = true;
2670     priv->irq_rx = irq_rx;
2671     priv->irq_tx = irq_tx;
2672     priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2673     priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2674     priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2675     priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
2676 
2677     pd = dev_get_platdata(&pdev->dev);
2678     if (pd) {
2679         eth_hw_addr_set(dev, pd->mac_addr);
2680         memcpy(priv->used_ports, pd->used_ports,
2681                sizeof(pd->used_ports));
2682         priv->num_ports = pd->num_ports;
2683         priv->dma_has_sram = pd->dma_has_sram;
2684         priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2685         priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2686         priv->dma_chan_width = pd->dma_chan_width;
2687     }
2688 
2689     ret = bcm_enet_change_mtu(dev, dev->mtu);
2690     if (ret)
2691         goto out;
2692 
2693     priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
2694     if (IS_ERR(priv->base)) {
2695         ret = PTR_ERR(priv->base);
2696         goto out;
2697     }
2698 
2699     priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
2700     if (IS_ERR(priv->mac_clk)) {
2701         ret = PTR_ERR(priv->mac_clk);
2702         goto out;
2703     }
2704     ret = clk_prepare_enable(priv->mac_clk);
2705     if (ret)
2706         goto out;
2707 
2708     priv->rx_chan = 0;
2709     priv->tx_chan = 1;
2710     spin_lock_init(&priv->rx_lock);
2711 
2712     /* init rx timeout (used for oom) */
2713     timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
2714 
2715     /* register netdevice */
2716     dev->netdev_ops = &bcm_enetsw_ops;
2717     netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
2718     dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
2719     SET_NETDEV_DEV(dev, &pdev->dev);
2720 
2721     spin_lock_init(&priv->enetsw_mdio_lock);
2722 
2723     ret = register_netdev(dev);
2724     if (ret)
2725         goto out_disable_clk;
2726 
2727     netif_carrier_off(dev);
2728     platform_set_drvdata(pdev, dev);
2729     priv->pdev = pdev;
2730     priv->net_dev = dev;
2731 
2732     return 0;
2733 
2734 out_disable_clk:
2735     clk_disable_unprepare(priv->mac_clk);
2736 out:
2737     free_netdev(dev);
2738     return ret;
2739 }
2740 
2742 /* exit function: stop hardware and unregister the netdevice */
2743 static int bcm_enetsw_remove(struct platform_device *pdev)
2744 {
2745     struct bcm_enet_priv *priv;
2746     struct net_device *dev;
2747 
2748     /* stop netdevice */
2749     dev = platform_get_drvdata(pdev);
2750     priv = netdev_priv(dev);
2751     unregister_netdev(dev);
2752 
2753     clk_disable_unprepare(priv->mac_clk);
2754 
2755     free_netdev(dev);
2756     return 0;
2757 }
2758 
2759 static struct platform_driver bcm63xx_enetsw_driver = {
2760     .probe  = bcm_enetsw_probe,
2761     .remove = bcm_enetsw_remove,
2762     .driver = {
2763         .name   = "bcm63xx_enetsw",
2764         .owner  = THIS_MODULE,
2765     },
2766 };
2767 
2768 /* reserve & remap memory space shared between all macs */
2769 static int bcm_enet_shared_probe(struct platform_device *pdev)
2770 {
2771     void __iomem *p[3];
2772     unsigned int i;
2773 
2774     memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
2775 
2776     for (i = 0; i < 3; i++) {
2777         p[i] = devm_platform_ioremap_resource(pdev, i);
2778         if (IS_ERR(p[i]))
2779             return PTR_ERR(p[i]);
2780     }
2781 
2782     memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));
2783 
2784     return 0;
2785 }
2786 
2787 static int bcm_enet_shared_remove(struct platform_device *pdev)
2788 {
2789     return 0;
2790 }
2791 
2792 /* this "shared" driver is needed because both macs share a single
2793  * address space
2794  */
2795 struct platform_driver bcm63xx_enet_shared_driver = {
2796     .probe  = bcm_enet_shared_probe,
2797     .remove = bcm_enet_shared_remove,
2798     .driver = {
2799         .name   = "bcm63xx_enet_shared",
2800         .owner  = THIS_MODULE,
2801     },
2802 };
2803 
2804 static struct platform_driver * const drivers[] = {
2805     &bcm63xx_enet_shared_driver,
2806     &bcm63xx_enet_driver,
2807     &bcm63xx_enetsw_driver,
2808 };
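/* Editor's note: the shared driver is listed first so that its probe can
 * fill bcm_enet_shared_base before the MAC and switch drivers run; until
 * it has, bcm_enet_probe() and bcm_enetsw_probe() both return
 * -EPROBE_DEFER (see the checks at the top of each).
 */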
2809 
2810 /* entry point */
2811 static int __init bcm_enet_init(void)
2812 {
2813     return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2814 }
2815 
2816 static void __exit bcm_enet_exit(void)
2817 {
2818     platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2819 }
2820 
2822 module_init(bcm_enet_init);
2823 module_exit(bcm_enet_exit);
2824 
2825 MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
2826 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
2827 MODULE_LICENSE("GPL");