// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/net/ethernet/ethoc.c
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *
 * Written by Thierry Reding <thierry.reding@avionic-design.de>
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/module.h>
#include <net/ethoc.h>

static int buffer_size = 0x8000; /* 32 KBytes */
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
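
/* Note: buffer_size is only a fallback; when the platform provides a
 * dedicated buffer memory resource, ethoc_probe() maps that region instead
 * of making a coherent DMA allocation of this size.
 */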

/* register offsets */
#define MODER       0x00
#define INT_SOURCE  0x04
#define INT_MASK    0x08
#define IPGT        0x0c
#define IPGR1       0x10
#define IPGR2       0x14
#define PACKETLEN   0x18
#define COLLCONF    0x1c
#define TX_BD_NUM   0x20
#define CTRLMODER   0x24
#define MIIMODER    0x28
#define MIICOMMAND  0x2c
#define MIIADDRESS  0x30
#define MIITX_DATA  0x34
#define MIIRX_DATA  0x38
#define MIISTATUS   0x3c
#define MAC_ADDR0   0x40
#define MAC_ADDR1   0x44
#define ETH_HASH0   0x48
#define ETH_HASH1   0x4c
#define ETH_TXCTRL  0x50
#define ETH_END     0x54

/* mode register */
#define MODER_RXEN  (1 <<  0) /* receive enable */
#define MODER_TXEN  (1 <<  1) /* transmit enable */
#define MODER_NOPRE (1 <<  2) /* no preamble */
#define MODER_BRO   (1 <<  3) /* broadcast address */
#define MODER_IAM   (1 <<  4) /* individual address mode */
#define MODER_PRO   (1 <<  5) /* promiscuous mode */
#define MODER_IFG   (1 <<  6) /* interframe gap for incoming frames */
#define MODER_LOOP  (1 <<  7) /* loopback */
#define MODER_NBO   (1 <<  8) /* no back-off */
#define MODER_EDE   (1 <<  9) /* excess defer enable */
#define MODER_FULLD (1 << 10) /* full duplex */
#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */
#define MODER_DCRC  (1 << 12) /* delayed CRC enable */
#define MODER_CRC   (1 << 13) /* CRC enable */
#define MODER_HUGE  (1 << 14) /* huge packets enable */
#define MODER_PAD   (1 << 15) /* padding enabled */
#define MODER_RSM   (1 << 16) /* receive small packets */

/* interrupt source and mask registers */
#define INT_MASK_TXF    (1 << 0) /* transmit frame */
#define INT_MASK_TXE    (1 << 1) /* transmit error */
#define INT_MASK_RXF    (1 << 2) /* receive frame */
#define INT_MASK_RXE    (1 << 3) /* receive error */
#define INT_MASK_BUSY   (1 << 4)
#define INT_MASK_TXC    (1 << 5) /* transmit control frame */
#define INT_MASK_RXC    (1 << 6) /* receive control frame */

#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE)
#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE)

#define INT_MASK_ALL ( \
        INT_MASK_TXF | INT_MASK_TXE | \
        INT_MASK_RXF | INT_MASK_RXE | \
        INT_MASK_TXC | INT_MASK_RXC | \
        INT_MASK_BUSY \
    )

/* packet length register */
#define PACKETLEN_MIN(min)      (((min) & 0xffff) << 16)
#define PACKETLEN_MAX(max)      (((max) & 0xffff) <<  0)
#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \
                    PACKETLEN_MAX(max))

/* transmit buffer number register */
#define TX_BD_NUM_VAL(x)    (((x) <= 0x80) ? (x) : 0x80)

/* control module mode register */
#define CTRLMODER_PASSALL   (1 << 0) /* pass all receive frames */
#define CTRLMODER_RXFLOW    (1 << 1) /* receive control flow */
#define CTRLMODER_TXFLOW    (1 << 2) /* transmit control flow */

/* MII mode register */
#define MIIMODER_CLKDIV(x)  ((x) & 0xfe) /* needs to be an even number */
#define MIIMODER_NOPRE      (1 << 8) /* no preamble */

/* MII command register */
#define MIICOMMAND_SCAN     (1 << 0) /* scan status */
#define MIICOMMAND_READ     (1 << 1) /* read status */
#define MIICOMMAND_WRITE    (1 << 2) /* write control data */

/* MII address register */
#define MIIADDRESS_FIAD(x)      (((x) & 0x1f) << 0)
#define MIIADDRESS_RGAD(x)      (((x) & 0x1f) << 8)
#define MIIADDRESS_ADDR(phy, reg)   (MIIADDRESS_FIAD(phy) | \
                    MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define MIITX_DATA_VAL(x)   ((x) & 0xffff)

/* MII receive data register */
#define MIIRX_DATA_VAL(x)   ((x) & 0xffff)

/* MII status register */
#define MIISTATUS_LINKFAIL  (1 << 0)
#define MIISTATUS_BUSY      (1 << 1)
#define MIISTATUS_INVALID   (1 << 2)

/* TX buffer descriptor */
#define TX_BD_CS        (1 <<  0) /* carrier sense lost */
#define TX_BD_DF        (1 <<  1) /* defer indication */
#define TX_BD_LC        (1 <<  2) /* late collision */
#define TX_BD_RL        (1 <<  3) /* retransmission limit */
#define TX_BD_RETRY_MASK    (0x00f0)
#define TX_BD_RETRY(x)      (((x) & 0x00f0) >>  4)
#define TX_BD_UR        (1 <<  8) /* transmitter underrun */
#define TX_BD_CRC       (1 << 11) /* TX CRC enable */
#define TX_BD_PAD       (1 << 12) /* pad enable for short packets */
#define TX_BD_WRAP      (1 << 13)
#define TX_BD_IRQ       (1 << 14) /* interrupt request enable */
#define TX_BD_READY     (1 << 15) /* TX buffer ready */
#define TX_BD_LEN(x)        (((x) & 0xffff) << 16)
#define TX_BD_LEN_MASK      (0xffff << 16)

#define TX_BD_STATS     (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
                TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define RX_BD_LC    (1 <<  0) /* late collision */
#define RX_BD_CRC   (1 <<  1) /* RX CRC error */
#define RX_BD_SF    (1 <<  2) /* short frame */
#define RX_BD_TL    (1 <<  3) /* too long */
#define RX_BD_DN    (1 <<  4) /* dribble nibble */
#define RX_BD_IS    (1 <<  5) /* invalid symbol */
#define RX_BD_OR    (1 <<  6) /* receiver overrun */
#define RX_BD_MISS  (1 <<  7)
#define RX_BD_CF    (1 <<  8) /* control frame */
#define RX_BD_WRAP  (1 << 13)
#define RX_BD_IRQ   (1 << 14) /* interrupt request enable */
#define RX_BD_EMPTY (1 << 15)
#define RX_BD_LEN(x)    (((x) & 0xffff) << 16)

#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
            RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)

#define ETHOC_BUFSIZ        1536
#define ETHOC_ZLEN      64
#define ETHOC_BD_BASE       0x400
#define ETHOC_TIMEOUT       (HZ / 2)
#define ETHOC_MII_TIMEOUT   (1 + (HZ / 5))
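
/* Note: buffer descriptors do not live in host memory but in the MAC's own
 * register window, starting at ETHOC_BD_BASE (0x400). At 8 bytes per
 * descriptor and at most 0x80 (128) descriptors (see TX_BD_NUM_VAL), the
 * descriptor area spans 0x400-0x7ff.
 */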

/**
 * struct ethoc - driver-private device structure
 * @iobase: pointer to I/O memory region
 * @membase:    pointer to buffer memory region
 * @big_endian: true if the device registers are big-endian
 * @num_bd: number of buffer descriptors
 * @num_tx: number of send buffers
 * @cur_tx: last send buffer written
 * @dty_tx: last buffer actually sent
 * @num_rx: number of receive buffers
 * @cur_rx: current receive buffer
 * @vma:        pointer to array of virtual memory addresses for buffers
 * @netdev: pointer to network device structure
 * @napi:   NAPI structure
 * @msg_enable: device message enable bits (NETIF_MSG_* flags)
 * @lock:   device lock
 * @mdio:   MDIO bus for PHY access
 * @clk:    clock
 * @phy_id: address of attached PHY
 * @old_link:   previous link info
 * @old_duplex: previous duplex info
 */
struct ethoc {
    void __iomem *iobase;
    void __iomem *membase;
    bool big_endian;

    unsigned int num_bd;
    unsigned int num_tx;
    unsigned int cur_tx;
    unsigned int dty_tx;

    unsigned int num_rx;
    unsigned int cur_rx;

    void **vma;

    struct net_device *netdev;
    struct napi_struct napi;
    u32 msg_enable;

    spinlock_t lock;

    struct mii_bus *mdio;
    struct clk *clk;
    s8 phy_id;

    int old_link;
    int old_duplex;
};

/**
 * struct ethoc_bd - buffer descriptor
 * @stat:   buffer statistics
 * @addr:   physical memory address
 */
struct ethoc_bd {
    u32 stat;
    u32 addr;
};

static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
    if (dev->big_endian)
        return ioread32be(dev->iobase + offset);
    else
        return ioread32(dev->iobase + offset);
}

static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
    if (dev->big_endian)
        iowrite32be(data, dev->iobase + offset);
    else
        iowrite32(data, dev->iobase + offset);
}

static inline void ethoc_read_bd(struct ethoc *dev, int index,
        struct ethoc_bd *bd)
{
    loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
    bd->stat = ethoc_read(dev, offset + 0);
    bd->addr = ethoc_read(dev, offset + 4);
}

static inline void ethoc_write_bd(struct ethoc *dev, int index,
        const struct ethoc_bd *bd)
{
    loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
    ethoc_write(dev, offset + 0, bd->stat);
    ethoc_write(dev, offset + 4, bd->addr);
}
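
/* Note: the register window may be wired up big- or little-endian depending
 * on how the core was integrated, hence the runtime big_endian switch in
 * the accessors above. A descriptor at index i occupies two consecutive
 * 32-bit words at ETHOC_BD_BASE + 8 * i: stat first, then addr.
 */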

static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
{
    u32 imask = ethoc_read(dev, INT_MASK);
    imask |= mask;
    ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
{
    u32 imask = ethoc_read(dev, INT_MASK);
    imask &= ~mask;
    ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
    ethoc_write(dev, INT_SOURCE, mask);
}

static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
{
    u32 mode = ethoc_read(dev, MODER);
    mode |= MODER_RXEN | MODER_TXEN;
    ethoc_write(dev, MODER, mode);
}

static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
{
    u32 mode = ethoc_read(dev, MODER);
    mode &= ~(MODER_RXEN | MODER_TXEN);
    ethoc_write(dev, MODER, mode);
}

static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
    struct ethoc_bd bd;
    int i;
    void *vma;

    dev->cur_tx = 0;
    dev->dty_tx = 0;
    dev->cur_rx = 0;

    ethoc_write(dev, TX_BD_NUM, dev->num_tx);

    /* setup transmission buffers */
    bd.addr = mem_start;
    bd.stat = TX_BD_IRQ | TX_BD_CRC;
    vma = dev->membase;

    for (i = 0; i < dev->num_tx; i++) {
        if (i == dev->num_tx - 1)
            bd.stat |= TX_BD_WRAP;

        ethoc_write_bd(dev, i, &bd);
        bd.addr += ETHOC_BUFSIZ;

        dev->vma[i] = vma;
        vma += ETHOC_BUFSIZ;
    }

    bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

    for (i = 0; i < dev->num_rx; i++) {
        if (i == dev->num_rx - 1)
            bd.stat |= RX_BD_WRAP;

        ethoc_write_bd(dev, dev->num_tx + i, &bd);
        bd.addr += ETHOC_BUFSIZ;

        dev->vma[dev->num_tx + i] = vma;
        vma += ETHOC_BUFSIZ;
    }

    return 0;
}
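
/* Note: the resulting layout is one contiguous buffer region carved into
 * ETHOC_BUFSIZ-sized slots: slots 0 .. num_tx - 1 back the TX descriptors,
 * followed by num_rx slots for the RX descriptors. vma[] records the
 * CPU-side mapping of each slot, while bd.addr holds the bus address the
 * MAC uses. The WRAP bit on the last descriptor of each ring makes the MAC
 * wrap back to the first.
 */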

static int ethoc_reset(struct ethoc *dev)
{
    u32 mode;

    /* TODO: reset controller? */

    ethoc_disable_rx_and_tx(dev);

    /* TODO: setup registers */

    /* enable FCS generation and automatic padding */
    mode = ethoc_read(dev, MODER);
    mode |= MODER_CRC | MODER_PAD;
    ethoc_write(dev, MODER, mode);

    /* set full-duplex mode */
    mode = ethoc_read(dev, MODER);
    mode |= MODER_FULLD;
    ethoc_write(dev, MODER, mode);
    ethoc_write(dev, IPGT, 0x15);

    ethoc_ack_irq(dev, INT_MASK_ALL);
    ethoc_enable_irq(dev, INT_MASK_ALL);
    ethoc_enable_rx_and_tx(dev);
    return 0;
}
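
/* Note: 0x15 appears to be the inter-packet gap value recommended for
 * full-duplex operation by the OpenCores ethmac documentation; only
 * MODER_FULLD is toggled later when the PHY renegotiates duplex (see
 * ethoc_mdio_poll()), IPGT is left alone.
 */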

static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
        struct ethoc_bd *bd)
{
    struct net_device *netdev = dev->netdev;
    unsigned int ret = 0;

    if (bd->stat & RX_BD_TL) {
        dev_err(&netdev->dev, "RX: frame too long\n");
        netdev->stats.rx_length_errors++;
        ret++;
    }

    if (bd->stat & RX_BD_SF) {
        dev_err(&netdev->dev, "RX: frame too short\n");
        netdev->stats.rx_length_errors++;
        ret++;
    }

    if (bd->stat & RX_BD_DN) {
        dev_err(&netdev->dev, "RX: dribble nibble\n");
        netdev->stats.rx_frame_errors++;
    }

    if (bd->stat & RX_BD_CRC) {
        dev_err(&netdev->dev, "RX: wrong CRC\n");
        netdev->stats.rx_crc_errors++;
        ret++;
    }

    if (bd->stat & RX_BD_OR) {
        dev_err(&netdev->dev, "RX: overrun\n");
        netdev->stats.rx_over_errors++;
        ret++;
    }

    if (bd->stat & RX_BD_MISS)
        netdev->stats.rx_missed_errors++;

    if (bd->stat & RX_BD_LC) {
        dev_err(&netdev->dev, "RX: late collision\n");
        netdev->stats.collisions++;
        ret++;
    }

    return ret;
}

static int ethoc_rx(struct net_device *dev, int limit)
{
    struct ethoc *priv = netdev_priv(dev);
    int count;

    for (count = 0; count < limit; ++count) {
        unsigned int entry;
        struct ethoc_bd bd;

        entry = priv->num_tx + priv->cur_rx;
        ethoc_read_bd(priv, entry, &bd);
        if (bd.stat & RX_BD_EMPTY) {
            ethoc_ack_irq(priv, INT_MASK_RX);
            /* If a packet (interrupt) came in between checking
             * RX_BD_EMPTY and clearing the interrupt source, then
             * we risk missing the packet as the RX interrupt won't
             * trigger right away when we reenable it; hence, check
             * RX_BD_EMPTY here again to make sure there isn't such
             * a packet waiting for us...
             */
            ethoc_read_bd(priv, entry, &bd);
            if (bd.stat & RX_BD_EMPTY)
                break;
        }

        if (ethoc_update_rx_stats(priv, &bd) == 0) {
            int size = bd.stat >> 16;
            struct sk_buff *skb;

            size -= 4; /* strip the CRC */
            skb = netdev_alloc_skb_ip_align(dev, size);

            if (likely(skb)) {
                void *src = priv->vma[entry];
                memcpy_fromio(skb_put(skb, size), src, size);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += size;
                netif_receive_skb(skb);
            } else {
                if (net_ratelimit())
                    dev_warn(&dev->dev,
                        "low on memory - packet dropped\n");

                dev->stats.rx_dropped++;
                break;
            }
        }

        /* clear the buffer descriptor so it can be reused */
        bd.stat &= ~RX_BD_STATS;
        bd.stat |=  RX_BD_EMPTY;
        ethoc_write_bd(priv, entry, &bd);
        if (++priv->cur_rx == priv->num_rx)
            priv->cur_rx = 0;
    }

    return count;
}
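
/* Note: the MAC reports the received frame length, including the 4-byte
 * FCS, in the upper 16 bits of the descriptor status word; hence the
 * length is taken from bd.stat >> 16 and shortened by 4 before the frame
 * is copied out of the (possibly I/O-mapped) buffer memory with
 * memcpy_fromio().
 */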

static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
{
    struct net_device *netdev = dev->netdev;

    if (bd->stat & TX_BD_LC) {
        dev_err(&netdev->dev, "TX: late collision\n");
        netdev->stats.tx_window_errors++;
    }

    if (bd->stat & TX_BD_RL) {
        dev_err(&netdev->dev, "TX: retransmit limit\n");
        netdev->stats.tx_aborted_errors++;
    }

    if (bd->stat & TX_BD_UR) {
        dev_err(&netdev->dev, "TX: underrun\n");
        netdev->stats.tx_fifo_errors++;
    }

    if (bd->stat & TX_BD_CS) {
        dev_err(&netdev->dev, "TX: carrier sense lost\n");
        netdev->stats.tx_carrier_errors++;
    }

    if (bd->stat & TX_BD_STATS)
        netdev->stats.tx_errors++;

    netdev->stats.collisions += (bd->stat >> 4) & 0xf;
    netdev->stats.tx_bytes += bd->stat >> 16;
    netdev->stats.tx_packets++;
}

static int ethoc_tx(struct net_device *dev, int limit)
{
    struct ethoc *priv = netdev_priv(dev);
    int count;
    struct ethoc_bd bd;

    for (count = 0; count < limit; ++count) {
        unsigned int entry;

        entry = priv->dty_tx & (priv->num_tx-1);

        ethoc_read_bd(priv, entry, &bd);

        if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
            ethoc_ack_irq(priv, INT_MASK_TX);
            /* If an interrupt came in between reading in the BD
             * and clearing the interrupt source, then we risk
             * missing the event as the TX interrupt won't trigger
             * right away when we reenable it; hence, check
             * TX_BD_READY here again to make sure there isn't such
             * an event pending...
             */
            ethoc_read_bd(priv, entry, &bd);
            if (bd.stat & TX_BD_READY ||
                (priv->dty_tx == priv->cur_tx))
                break;
        }

        ethoc_update_tx_stats(priv, &bd);
        priv->dty_tx++;
    }

    if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
        netif_wake_queue(dev);

    return count;
}
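
/* Note: cur_tx and dty_tx are free-running counters; only their difference
 * (frames in flight) and their value masked with num_tx - 1 (ring index)
 * are meaningful, which is why ethoc_probe() and ethoc_set_ringparam()
 * round num_tx down to a power of two.
 */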

static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct ethoc *priv = netdev_priv(dev);
    u32 pending;
    u32 mask;

    /* Figure out what triggered the interrupt...
     * The tricky bit here is that the interrupt source bits get
     * set in INT_SOURCE for an event regardless of whether that
     * event is masked or not.  Thus, in order to figure out what
     * triggered the interrupt, we need to remove the sources
     * for all events that are currently masked.  This behaviour
     * is not particularly well documented but reasonable...
     */
    mask = ethoc_read(priv, INT_MASK);
    pending = ethoc_read(priv, INT_SOURCE);
    pending &= mask;

    if (unlikely(pending == 0))
        return IRQ_NONE;

    ethoc_ack_irq(priv, pending);

    /* We always handle the dropped packet interrupt */
    if (pending & INT_MASK_BUSY) {
        dev_dbg(&dev->dev, "packet dropped\n");
        dev->stats.rx_dropped++;
    }

    /* Handle receive/transmit event by switching to polling */
    if (pending & (INT_MASK_TX | INT_MASK_RX)) {
        ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
        napi_schedule(&priv->napi);
    }

    return IRQ_HANDLED;
}
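
/* Note: this is the usual NAPI hand-off: the hard interrupt handler masks
 * further TX/RX interrupts and schedules the poll routine; ethoc_poll()
 * re-enables them only once a poll round completes under budget, so the
 * driver runs in pure polling mode while under load.
 */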

static int ethoc_get_mac_address(struct net_device *dev, void *addr)
{
    struct ethoc *priv = netdev_priv(dev);
    u8 *mac = (u8 *)addr;
    u32 reg;

    reg = ethoc_read(priv, MAC_ADDR0);
    mac[2] = (reg >> 24) & 0xff;
    mac[3] = (reg >> 16) & 0xff;
    mac[4] = (reg >>  8) & 0xff;
    mac[5] = (reg >>  0) & 0xff;

    reg = ethoc_read(priv, MAC_ADDR1);
    mac[0] = (reg >>  8) & 0xff;
    mac[1] = (reg >>  0) & 0xff;

    return 0;
}
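
/* Note: the six address octets are split across two registers: MAC_ADDR1
 * holds the two most significant octets in its low 16 bits, MAC_ADDR0 the
 * remaining four; ethoc_do_set_mac_address() below does the mirror-image
 * packing when programming the address.
 */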

static int ethoc_poll(struct napi_struct *napi, int budget)
{
    struct ethoc *priv = container_of(napi, struct ethoc, napi);
    int rx_work_done = 0;
    int tx_work_done = 0;

    rx_work_done = ethoc_rx(priv->netdev, budget);
    tx_work_done = ethoc_tx(priv->netdev, budget);

    if (rx_work_done < budget && tx_work_done < budget) {
        napi_complete_done(napi, rx_work_done);
        ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
    }

    return rx_work_done;
}

static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
    struct ethoc *priv = bus->priv;
    int i;

    ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
    ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

    for (i = 0; i < 5; i++) {
        u32 status = ethoc_read(priv, MIISTATUS);
        if (!(status & MIISTATUS_BUSY)) {
            u32 data = ethoc_read(priv, MIIRX_DATA);
            /* reset MII command register */
            ethoc_write(priv, MIICOMMAND, 0);
            return data;
        }
        usleep_range(100, 200);
    }

    return -EBUSY;
}

static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
    struct ethoc *priv = bus->priv;
    int i;

    ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
    ethoc_write(priv, MIITX_DATA, val);
    ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

    for (i = 0; i < 5; i++) {
        u32 stat = ethoc_read(priv, MIISTATUS);
        if (!(stat & MIISTATUS_BUSY)) {
            /* reset MII command register */
            ethoc_write(priv, MIICOMMAND, 0);
            return 0;
        }
        usleep_range(100, 200);
    }

    return -EBUSY;
}

static void ethoc_mdio_poll(struct net_device *dev)
{
    struct ethoc *priv = netdev_priv(dev);
    struct phy_device *phydev = dev->phydev;
    bool changed = false;
    u32 mode;

    if (priv->old_link != phydev->link) {
        changed = true;
        priv->old_link = phydev->link;
    }

    if (priv->old_duplex != phydev->duplex) {
        changed = true;
        priv->old_duplex = phydev->duplex;
    }

    if (!changed)
        return;

    mode = ethoc_read(priv, MODER);
    if (phydev->duplex == DUPLEX_FULL)
        mode |= MODER_FULLD;
    else
        mode &= ~MODER_FULLD;
    ethoc_write(priv, MODER, mode);

    phy_print_status(phydev);
}

static int ethoc_mdio_probe(struct net_device *dev)
{
    struct ethoc *priv = netdev_priv(dev);
    struct phy_device *phy;
    int err;

    if (priv->phy_id != -1)
        phy = mdiobus_get_phy(priv->mdio, priv->phy_id);
    else
        phy = phy_find_first(priv->mdio);

    if (!phy)
        return dev_err_probe(&dev->dev, -ENXIO, "no PHY found\n");

    priv->old_duplex = -1;
    priv->old_link = -1;

    err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
                 PHY_INTERFACE_MODE_GMII);
    if (err)
        return dev_err_probe(&dev->dev, err, "could not attach to PHY\n");

    phy_set_max_speed(phy, SPEED_100);

    return 0;
}
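
/* Note: the link speed is capped at 100 Mbit/s, the OpenCores ethmac being
 * a 10/100 core. Only the negotiated duplex setting is mirrored into MODER
 * (see ethoc_mdio_poll() above); the driver never programs a speed,
 * presumably because the core follows the MII interface clocking directly.
 */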

static int ethoc_open(struct net_device *dev)
{
    struct ethoc *priv = netdev_priv(dev);
    int ret;

    ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
            dev->name, dev);
    if (ret)
        return ret;

    napi_enable(&priv->napi);

    ethoc_init_ring(priv, dev->mem_start);
    ethoc_reset(priv);

    if (netif_queue_stopped(dev)) {
        dev_dbg(&dev->dev, " resuming queue\n");
        netif_wake_queue(dev);
    } else {
        dev_dbg(&dev->dev, " starting queue\n");
        netif_start_queue(dev);
    }

    priv->old_link = -1;
    priv->old_duplex = -1;

    phy_start(dev->phydev);

    if (netif_msg_ifup(priv)) {
        dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
                dev->base_addr, dev->mem_start, dev->mem_end);
    }

    return 0;
}

static int ethoc_stop(struct net_device *dev)
{
    struct ethoc *priv = netdev_priv(dev);

    napi_disable(&priv->napi);

    if (dev->phydev)
        phy_stop(dev->phydev);

    ethoc_disable_rx_and_tx(priv);
    free_irq(dev->irq, dev);

    if (!netif_queue_stopped(dev))
        netif_stop_queue(dev);

    return 0;
}

static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct ethoc *priv = netdev_priv(dev);
    struct mii_ioctl_data *mdio = if_mii(ifr);
    struct phy_device *phy = NULL;

    if (!netif_running(dev))
        return -EINVAL;

    if (cmd != SIOCGMIIPHY) {
        if (mdio->phy_id >= PHY_MAX_ADDR)
            return -ERANGE;

        phy = mdiobus_get_phy(priv->mdio, mdio->phy_id);
        if (!phy)
            return -ENODEV;
    } else {
        phy = dev->phydev;
    }

    return phy_mii_ioctl(phy, ifr, cmd);
}

static void ethoc_do_set_mac_address(struct net_device *dev)
{
    const unsigned char *mac = dev->dev_addr;
    struct ethoc *priv = netdev_priv(dev);

    ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
                     (mac[4] <<  8) | (mac[5] <<  0));
    ethoc_write(priv, MAC_ADDR1, (mac[0] <<  8) | (mac[1] <<  0));
}

static int ethoc_set_mac_address(struct net_device *dev, void *p)
{
    const struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;
    eth_hw_addr_set(dev, addr->sa_data);
    ethoc_do_set_mac_address(dev);
    return 0;
}

static void ethoc_set_multicast_list(struct net_device *dev)
{
    struct ethoc *priv = netdev_priv(dev);
    u32 mode = ethoc_read(priv, MODER);
    struct netdev_hw_addr *ha;
    u32 hash[2] = { 0, 0 };

    /* set loopback mode if requested */
    if (dev->flags & IFF_LOOPBACK)
        mode |=  MODER_LOOP;
    else
        mode &= ~MODER_LOOP;

    /* receive broadcast frames if requested */
    if (dev->flags & IFF_BROADCAST)
        mode &= ~MODER_BRO;
    else
        mode |=  MODER_BRO;

    /* enable promiscuous mode if requested */
    if (dev->flags & IFF_PROMISC)
        mode |=  MODER_PRO;
    else
        mode &= ~MODER_PRO;

    ethoc_write(priv, MODER, mode);

    /* receive multicast frames */
    if (dev->flags & IFF_ALLMULTI) {
        hash[0] = 0xffffffff;
        hash[1] = 0xffffffff;
    } else {
        netdev_for_each_mc_addr(ha, dev) {
            u32 crc = ether_crc(ETH_ALEN, ha->addr);
            int bit = (crc >> 26) & 0x3f;
            hash[bit >> 5] |= 1 << (bit & 0x1f);
        }
    }

    ethoc_write(priv, ETH_HASH0, hash[0]);
    ethoc_write(priv, ETH_HASH1, hash[1]);
}
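
/* Note: multicast filtering uses the classic 64-bit hash scheme: the top
 * six bits of the Ethernet CRC-32 of the destination address select one of
 * the 64 bits spread across ETH_HASH0/ETH_HASH1, and IFF_ALLMULTI simply
 * sets all 64 bits so every multicast frame passes the filter.
 */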

static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
{
    return -ENOSYS;
}

static void ethoc_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
    struct ethoc *priv = netdev_priv(dev);
    u32 pending = ethoc_read(priv, INT_SOURCE);
    if (likely(pending))
        ethoc_interrupt(dev->irq, dev);
}

static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ethoc *priv = netdev_priv(dev);
    struct ethoc_bd bd;
    unsigned int entry;
    void *dest;

    if (skb_put_padto(skb, ETHOC_ZLEN)) {
        dev->stats.tx_errors++;
        goto out_no_free;
    }

    if (unlikely(skb->len > ETHOC_BUFSIZ)) {
        dev->stats.tx_errors++;
        goto out;
    }

    entry = priv->cur_tx % priv->num_tx;
    spin_lock_irq(&priv->lock);
    priv->cur_tx++;

    ethoc_read_bd(priv, entry, &bd);
    if (unlikely(skb->len < ETHOC_ZLEN))
        bd.stat |=  TX_BD_PAD;
    else
        bd.stat &= ~TX_BD_PAD;

    dest = priv->vma[entry];
    memcpy_toio(dest, skb->data, skb->len);

    bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
    bd.stat |= TX_BD_LEN(skb->len);
    ethoc_write_bd(priv, entry, &bd);

    bd.stat |= TX_BD_READY;
    ethoc_write_bd(priv, entry, &bd);

    if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
        dev_dbg(&dev->dev, "stopping queue\n");
        netif_stop_queue(dev);
    }

    spin_unlock_irq(&priv->lock);
    skb_tx_timestamp(skb);
out:
    dev_kfree_skb(skb);
out_no_free:
    return NETDEV_TX_OK;
}
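
/* Note: the descriptor is deliberately written twice: the first write
 * updates the length and clears stale status bits while TX_BD_READY is
 * still clear, the second sets TX_BD_READY, so the MAC can never pick up
 * a descriptor with a half-updated status word.
 */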

static int ethoc_get_regs_len(struct net_device *netdev)
{
    return ETH_END;
}

static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
               void *p)
{
    struct ethoc *priv = netdev_priv(dev);
    u32 *regs_buff = p;
    unsigned i;

    regs->version = 0;
    for (i = 0; i < ETH_END / sizeof(u32); ++i)
        regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
}

static void ethoc_get_ringparam(struct net_device *dev,
                struct ethtool_ringparam *ring,
                struct kernel_ethtool_ringparam *kernel_ring,
                struct netlink_ext_ack *extack)
{
    struct ethoc *priv = netdev_priv(dev);

    ring->rx_max_pending = priv->num_bd - 1;
    ring->rx_mini_max_pending = 0;
    ring->rx_jumbo_max_pending = 0;
    ring->tx_max_pending = priv->num_bd - 1;

    ring->rx_pending = priv->num_rx;
    ring->rx_mini_pending = 0;
    ring->rx_jumbo_pending = 0;
    ring->tx_pending = priv->num_tx;
}

static int ethoc_set_ringparam(struct net_device *dev,
                   struct ethtool_ringparam *ring,
                   struct kernel_ethtool_ringparam *kernel_ring,
                   struct netlink_ext_ack *extack)
{
    struct ethoc *priv = netdev_priv(dev);

    if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
        ring->tx_pending + ring->rx_pending > priv->num_bd)
        return -EINVAL;
    if (ring->rx_mini_pending || ring->rx_jumbo_pending)
        return -EINVAL;

    if (netif_running(dev)) {
        netif_tx_disable(dev);
        ethoc_disable_rx_and_tx(priv);
        ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
        synchronize_irq(dev->irq);
    }

    priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
    priv->num_rx = ring->rx_pending;
    ethoc_init_ring(priv, dev->mem_start);

    if (netif_running(dev)) {
        ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
        ethoc_enable_rx_and_tx(priv);
        netif_wake_queue(dev);
    }
    return 0;
}
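
/* Note: tx_pending is silently rounded down to a power of two rather than
 * rejected, which keeps the index arithmetic in ethoc_tx() valid; the ring
 * size later reported by ethoc_get_ringparam() therefore reflects the
 * rounded value, not necessarily what the user requested.
 */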

static const struct ethtool_ops ethoc_ethtool_ops = {
    .get_regs_len = ethoc_get_regs_len,
    .get_regs = ethoc_get_regs,
    .nway_reset = phy_ethtool_nway_reset,
    .get_link = ethtool_op_get_link,
    .get_ringparam = ethoc_get_ringparam,
    .set_ringparam = ethoc_set_ringparam,
    .get_ts_info = ethtool_op_get_ts_info,
    .get_link_ksettings = phy_ethtool_get_link_ksettings,
    .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops ethoc_netdev_ops = {
    .ndo_open = ethoc_open,
    .ndo_stop = ethoc_stop,
    .ndo_eth_ioctl = ethoc_ioctl,
    .ndo_set_mac_address = ethoc_set_mac_address,
    .ndo_set_rx_mode = ethoc_set_multicast_list,
    .ndo_change_mtu = ethoc_change_mtu,
    .ndo_tx_timeout = ethoc_tx_timeout,
    .ndo_start_xmit = ethoc_start_xmit,
};

/**
 * ethoc_probe - initialize OpenCores ethernet MAC
 * @pdev:   platform device
 */
static int ethoc_probe(struct platform_device *pdev)
{
    struct net_device *netdev = NULL;
    struct resource *res = NULL;
    struct resource *mmio = NULL;
    struct resource *mem = NULL;
    struct ethoc *priv = NULL;
    int num_bd;
    int ret = 0;
    struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
    u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;

    /* allocate networking device */
    netdev = alloc_etherdev(sizeof(struct ethoc));
    if (!netdev) {
        ret = -ENOMEM;
        goto out;
    }

    SET_NETDEV_DEV(netdev, &pdev->dev);
    platform_set_drvdata(pdev, netdev);

    /* obtain I/O memory space */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
        ret = -ENXIO;
        goto free;
    }

    mmio = devm_request_mem_region(&pdev->dev, res->start,
            resource_size(res), res->name);
    if (!mmio) {
        dev_err(&pdev->dev, "cannot request I/O memory space\n");
        ret = -ENXIO;
        goto free;
    }

    netdev->base_addr = mmio->start;

    /* obtain buffer memory space */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    if (res) {
        mem = devm_request_mem_region(&pdev->dev, res->start,
            resource_size(res), res->name);
        if (!mem) {
            dev_err(&pdev->dev, "cannot request memory space\n");
            ret = -ENXIO;
            goto free;
        }

        netdev->mem_start = mem->start;
        netdev->mem_end   = mem->end;
    }

    /* obtain device IRQ number */
    ret = platform_get_irq(pdev, 0);
    if (ret < 0)
        goto free;

    netdev->irq = ret;

    /* setup driver-private data */
    priv = netdev_priv(netdev);
    priv->netdev = netdev;

    priv->iobase = devm_ioremap(&pdev->dev, netdev->base_addr,
            resource_size(mmio));
    if (!priv->iobase) {
        dev_err(&pdev->dev, "cannot remap I/O memory space\n");
        ret = -ENXIO;
        goto free;
    }

    if (netdev->mem_end) {
        priv->membase = devm_ioremap(&pdev->dev,
            netdev->mem_start, resource_size(mem));
        if (!priv->membase) {
            dev_err(&pdev->dev, "cannot remap memory space\n");
            ret = -ENXIO;
            goto free;
        }
    } else {
        /* Allocate buffer memory */
        priv->membase = dmam_alloc_coherent(&pdev->dev,
            buffer_size, (void *)&netdev->mem_start,
            GFP_KERNEL);
        if (!priv->membase) {
            dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
                buffer_size);
            ret = -ENOMEM;
            goto free;
        }
        netdev->mem_end = netdev->mem_start + buffer_size;
    }

    priv->big_endian = pdata ? pdata->big_endian :
        of_device_is_big_endian(pdev->dev.of_node);

    /* calculate the number of TX/RX buffers, maximum 128 supported */
    num_bd = min_t(unsigned int,
        128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
    if (num_bd < 4) {
        ret = -ENODEV;
        goto free;
    }
    priv->num_bd = num_bd;
    /* num_tx must be a power of two */
    priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
    priv->num_rx = num_bd - priv->num_tx;

    dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
        priv->num_tx, priv->num_rx);

    priv->vma = devm_kcalloc(&pdev->dev, num_bd, sizeof(void *),
                 GFP_KERNEL);
    if (!priv->vma) {
        ret = -ENOMEM;
        goto free;
    }

    /* Allow the platform setup code to pass in a MAC address. */
    if (pdata) {
        eth_hw_addr_set(netdev, pdata->hwaddr);
        priv->phy_id = pdata->phy_id;
    } else {
        of_get_ethdev_address(pdev->dev.of_node, netdev);
        priv->phy_id = -1;
    }

    /* Check that the given MAC address is valid. If it isn't, read the
     * current MAC from the controller.
     */
    if (!is_valid_ether_addr(netdev->dev_addr)) {
        u8 addr[ETH_ALEN];

        ethoc_get_mac_address(netdev, addr);
        eth_hw_addr_set(netdev, addr);
    }

    /* Check the MAC again for validity; if it still isn't valid, choose
     * and program a random one.
     */
    if (!is_valid_ether_addr(netdev->dev_addr))
        eth_hw_addr_random(netdev);

    ethoc_do_set_mac_address(netdev);

    /* Allow the platform setup code to adjust MII management bus clock. */
    if (!eth_clkfreq) {
        struct clk *clk = devm_clk_get(&pdev->dev, NULL);

        if (!IS_ERR(clk)) {
            priv->clk = clk;
            clk_prepare_enable(clk);
            eth_clkfreq = clk_get_rate(clk);
        }
    }
    if (eth_clkfreq) {
        u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);

        if (!clkdiv)
            clkdiv = 2;
        dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
        ethoc_write(priv, MIIMODER,
                (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
                clkdiv);
    }
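
    /* Note: the divider eth_clkfreq / 2500000 + 1 is presumably meant to
     * keep the MDC management clock at or below the 2.5 MHz ceiling of the
     * MII specification; MIIMODER_CLKDIV() then masks it to the even value
     * the core requires.
     */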

    /* register MII bus */
    priv->mdio = mdiobus_alloc();
    if (!priv->mdio) {
        ret = -ENOMEM;
        goto free2;
    }

    priv->mdio->name = "ethoc-mdio";
    snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
            priv->mdio->name, pdev->id);
    priv->mdio->read = ethoc_mdio_read;
    priv->mdio->write = ethoc_mdio_write;
    priv->mdio->priv = priv;

    ret = mdiobus_register(priv->mdio);
    if (ret) {
        dev_err(&netdev->dev, "failed to register MDIO bus\n");
        goto free3;
    }

    ret = ethoc_mdio_probe(netdev);
    if (ret) {
        dev_err(&netdev->dev, "failed to probe MDIO bus\n");
        goto error;
    }

    /* setup the net_device structure */
    netdev->netdev_ops = &ethoc_netdev_ops;
    netdev->watchdog_timeo = ETHOC_TIMEOUT;
    netdev->features |= 0;
    netdev->ethtool_ops = &ethoc_ethtool_ops;

    /* setup NAPI */
    netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);

    spin_lock_init(&priv->lock);

    ret = register_netdev(netdev);
    if (ret < 0) {
        dev_err(&netdev->dev, "failed to register interface\n");
        goto error2;
    }

    goto out;

error2:
    netif_napi_del(&priv->napi);
error:
    mdiobus_unregister(priv->mdio);
free3:
    mdiobus_free(priv->mdio);
free2:
    clk_disable_unprepare(priv->clk);
free:
    free_netdev(netdev);
out:
    return ret;
}

/**
 * ethoc_remove - shutdown OpenCores ethernet MAC
 * @pdev:   platform device
 */
static int ethoc_remove(struct platform_device *pdev)
{
    struct net_device *netdev = platform_get_drvdata(pdev);
    struct ethoc *priv = netdev_priv(netdev);

    if (netdev) {
        netif_napi_del(&priv->napi);
        phy_disconnect(netdev->phydev);

        if (priv->mdio) {
            mdiobus_unregister(priv->mdio);
            mdiobus_free(priv->mdio);
        }
        clk_disable_unprepare(priv->clk);
        unregister_netdev(netdev);
        free_netdev(netdev);
    }

    return 0;
}

#ifdef CONFIG_PM
static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
{
    return -ENOSYS;
}

static int ethoc_resume(struct platform_device *pdev)
{
    return -ENOSYS;
}
#else
# define ethoc_suspend NULL
# define ethoc_resume  NULL
#endif

static const struct of_device_id ethoc_match[] = {
    { .compatible = "opencores,ethoc", },
    {},
};
MODULE_DEVICE_TABLE(of, ethoc_match);

static struct platform_driver ethoc_driver = {
    .probe   = ethoc_probe,
    .remove  = ethoc_remove,
    .suspend = ethoc_suspend,
    .resume  = ethoc_resume,
    .driver  = {
        .name = "ethoc",
        .of_match_table = ethoc_match,
    },
};

module_platform_driver(ethoc_driver);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
MODULE_LICENSE("GPL v2");