// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2006, 2007 Eugene Konev
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/atomic.h>

#include <asm/mach-ar7/ar7.h>

MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpmac");

static int debug_level = 8;
static int dumb_switch;

/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
module_param(debug_level, int, 0444);
module_param(dumb_switch, int, 0444);

MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");

#define CPMAC_VERSION "0.5.2"
/* frame size + 802.1q tag + FCS size */
#define CPMAC_SKB_SIZE      (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define CPMAC_QUEUES    8

/* Ethernet registers */
#define CPMAC_TX_CONTROL        0x0004
#define CPMAC_TX_TEARDOWN       0x0008
#define CPMAC_RX_CONTROL        0x0014
#define CPMAC_RX_TEARDOWN       0x0018
#define CPMAC_MBP           0x0100
#define MBP_RXPASSCRC           0x40000000
#define MBP_RXQOS           0x20000000
#define MBP_RXNOCHAIN           0x10000000
#define MBP_RXCMF           0x01000000
#define MBP_RXSHORT         0x00800000
#define MBP_RXCEF           0x00400000
#define MBP_RXPROMISC           0x00200000
#define MBP_PROMISCCHAN(channel)    (((channel) & 0x7) << 16)
#define MBP_RXBCAST         0x00002000
#define MBP_BCASTCHAN(channel)      (((channel) & 0x7) << 8)
#define MBP_RXMCAST         0x00000020
#define MBP_MCASTCHAN(channel)      ((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE        0x0104
#define CPMAC_UNICAST_CLEAR     0x0108
#define CPMAC_MAX_LENGTH        0x010c
#define CPMAC_BUFFER_OFFSET     0x0110
#define CPMAC_MAC_CONTROL       0x0160
#define MAC_TXPTYPE         0x00000200
#define MAC_TXPACE          0x00000040
#define MAC_MII             0x00000020
#define MAC_TXFLOW          0x00000010
#define MAC_RXFLOW          0x00000008
#define MAC_MTEST           0x00000004
#define MAC_LOOPBACK            0x00000002
#define MAC_FDX             0x00000001
#define CPMAC_MAC_STATUS        0x0164
#define MAC_STATUS_QOS          0x00000004
#define MAC_STATUS_RXFLOW       0x00000002
#define MAC_STATUS_TXFLOW       0x00000001
#define CPMAC_TX_INT_ENABLE     0x0178
#define CPMAC_TX_INT_CLEAR      0x017c
#define CPMAC_MAC_INT_VECTOR        0x0180
#define MAC_INT_STATUS          0x00080000
#define MAC_INT_HOST            0x00040000
#define MAC_INT_RX          0x00020000
#define MAC_INT_TX          0x00010000
#define CPMAC_MAC_EOI_VECTOR        0x0184
#define CPMAC_RX_INT_ENABLE     0x0198
#define CPMAC_RX_INT_CLEAR      0x019c
#define CPMAC_MAC_INT_ENABLE        0x01a8
#define CPMAC_MAC_INT_CLEAR     0x01ac
#define CPMAC_MAC_ADDR_LO(channel)  (0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID      0x01d0
#define CPMAC_MAC_ADDR_HI       0x01d4
#define CPMAC_MAC_HASH_LO       0x01d8
#define CPMAC_MAC_HASH_HI       0x01dc
#define CPMAC_TX_PTR(channel)       (0x0600 + (channel) * 4)
#define CPMAC_RX_PTR(channel)       (0x0620 + (channel) * 4)
#define CPMAC_TX_ACK(channel)       (0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel)       (0x0660 + (channel) * 4)
#define CPMAC_REG_END           0x0680

/* Rx/Tx statistics
 * TODO: use some of them to fill stats in cpmac_stats()
 */
#define CPMAC_STATS_RX_GOOD     0x0200
#define CPMAC_STATS_RX_BCAST        0x0204
#define CPMAC_STATS_RX_MCAST        0x0208
#define CPMAC_STATS_RX_PAUSE        0x020c
#define CPMAC_STATS_RX_CRC      0x0210
#define CPMAC_STATS_RX_ALIGN        0x0214
#define CPMAC_STATS_RX_OVER     0x0218
#define CPMAC_STATS_RX_JABBER       0x021c
#define CPMAC_STATS_RX_UNDER        0x0220
#define CPMAC_STATS_RX_FRAG     0x0224
#define CPMAC_STATS_RX_FILTER       0x0228
#define CPMAC_STATS_RX_QOSFILTER    0x022c
#define CPMAC_STATS_RX_OCTETS       0x0230

#define CPMAC_STATS_TX_GOOD     0x0234
#define CPMAC_STATS_TX_BCAST        0x0238
#define CPMAC_STATS_TX_MCAST        0x023c
#define CPMAC_STATS_TX_PAUSE        0x0240
#define CPMAC_STATS_TX_DEFER        0x0244
#define CPMAC_STATS_TX_COLLISION    0x0248
#define CPMAC_STATS_TX_SINGLECOLL   0x024c
#define CPMAC_STATS_TX_MULTICOLL    0x0250
#define CPMAC_STATS_TX_EXCESSCOLL   0x0254
#define CPMAC_STATS_TX_LATECOLL     0x0258
#define CPMAC_STATS_TX_UNDERRUN     0x025c
#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
#define CPMAC_STATS_TX_OCTETS       0x0264

#define cpmac_read(base, reg)       (readl((void __iomem *)(base) + (reg)))
#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
                        (reg)))

/* MDIO bus */
#define CPMAC_MDIO_VERSION      0x0000
#define CPMAC_MDIO_CONTROL      0x0004
#define MDIOC_IDLE          0x80000000
#define MDIOC_ENABLE            0x40000000
#define MDIOC_PREAMBLE          0x00100000
#define MDIOC_FAULT         0x00080000
#define MDIOC_FAULTDETECT       0x00040000
#define MDIOC_INTTEST           0x00020000
#define MDIOC_CLKDIV(div)       ((div) & 0xff)
#define CPMAC_MDIO_ALIVE        0x0008
#define CPMAC_MDIO_LINK         0x000c
#define CPMAC_MDIO_ACCESS(channel)  (0x0080 + (channel) * 8)
#define MDIO_BUSY           0x80000000
#define MDIO_WRITE          0x40000000
#define MDIO_REG(reg)           (((reg) & 0x1f) << 21)
#define MDIO_PHY(phy)           (((phy) & 0x1f) << 16)
#define MDIO_DATA(data)         ((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel)  (0x0084 + (channel) * 8)
#define PHYSEL_LINKSEL          0x00000040
#define PHYSEL_LINKINT          0x00000020

struct cpmac_desc {
    u32 hw_next;
    u32 hw_data;
    u16 buflen;
    u16 bufflags;
    u16 datalen;
    u16 dataflags;
#define CPMAC_SOP           0x8000
#define CPMAC_EOP           0x4000
#define CPMAC_OWN           0x2000
#define CPMAC_EOQ           0x1000
    struct sk_buff *skb;
    struct cpmac_desc *next;
    struct cpmac_desc *prev;
    dma_addr_t mapping;
    dma_addr_t data_mapping;
};
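
/* Note on the descriptor flag bits above, as this driver uses them:
 * CPMAC_SOP and CPMAC_EOP mark the first and last descriptor of a frame
 * (tx frames here always fit in one descriptor, so both are set
 * together), CPMAC_OWN means the descriptor is currently owned by the
 * DMA engine, and CPMAC_EOQ is set by the hardware on the descriptor at
 * which a channel stopped because hw_next was zero; see the restart
 * handling in cpmac_poll().
 */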

struct cpmac_priv {
    spinlock_t lock;
    spinlock_t rx_lock;
    struct cpmac_desc *rx_head;
    int ring_size;
    struct cpmac_desc *desc_ring;
    dma_addr_t dma_ring;
    void __iomem *regs;
    struct mii_bus *mii_bus;
    char phy_name[MII_BUS_ID_SIZE + 3];
    int oldlink, oldspeed, oldduplex;
    u32 msg_enable;
    struct net_device *dev;
    struct work_struct reset_work;
    struct platform_device *pdev;
    struct napi_struct napi;
    atomic_t reset_pending;
};

static irqreturn_t cpmac_irq(int, void *);
static void cpmac_hw_start(struct net_device *dev);
static void cpmac_hw_stop(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);

static void cpmac_dump_regs(struct net_device *dev)
{
    int i;
    struct cpmac_priv *priv = netdev_priv(dev);

    for (i = 0; i < CPMAC_REG_END; i += 4) {
        if (i % 16 == 0) {
            if (i)
                printk("\n");
            printk("%s: reg[%p]:", dev->name, priv->regs + i);
        }
        printk(" %08x", cpmac_read(priv->regs, i));
    }
    printk("\n");
}

static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
    int i;

    printk("%s: desc[%p]:", dev->name, desc);
    for (i = 0; i < sizeof(*desc) / 4; i++)
        printk(" %08x", ((u32 *)desc)[i]);
    printk("\n");
}

static void cpmac_dump_all_desc(struct net_device *dev)
{
    struct cpmac_priv *priv = netdev_priv(dev);
    struct cpmac_desc *dump = priv->rx_head;

    do {
        cpmac_dump_desc(dev, dump);
        dump = dump->next;
    } while (dump != priv->rx_head);
}

static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
    int i;

    printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
    for (i = 0; i < skb->len; i++) {
        if (i % 16 == 0) {
            if (i)
                printk("\n");
            printk("%s: data[%p]:", dev->name, skb->data + i);
        }
        printk(" %02x", ((u8 *)skb->data)[i]);
    }
    printk("\n");
}

static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
    u32 val;

    while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
        cpu_relax();
    cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
            MDIO_PHY(phy_id));
    while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
        cpu_relax();

    return MDIO_DATA(val);
}
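
/* Illustrative example of the access word composed above: reading the
 * BMSR (register 1) of the PHY at address 0x1f would write
 * MDIO_BUSY | MDIO_REG(1) | MDIO_PHY(0x1f) == 0x803f0000 into
 * CPMAC_MDIO_ACCESS(0); the controller clears MDIO_BUSY when the
 * transaction completes, and the low 16 bits then hold the register
 * value returned via MDIO_DATA().
 */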

static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
                int reg, u16 val)
{
    while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
        cpu_relax();
    cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
            MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));

    return 0;
}

static int cpmac_mdio_reset(struct mii_bus *bus)
{
    struct clk *cpmac_clk;

    cpmac_clk = clk_get(&bus->dev, "cpmac");
    if (IS_ERR(cpmac_clk)) {
        pr_err("unable to get cpmac clock\n");
        return -1;
    }
    ar7_device_reset(AR7_RESET_BIT_MDIO);
    cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
            MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));

    return 0;
}
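
/* The divider chosen above follows from CLKDIV = rate / 2200000 - 1:
 * assuming the usual TI MDIO behaviour of dividing the input clock by
 * (CLKDIV + 1), this targets an MDC frequency of roughly 2.2 MHz.
 */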

static struct mii_bus *cpmac_mii;

static void cpmac_set_multicast_list(struct net_device *dev)
{
    struct netdev_hw_addr *ha;
    u8 tmp;
    u32 mbp, bit, hash[2] = { 0, };
    struct cpmac_priv *priv = netdev_priv(dev);

    mbp = cpmac_read(priv->regs, CPMAC_MBP);
    if (dev->flags & IFF_PROMISC) {
        cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
                MBP_RXPROMISC);
    } else {
        cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
        if (dev->flags & IFF_ALLMULTI) {
            /* enable all multicast mode */
            cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
            cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
        } else {
            /* cpmac uses some strange mac address hashing
             * (not crc32)
             */
            netdev_for_each_mc_addr(ha, dev) {
                bit = 0;
                tmp = ha->addr[0];
                bit  ^= (tmp >> 2) ^ (tmp << 4);
                tmp = ha->addr[1];
                bit  ^= (tmp >> 4) ^ (tmp << 2);
                tmp = ha->addr[2];
                bit  ^= (tmp >> 6) ^ tmp;
                tmp = ha->addr[3];
                bit  ^= (tmp >> 2) ^ (tmp << 4);
                tmp = ha->addr[4];
                bit  ^= (tmp >> 4) ^ (tmp << 2);
                tmp = ha->addr[5];
                bit  ^= (tmp >> 6) ^ tmp;
                bit &= 0x3f;
                hash[bit / 32] |= 1 << (bit % 32);
            }

            cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
            cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
        }
    }
}
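
/* The hash loop above XOR-folds the six address octets down to a 6-bit
 * value, selecting one of 64 bucket bits spread across the two 32-bit
 * MAC_HASH_LO/MAC_HASH_HI registers; the hardware then accepts a
 * multicast frame whenever the bucket bit for its destination address
 * is set.
 */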

static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
                    struct cpmac_desc *desc)
{
    struct sk_buff *skb, *result = NULL;

    if (unlikely(netif_msg_hw(priv)))
        cpmac_dump_desc(priv->dev, desc);
    cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
    if (unlikely(!desc->datalen)) {
        if (netif_msg_rx_err(priv) && net_ratelimit())
            netdev_warn(priv->dev, "rx: spurious interrupt\n");

        return NULL;
    }

    skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
    if (likely(skb)) {
        skb_put(desc->skb, desc->datalen);
        desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
        skb_checksum_none_assert(desc->skb);
        priv->dev->stats.rx_packets++;
        priv->dev->stats.rx_bytes += desc->datalen;
        result = desc->skb;
        dma_unmap_single(&priv->dev->dev, desc->data_mapping,
                 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
        desc->skb = skb;
        desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
                            CPMAC_SKB_SIZE,
                            DMA_FROM_DEVICE);
        desc->hw_data = (u32)desc->data_mapping;
        if (unlikely(netif_msg_pktdata(priv))) {
            netdev_dbg(priv->dev, "received packet:\n");
            cpmac_dump_skb(priv->dev, result);
        }
    } else {
        if (netif_msg_rx_err(priv) && net_ratelimit())
            netdev_warn(priv->dev,
                    "low on skbs, dropping packet\n");

        priv->dev->stats.rx_dropped++;
    }

    desc->buflen = CPMAC_SKB_SIZE;
    desc->dataflags = CPMAC_OWN;

    return result;
}

static int cpmac_poll(struct napi_struct *napi, int budget)
{
    struct sk_buff *skb;
    struct cpmac_desc *desc, *restart;
    struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
    int received = 0, processed = 0;

    spin_lock(&priv->rx_lock);
    if (unlikely(!priv->rx_head)) {
        if (netif_msg_rx_err(priv) && net_ratelimit())
            netdev_warn(priv->dev, "rx: polling, but no queue\n");

        spin_unlock(&priv->rx_lock);
        napi_complete(napi);
        return 0;
    }

    desc = priv->rx_head;
    restart = NULL;
    while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
        processed++;

        if ((desc->dataflags & CPMAC_EOQ) != 0) {
            /* The last update to eoq->hw_next didn't happen
             * soon enough, and the receiver stopped here.
             * Remember this descriptor so we can restart
             * the receiver after freeing some space.
             */
            if (unlikely(restart)) {
                if (netif_msg_rx_err(priv))
                    netdev_err(priv->dev, "poll found a"
                           " duplicate EOQ: %p and %p\n",
                           restart, desc);
                goto fatal_error;
            }

            restart = desc->next;
        }

        skb = cpmac_rx_one(priv, desc);
        if (likely(skb)) {
            netif_receive_skb(skb);
            received++;
        }
        desc = desc->next;
    }

    if (desc != priv->rx_head) {
        /* We freed some buffers, but not the whole ring,
         * add what we did free to the rx list
         */
        desc->prev->hw_next = (u32)0;
        priv->rx_head->prev->hw_next = priv->rx_head->mapping;
    }

    /* Optimization: If we did not actually process an EOQ (perhaps because
     * of quota limits), check to see if the tail of the queue has EOQ set.
     * We should immediately restart in that case so that the receiver can
     * restart and run in parallel with more packet processing.
     * This lets us handle slightly larger bursts before running
     * out of ring space (assuming dev->weight < ring_size)
     */

    if (!restart &&
         (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
            == CPMAC_EOQ &&
         (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
        /* reset EOQ so the poll loop (above) doesn't try to
         * restart this when it eventually gets to this descriptor.
         */
        priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
        restart = priv->rx_head;
    }

    if (restart) {
        priv->dev->stats.rx_errors++;
        priv->dev->stats.rx_fifo_errors++;
        if (netif_msg_rx_err(priv) && net_ratelimit())
            netdev_warn(priv->dev, "rx dma ring overrun\n");

        if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
            if (netif_msg_drv(priv))
                netdev_err(priv->dev, "cpmac_poll is trying "
                    "to restart rx from a descriptor "
                    "that's not free: %p\n", restart);
            goto fatal_error;
        }

        cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
    }

    priv->rx_head = desc;
    spin_unlock(&priv->rx_lock);
    if (unlikely(netif_msg_rx_status(priv)))
        netdev_dbg(priv->dev, "poll processed %d packets\n", received);

    if (processed == 0) {
        /* we ran out of packets to read,
         * revert to interrupt-driven mode
         */
        napi_complete(napi);
        cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
        return 0;
    }

    return 1;

fatal_error:
    /* Something went horribly wrong.
     * Reset hardware to try to recover rather than wedging.
     */
    if (netif_msg_drv(priv)) {
        netdev_err(priv->dev, "cpmac_poll is confused. "
               "Resetting hardware\n");
        cpmac_dump_all_desc(priv->dev);
        netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
               cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
               cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
    }

    spin_unlock(&priv->rx_lock);
    napi_complete(napi);
    netif_tx_stop_all_queues(priv->dev);
    napi_disable(&priv->napi);

    atomic_inc(&priv->reset_pending);
    cpmac_hw_stop(priv->dev);
    if (!schedule_work(&priv->reset_work))
        atomic_dec(&priv->reset_pending);

    return 0;
}

static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int queue;
    unsigned int len;
    struct cpmac_desc *desc;
    struct cpmac_priv *priv = netdev_priv(dev);

    if (unlikely(atomic_read(&priv->reset_pending)))
        return NETDEV_TX_BUSY;

    if (unlikely(skb_padto(skb, ETH_ZLEN)))
        return NETDEV_TX_OK;

    len = max_t(unsigned int, skb->len, ETH_ZLEN);
    queue = skb_get_queue_mapping(skb);
    netif_stop_subqueue(dev, queue);

    desc = &priv->desc_ring[queue];
    if (unlikely(desc->dataflags & CPMAC_OWN)) {
        if (netif_msg_tx_err(priv) && net_ratelimit())
            netdev_warn(dev, "tx dma ring full\n");

        return NETDEV_TX_BUSY;
    }

    spin_lock(&priv->lock);
    spin_unlock(&priv->lock);
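    /* Note: the empty lock/unlock pair above is not a typo; presumably
     * it serves only as a synchronization point against other holders
     * of priv->lock before the descriptor below is handed to the DMA
     * engine.
     */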
    desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
    desc->skb = skb;
    desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
                        DMA_TO_DEVICE);
    desc->hw_data = (u32)desc->data_mapping;
    desc->datalen = len;
    desc->buflen = len;
    if (unlikely(netif_msg_tx_queued(priv)))
        netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
    if (unlikely(netif_msg_hw(priv)))
        cpmac_dump_desc(dev, desc);
    if (unlikely(netif_msg_pktdata(priv)))
        cpmac_dump_skb(dev, skb);
    cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

    return NETDEV_TX_OK;
}

static void cpmac_end_xmit(struct net_device *dev, int queue)
{
    struct cpmac_desc *desc;
    struct cpmac_priv *priv = netdev_priv(dev);

    desc = &priv->desc_ring[queue];
    cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
    if (likely(desc->skb)) {
        spin_lock(&priv->lock);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += desc->skb->len;
        spin_unlock(&priv->lock);
        dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
                 DMA_TO_DEVICE);

        if (unlikely(netif_msg_tx_done(priv)))
            netdev_dbg(dev, "sent 0x%p, len=%d\n",
                   desc->skb, desc->skb->len);

        dev_consume_skb_irq(desc->skb);
        desc->skb = NULL;
        if (__netif_subqueue_stopped(dev, queue))
            netif_wake_subqueue(dev, queue);
    } else {
        if (netif_msg_tx_err(priv) && net_ratelimit())
            netdev_warn(dev, "end_xmit: spurious interrupt\n");
        if (__netif_subqueue_stopped(dev, queue))
            netif_wake_subqueue(dev, queue);
    }
}

static void cpmac_hw_stop(struct net_device *dev)
{
    int i;
    struct cpmac_priv *priv = netdev_priv(dev);
    struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

    ar7_device_reset(pdata->reset_bit);
    cpmac_write(priv->regs, CPMAC_RX_CONTROL,
            cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
    cpmac_write(priv->regs, CPMAC_TX_CONTROL,
            cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
    for (i = 0; i < 8; i++) {
        cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
        cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
    }
    cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
            cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}

static void cpmac_hw_start(struct net_device *dev)
{
    int i;
    struct cpmac_priv *priv = netdev_priv(dev);
    struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

    ar7_device_reset(pdata->reset_bit);
    for (i = 0; i < 8; i++) {
        cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
        cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
    }
    cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

    cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
            MBP_RXMCAST);
    cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
    for (i = 0; i < 8; i++)
        cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
    cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
    cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
            (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
            (dev->dev_addr[3] << 24));
    cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
    cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
    cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
    cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
    cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

    cpmac_write(priv->regs, CPMAC_RX_CONTROL,
            cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
    cpmac_write(priv->regs, CPMAC_TX_CONTROL,
            cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
    cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
            cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
            MAC_FDX);
}

static void cpmac_clear_rx(struct net_device *dev)
{
    struct cpmac_priv *priv = netdev_priv(dev);
    struct cpmac_desc *desc;
    int i;

    if (unlikely(!priv->rx_head))
        return;
    desc = priv->rx_head;
    for (i = 0; i < priv->ring_size; i++) {
        if ((desc->dataflags & CPMAC_OWN) == 0) {
            if (netif_msg_rx_err(priv) && net_ratelimit())
                netdev_warn(dev, "packet dropped\n");
            if (unlikely(netif_msg_hw(priv)))
                cpmac_dump_desc(dev, desc);
            desc->dataflags = CPMAC_OWN;
            dev->stats.rx_dropped++;
        }
        desc->hw_next = desc->next->mapping;
        desc = desc->next;
    }
    priv->rx_head->prev->hw_next = 0;
}

static void cpmac_clear_tx(struct net_device *dev)
{
    struct cpmac_priv *priv = netdev_priv(dev);
    int i;

    if (unlikely(!priv->desc_ring))
        return;
    for (i = 0; i < CPMAC_QUEUES; i++) {
        priv->desc_ring[i].dataflags = 0;
        if (priv->desc_ring[i].skb) {
            dev_kfree_skb_any(priv->desc_ring[i].skb);
            priv->desc_ring[i].skb = NULL;
        }
    }
}

static void cpmac_hw_error(struct work_struct *work)
{
    struct cpmac_priv *priv =
        container_of(work, struct cpmac_priv, reset_work);

    spin_lock(&priv->rx_lock);
    cpmac_clear_rx(priv->dev);
    spin_unlock(&priv->rx_lock);
    cpmac_clear_tx(priv->dev);
    cpmac_hw_start(priv->dev);
    barrier();
    atomic_dec(&priv->reset_pending);

    netif_tx_wake_all_queues(priv->dev);
    cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}

static void cpmac_check_status(struct net_device *dev)
{
    struct cpmac_priv *priv = netdev_priv(dev);

    u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
    int rx_channel = (macstatus >> 8) & 7;
    int rx_code = (macstatus >> 12) & 15;
    int tx_channel = (macstatus >> 16) & 7;
    int tx_code = (macstatus >> 20) & 15;

    if (rx_code || tx_code) {
        if (netif_msg_drv(priv) && net_ratelimit()) {
            /* Can't find any documentation on what these
             * error codes actually are. So just log them and hope..
             */
            if (rx_code)
                netdev_warn(dev, "host error %d on rx "
                    "channel %d (macstatus %08x), resetting\n",
                    rx_code, rx_channel, macstatus);
            if (tx_code)
                netdev_warn(dev, "host error %d on tx "
                    "channel %d (macstatus %08x), resetting\n",
                    tx_code, tx_channel, macstatus);
        }

        netif_tx_stop_all_queues(dev);
        cpmac_hw_stop(dev);
        if (schedule_work(&priv->reset_work))
            atomic_inc(&priv->reset_pending);
        if (unlikely(netif_msg_hw(priv)))
            cpmac_dump_regs(dev);
    }
    cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}

static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct cpmac_priv *priv;
    int queue;
    u32 status;

    priv = netdev_priv(dev);

    status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

    if (unlikely(netif_msg_intr(priv)))
        netdev_dbg(dev, "interrupt status: 0x%08x\n", status);

    if (status & MAC_INT_TX)
        cpmac_end_xmit(dev, (status & 7));

    if (status & MAC_INT_RX) {
        queue = (status >> 8) & 7;
        if (napi_schedule_prep(&priv->napi)) {
            cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
            __napi_schedule(&priv->napi);
        }
    }

    cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

    if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
        cpmac_check_status(dev);

    return IRQ_HANDLED;
}

static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
    struct cpmac_priv *priv = netdev_priv(dev);

    spin_lock(&priv->lock);
    dev->stats.tx_errors++;
    spin_unlock(&priv->lock);
    if (netif_msg_tx_err(priv) && net_ratelimit())
        netdev_warn(dev, "transmit timeout\n");

    atomic_inc(&priv->reset_pending);
    barrier();
    cpmac_clear_tx(dev);
    barrier();
    atomic_dec(&priv->reset_pending);

    netif_tx_wake_all_queues(priv->dev);
}

static void cpmac_get_ringparam(struct net_device *dev,
                struct ethtool_ringparam *ring,
                struct kernel_ethtool_ringparam *kernel_ring,
                struct netlink_ext_ack *extack)
{
    struct cpmac_priv *priv = netdev_priv(dev);

    ring->rx_max_pending = 1024;
    ring->rx_mini_max_pending = 1;
    ring->rx_jumbo_max_pending = 1;
    ring->tx_max_pending = 1;

    ring->rx_pending = priv->ring_size;
    ring->rx_mini_pending = 1;
    ring->rx_jumbo_pending = 1;
    ring->tx_pending = 1;
}

static int cpmac_set_ringparam(struct net_device *dev,
                   struct ethtool_ringparam *ring,
                   struct kernel_ethtool_ringparam *kernel_ring,
                   struct netlink_ext_ack *extack)
{
    struct cpmac_priv *priv = netdev_priv(dev);

    if (netif_running(dev))
        return -EBUSY;
    priv->ring_size = ring->rx_pending;

    return 0;
}

static void cpmac_get_drvinfo(struct net_device *dev,
                  struct ethtool_drvinfo *info)
{
    strlcpy(info->driver, "cpmac", sizeof(info->driver));
    strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
    snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
}

static const struct ethtool_ops cpmac_ethtool_ops = {
    .get_drvinfo = cpmac_get_drvinfo,
    .get_link = ethtool_op_get_link,
    .get_ringparam = cpmac_get_ringparam,
    .set_ringparam = cpmac_set_ringparam,
    .get_link_ksettings = phy_ethtool_get_link_ksettings,
    .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static void cpmac_adjust_link(struct net_device *dev)
{
    struct cpmac_priv *priv = netdev_priv(dev);
    int new_state = 0;

    spin_lock(&priv->lock);
    if (dev->phydev->link) {
        netif_tx_start_all_queues(dev);
        if (dev->phydev->duplex != priv->oldduplex) {
            new_state = 1;
            priv->oldduplex = dev->phydev->duplex;
        }

        if (dev->phydev->speed != priv->oldspeed) {
            new_state = 1;
            priv->oldspeed = dev->phydev->speed;
        }

        if (!priv->oldlink) {
            new_state = 1;
            priv->oldlink = 1;
        }
    } else if (priv->oldlink) {
        new_state = 1;
        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;
    }

    if (new_state && netif_msg_link(priv) && net_ratelimit())
        phy_print_status(dev->phydev);

    spin_unlock(&priv->lock);
}

static int cpmac_open(struct net_device *dev)
{
    int i, size, res;
    struct cpmac_priv *priv = netdev_priv(dev);
    struct resource *mem;
    struct cpmac_desc *desc;
    struct sk_buff *skb;

    mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
    if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
        if (netif_msg_drv(priv))
            netdev_err(dev, "failed to request registers\n");

        res = -ENXIO;
        goto fail_reserve;
    }

    priv->regs = ioremap(mem->start, resource_size(mem));
    if (!priv->regs) {
        if (netif_msg_drv(priv))
            netdev_err(dev, "failed to remap registers\n");

        res = -ENXIO;
        goto fail_remap;
    }

    size = priv->ring_size + CPMAC_QUEUES;
    priv->desc_ring = dma_alloc_coherent(&dev->dev,
                         sizeof(struct cpmac_desc) * size,
                         &priv->dma_ring,
                         GFP_KERNEL);
    if (!priv->desc_ring) {
        res = -ENOMEM;
        goto fail_alloc;
    }

    for (i = 0; i < size; i++)
        priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

    priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
    for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
        skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
        if (unlikely(!skb)) {
            res = -ENOMEM;
            goto fail_desc;
        }
        desc->skb = skb;
        desc->data_mapping = dma_map_single(&dev->dev, skb->data,
                            CPMAC_SKB_SIZE,
                            DMA_FROM_DEVICE);
        desc->hw_data = (u32)desc->data_mapping;
        desc->buflen = CPMAC_SKB_SIZE;
        desc->dataflags = CPMAC_OWN;
        desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
        desc->next->prev = desc;
        desc->hw_next = (u32)desc->next->mapping;
    }

    priv->rx_head->prev->hw_next = (u32)0;

    res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
    if (res) {
        if (netif_msg_drv(priv))
            netdev_err(dev, "failed to obtain irq\n");

        goto fail_irq;
    }

    atomic_set(&priv->reset_pending, 0);
    INIT_WORK(&priv->reset_work, cpmac_hw_error);
    cpmac_hw_start(dev);

    napi_enable(&priv->napi);
    phy_start(dev->phydev);

    return 0;

fail_irq:
fail_desc:
    for (i = 0; i < priv->ring_size; i++) {
        if (priv->rx_head[i].skb) {
            dma_unmap_single(&dev->dev,
                     priv->rx_head[i].data_mapping,
                     CPMAC_SKB_SIZE,
                     DMA_FROM_DEVICE);
            kfree_skb(priv->rx_head[i].skb);
        }
    }
    dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
              priv->desc_ring, priv->dma_ring);

fail_alloc:
    iounmap(priv->regs);

fail_remap:
    release_mem_region(mem->start, resource_size(mem));

fail_reserve:
    return res;
}

static int cpmac_stop(struct net_device *dev)
{
    int i;
    struct cpmac_priv *priv = netdev_priv(dev);
    struct resource *mem;

    netif_tx_stop_all_queues(dev);

    cancel_work_sync(&priv->reset_work);
    napi_disable(&priv->napi);
    phy_stop(dev->phydev);

    cpmac_hw_stop(dev);

    for (i = 0; i < 8; i++)
        cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
    cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
    cpmac_write(priv->regs, CPMAC_MBP, 0);

    free_irq(dev->irq, dev);
    iounmap(priv->regs);
    mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
    release_mem_region(mem->start, resource_size(mem));
    priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
    for (i = 0; i < priv->ring_size; i++) {
        if (priv->rx_head[i].skb) {
            dma_unmap_single(&dev->dev,
                     priv->rx_head[i].data_mapping,
                     CPMAC_SKB_SIZE,
                     DMA_FROM_DEVICE);
            kfree_skb(priv->rx_head[i].skb);
        }
    }

    dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
              (CPMAC_QUEUES + priv->ring_size),
              priv->desc_ring, priv->dma_ring);

    return 0;
}

static const struct net_device_ops cpmac_netdev_ops = {
    .ndo_open       = cpmac_open,
    .ndo_stop       = cpmac_stop,
    .ndo_start_xmit     = cpmac_start_xmit,
    .ndo_tx_timeout     = cpmac_tx_timeout,
    .ndo_set_rx_mode    = cpmac_set_multicast_list,
    .ndo_eth_ioctl      = phy_do_ioctl_running,
    .ndo_validate_addr  = eth_validate_addr,
    .ndo_set_mac_address    = eth_mac_addr,
};

static int external_switch;

static int cpmac_probe(struct platform_device *pdev)
{
    int rc, phy_id;
    char mdio_bus_id[MII_BUS_ID_SIZE];
    struct resource *mem;
    struct cpmac_priv *priv;
    struct net_device *dev;
    struct plat_cpmac_data *pdata;
    struct phy_device *phydev = NULL;

    pdata = dev_get_platdata(&pdev->dev);

    if (external_switch || dumb_switch) {
        strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
        phy_id = pdev->id;
    } else {
        for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
            if (!(pdata->phy_mask & (1 << phy_id)))
                continue;
            if (!mdiobus_get_phy(cpmac_mii, phy_id))
                continue;
            strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
            break;
        }
    }

    if (phy_id == PHY_MAX_ADDR) {
        dev_err(&pdev->dev, "no PHY present, falling back "
            "to switch on MDIO bus 0\n");
        strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
        phy_id = pdev->id;
    }
    mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0';

    dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
    if (!dev)
        return -ENOMEM;

    SET_NETDEV_DEV(dev, &pdev->dev);
    platform_set_drvdata(pdev, dev);
    priv = netdev_priv(dev);

    priv->pdev = pdev;
    mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
    if (!mem) {
        rc = -ENODEV;
        goto fail;
    }

    dev->irq = platform_get_irq_byname(pdev, "irq");

    dev->netdev_ops = &cpmac_netdev_ops;
    dev->ethtool_ops = &cpmac_ethtool_ops;

    netif_napi_add(dev, &priv->napi, cpmac_poll, 64);

    spin_lock_init(&priv->lock);
    spin_lock_init(&priv->rx_lock);
    priv->dev = dev;
    priv->ring_size = 64;
    priv->msg_enable = netif_msg_init(debug_level, 0xff);
    eth_hw_addr_set(dev, pdata->dev_addr);

    snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
                        mdio_bus_id, phy_id);

    phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
                 PHY_INTERFACE_MODE_MII);

    if (IS_ERR(phydev)) {
        if (netif_msg_drv(priv))
            dev_err(&pdev->dev, "Could not attach to PHY\n");

        rc = PTR_ERR(phydev);
        goto fail;
    }

    rc = register_netdev(dev);
    if (rc) {
        dev_err(&pdev->dev, "Could not register net device\n");
        goto fail;
    }

    if (netif_msg_probe(priv)) {
        dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
             "mac: %pM\n", (void *)mem->start, dev->irq,
             priv->phy_name, dev->dev_addr);
    }

    return 0;

fail:
    free_netdev(dev);
    return rc;
}

static int cpmac_remove(struct platform_device *pdev)
{
    struct net_device *dev = platform_get_drvdata(pdev);

    unregister_netdev(dev);
    free_netdev(dev);

    return 0;
}

static struct platform_driver cpmac_driver = {
    .driver = {
        .name   = "cpmac",
    },
    .probe  = cpmac_probe,
    .remove = cpmac_remove,
};

int cpmac_init(void)
{
    u32 mask;
    int i, res;

    cpmac_mii = mdiobus_alloc();
    if (cpmac_mii == NULL)
        return -ENOMEM;

    cpmac_mii->name = "cpmac-mii";
    cpmac_mii->read = cpmac_mdio_read;
    cpmac_mii->write = cpmac_mdio_write;
    cpmac_mii->reset = cpmac_mdio_reset;

    cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);

    if (!cpmac_mii->priv) {
        pr_err("Can't ioremap mdio registers\n");
        res = -ENXIO;
        goto fail_alloc;
    }

    /* FIXME: unhardcode gpio&reset bits */
    ar7_gpio_disable(26);
    ar7_gpio_disable(27);
    ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
    ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
    ar7_device_reset(AR7_RESET_BIT_EPHY);

    cpmac_mii->reset(cpmac_mii);

    for (i = 0; i < 300; i++) {
        mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
        if (mask)
            break;
        else
            msleep(10);
    }

1211     mask &= 0x7fffffff;
1212     if (mask & (mask - 1)) {
1213         external_switch = 1;
1214         mask = 0;
1215     }
1216 
1217     cpmac_mii->phy_mask = ~(mask | 0x80000000);
1218     snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
1219 
1220     res = mdiobus_register(cpmac_mii);
1221     if (res)
1222         goto fail_mii;
1223 
1224     res = platform_driver_register(&cpmac_driver);
1225     if (res)
1226         goto fail_cpmac;
1227 
1228     return 0;
1229 
1230 fail_cpmac:
1231     mdiobus_unregister(cpmac_mii);
1232 
1233 fail_mii:
1234     iounmap(cpmac_mii->priv);
1235 
1236 fail_alloc:
1237     mdiobus_free(cpmac_mii);
1238 
1239     return res;
1240 }
1241 
1242 void cpmac_exit(void)
1243 {
1244     platform_driver_unregister(&cpmac_driver);
1245     mdiobus_unregister(cpmac_mii);
1246     iounmap(cpmac_mii->priv);
1247     mdiobus_free(cpmac_mii);
1248 }
1249 
1250 module_init(cpmac_init);
1251 module_exit(cpmac_exit);