// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 *
 * 2005-2010 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 * available in the GRLIB VHDL IP core library.
 *
 * Full documentation of both cores can be found here:
 * https://www.gaisler.com/products/grlib/grip.pdf
 *
 * The Gigabit version supports scatter/gather DMA, any alignment of
 * buffers and checksum offloading.
 *
 * Contributors: Kristoffer Glembo
 *               Daniel Hellstrom
 *               Marko Isomaki
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "greth.h"

#define GRETH_DEF_MSG_ENABLE      \
    (NETIF_MSG_DRV      | \
     NETIF_MSG_PROBE    | \
     NETIF_MSG_LINK     | \
     NETIF_MSG_IFDOWN   | \
     NETIF_MSG_IFUP     | \
     NETIF_MSG_RX_ERR   | \
     NETIF_MSG_TX_ERR)

static int greth_debug = -1;    /* -1 == use GRETH_DEF_MSG_ENABLE as value */
module_param(greth_debug, int, 0);
MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");

/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
static int macaddr[6];
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");

static int greth_edcl = 1;
module_param(greth_edcl, int, 0);
MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");

static int greth_open(struct net_device *dev);
static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
       struct net_device *dev);
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
       struct net_device *dev);
static int greth_rx(struct net_device *dev, int limit);
static int greth_rx_gbit(struct net_device *dev, int limit);
static void greth_clean_tx(struct net_device *dev);
static void greth_clean_tx_gbit(struct net_device *dev);
static irqreturn_t greth_interrupt(int irq, void *dev_id);
static int greth_close(struct net_device *dev);
static int greth_set_mac_add(struct net_device *dev, void *p);
static void greth_set_multicast_list(struct net_device *dev);

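/* Register access helpers: the GRETH registers are big-endian, so all
 * accesses go through __raw_readl()/__raw_writel() with explicit
 * byte-order conversion rather than through readl()/writel().
 */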
#define GRETH_REGLOAD(a)        (be32_to_cpu(__raw_readl(&(a))))
#define GRETH_REGSAVE(a, v)         (__raw_writel(cpu_to_be32(v), &(a)))
#define GRETH_REGORIN(a, v)         (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
#define GRETH_REGANDIN(a, v)        (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))

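/* Descriptor ring index arithmetic. The ring sizes (GRETH_TXBD_NUM and
 * GRETH_RXBD_NUM in greth.h) are powers of two, so wrap-around is a
 * simple bitwise AND with the corresponding mask.
 */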
#define NEXT_TX(N)      (((N) + 1) & GRETH_TXBD_NUM_MASK)
#define SKIP_TX(N, C)   (((N) + C) & GRETH_TXBD_NUM_MASK)
#define NEXT_RX(N)      (((N) + 1) & GRETH_RXBD_NUM_MASK)

static void greth_print_rx_packet(void *addr, int len)
{
    print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
            addr, len, true);
}

static void greth_print_tx_packet(struct sk_buff *skb)
{
    int i;
    int length;

    if (skb_shinfo(skb)->nr_frags == 0)
        length = skb->len;
    else
        length = skb_headlen(skb);

    print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
            skb->data, length, true);

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {

        print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
                   skb_frag_address(&skb_shinfo(skb)->frags[i]),
                   skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
    }
}

static inline void greth_enable_tx(struct greth_private *greth)
{
    wmb();
    GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}

static inline void greth_enable_tx_and_irq(struct greth_private *greth)
{
    wmb(); /* BDs must be written to memory before enabling TX */
    GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
}

static inline void greth_disable_tx(struct greth_private *greth)
{
    GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}

static inline void greth_enable_rx(struct greth_private *greth)
{
    wmb();
    GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}

static inline void greth_disable_rx(struct greth_private *greth)
{
    GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}

static inline void greth_enable_irqs(struct greth_private *greth)
{
    GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}

static inline void greth_disable_irqs(struct greth_private *greth)
{
    GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
}

static inline void greth_write_bd(u32 *bd, u32 val)
{
    __raw_writel(cpu_to_be32(val), bd);
}

static inline u32 greth_read_bd(u32 *bd)
{
    return be32_to_cpu(__raw_readl(bd));
}

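/* Release all RX/TX buffers and their DMA mappings. On the gigabit MAC
 * this walks the outstanding TX SKBs (head plus fragments); on the
 * 10/100 MAC it frees the fixed bounce buffers. Called on ifdown and on
 * ring-initialization failure.
 */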
static void greth_clean_rings(struct greth_private *greth)
{
    int i;
    struct greth_bd *rx_bdp = greth->rx_bd_base;
    struct greth_bd *tx_bdp = greth->tx_bd_base;

    if (greth->gbit_mac) {

        /* Free and unmap RX buffers */
        for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
            if (greth->rx_skbuff[i] != NULL) {
                dev_kfree_skb(greth->rx_skbuff[i]);
                dma_unmap_single(greth->dev,
                         greth_read_bd(&rx_bdp->addr),
                         MAX_FRAME_SIZE+NET_IP_ALIGN,
                         DMA_FROM_DEVICE);
            }
        }

        /* TX buffers */
        while (greth->tx_free < GRETH_TXBD_NUM) {

            struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
            int nr_frags = skb_shinfo(skb)->nr_frags;

            tx_bdp = greth->tx_bd_base + greth->tx_last;
            greth->tx_last = NEXT_TX(greth->tx_last);

            dma_unmap_single(greth->dev,
                     greth_read_bd(&tx_bdp->addr),
                     skb_headlen(skb),
                     DMA_TO_DEVICE);

            for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                tx_bdp = greth->tx_bd_base + greth->tx_last;

                dma_unmap_page(greth->dev,
                           greth_read_bd(&tx_bdp->addr),
                           skb_frag_size(frag),
                           DMA_TO_DEVICE);

                greth->tx_last = NEXT_TX(greth->tx_last);
            }
            greth->tx_free += nr_frags+1;
            dev_kfree_skb(skb);
        }

    } else { /* 10/100 Mbps MAC */

        for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
            kfree(greth->rx_bufs[i]);
            dma_unmap_single(greth->dev,
                     greth_read_bd(&rx_bdp->addr),
                     MAX_FRAME_SIZE,
                     DMA_FROM_DEVICE);
        }
        for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
            kfree(greth->tx_bufs[i]);
            dma_unmap_single(greth->dev,
                     greth_read_bd(&tx_bdp->addr),
                     MAX_FRAME_SIZE,
                     DMA_TO_DEVICE);
        }
    }
}

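/* Allocate and map the RX (and, for 10/100, TX) buffers and point the
 * hardware at the descriptor rings. The gigabit MAC receives directly
 * into SKBs; the 10/100 MAC uses fixed kmalloc'ed bounce buffers and
 * copies to/from SKBs, which sidesteps that core's buffer-alignment
 * restrictions (the file header notes only the gigabit version handles
 * any alignment).
 */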
static int greth_init_rings(struct greth_private *greth)
{
    struct sk_buff *skb;
    struct greth_bd *rx_bd, *tx_bd;
    u32 dma_addr;
    int i;

    rx_bd = greth->rx_bd_base;
    tx_bd = greth->tx_bd_base;

    /* Initialize descriptor rings and buffers */
    if (greth->gbit_mac) {

        for (i = 0; i < GRETH_RXBD_NUM; i++) {
            skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
            if (skb == NULL) {
                if (netif_msg_ifup(greth))
                    dev_err(greth->dev, "Error allocating DMA ring.\n");
                goto cleanup;
            }
            skb_reserve(skb, NET_IP_ALIGN);
            dma_addr = dma_map_single(greth->dev,
                          skb->data,
                          MAX_FRAME_SIZE+NET_IP_ALIGN,
                          DMA_FROM_DEVICE);

            if (dma_mapping_error(greth->dev, dma_addr)) {
                if (netif_msg_ifup(greth))
                    dev_err(greth->dev, "Could not create initial DMA mapping\n");
                goto cleanup;
            }
            greth->rx_skbuff[i] = skb;
            greth_write_bd(&rx_bd[i].addr, dma_addr);
            greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
        }

    } else {

        /* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
        for (i = 0; i < GRETH_RXBD_NUM; i++) {

            greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

            if (greth->rx_bufs[i] == NULL) {
                if (netif_msg_ifup(greth))
                    dev_err(greth->dev, "Error allocating DMA ring.\n");
                goto cleanup;
            }

            dma_addr = dma_map_single(greth->dev,
                          greth->rx_bufs[i],
                          MAX_FRAME_SIZE,
                          DMA_FROM_DEVICE);

            if (dma_mapping_error(greth->dev, dma_addr)) {
                if (netif_msg_ifup(greth))
                    dev_err(greth->dev, "Could not create initial DMA mapping\n");
                goto cleanup;
            }
            greth_write_bd(&rx_bd[i].addr, dma_addr);
            greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
        }
        for (i = 0; i < GRETH_TXBD_NUM; i++) {

            greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

            if (greth->tx_bufs[i] == NULL) {
                if (netif_msg_ifup(greth))
                    dev_err(greth->dev, "Error allocating DMA ring.\n");
                goto cleanup;
            }

            dma_addr = dma_map_single(greth->dev,
                          greth->tx_bufs[i],
                          MAX_FRAME_SIZE,
                          DMA_TO_DEVICE);

            if (dma_mapping_error(greth->dev, dma_addr)) {
                if (netif_msg_ifup(greth))
                    dev_err(greth->dev, "Could not create initial DMA mapping\n");
                goto cleanup;
            }
            greth_write_bd(&tx_bd[i].addr, dma_addr);
            greth_write_bd(&tx_bd[i].stat, 0);
        }
    }
    greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
               greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

    /* Initialize pointers. */
    greth->rx_cur = 0;
    greth->tx_next = 0;
    greth->tx_last = 0;
    greth->tx_free = GRETH_TXBD_NUM;

    /* Initialize descriptor base address */
    GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
    GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

    return 0;

cleanup:
    greth_clean_rings(greth);
    return -ENOMEM;
}

static int greth_open(struct net_device *dev)
{
    struct greth_private *greth = netdev_priv(dev);
    int err;

    err = greth_init_rings(greth);
    if (err) {
        if (netif_msg_ifup(greth))
            dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
        return err;
    }

    err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
    if (err) {
        if (netif_msg_ifup(greth))
            dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
        greth_clean_rings(greth);
        return err;
    }

    if (netif_msg_ifup(greth))
        dev_dbg(&dev->dev, " starting queue\n");
    netif_start_queue(dev);

    GRETH_REGSAVE(greth->regs->status, 0xFF);

    napi_enable(&greth->napi);

    greth_enable_irqs(greth);
    greth_enable_tx(greth);
    greth_enable_rx(greth);
    return 0;
}

static int greth_close(struct net_device *dev)
{
    struct greth_private *greth = netdev_priv(dev);

    napi_disable(&greth->napi);

    greth_disable_irqs(greth);
    greth_disable_tx(greth);
    greth_disable_rx(greth);

    netif_stop_queue(dev);

    free_irq(greth->irq, (void *) dev);

    greth_clean_rings(greth);

    return 0;
}

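/* TX path for the 10/100 MAC: the frame is copied into a preallocated,
 * premapped bounce buffer, so only a dma_sync is needed per packet and
 * the SKB can be freed immediately after queuing.
 */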
static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct greth_private *greth = netdev_priv(dev);
    struct greth_bd *bdp;
    int err = NETDEV_TX_OK;
    u32 status, dma_addr, ctrl;
    unsigned long flags;

    /* Clean TX Ring */
    greth_clean_tx(greth->netdev);

    if (unlikely(greth->tx_free <= 0)) {
        spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
        ctrl = GRETH_REGLOAD(greth->regs->control);
        /* Enable TX IRQ only if not already in poll() routine */
        if (ctrl & GRETH_RXI)
            GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&greth->devlock, flags);
        return NETDEV_TX_BUSY;
    }

    if (netif_msg_pktdata(greth))
        greth_print_tx_packet(skb);

    if (unlikely(skb->len > MAX_FRAME_SIZE)) {
        dev->stats.tx_errors++;
        goto out;
    }

    bdp = greth->tx_bd_base + greth->tx_next;
    dma_addr = greth_read_bd(&bdp->addr);

    memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

    dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

    status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
    greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;

    /* Wrap around descriptor ring */
    if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
        status |= GRETH_BD_WR;
    }

    greth->tx_next = NEXT_TX(greth->tx_next);
    greth->tx_free--;

    /* Write descriptor control word and enable transmission */
    greth_write_bd(&bdp->stat, status);
    spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
    greth_enable_tx(greth);
    spin_unlock_irqrestore(&greth->devlock, flags);

out:
    dev_kfree_skb(skb);
    return err;
}

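/* Number of free TX descriptors between tx_next and tx_last. One slot
 * is deliberately kept unused so that a full ring (tx_next just behind
 * tx_last) can be told apart from an empty one (tx_next == tx_last).
 */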
static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
{
    if (tx_next < tx_last)
        return (tx_last - tx_next) - 1;
    else
        return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
}

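/* Scatter/gather TX path for the gigabit MAC. Each fragment gets its
 * own descriptor; all descriptors except the first are enabled up
 * front, and the first one is enabled last (after a wmb()) so the
 * hardware never sees a partially built chain.
 */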
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
    struct greth_private *greth = netdev_priv(dev);
    struct greth_bd *bdp;
    u32 status, dma_addr;
    int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
    unsigned long flags;
    u16 tx_last;

    nr_frags = skb_shinfo(skb)->nr_frags;
    tx_last = greth->tx_last;
    rmb(); /* tx_last is updated by the poll task */

    if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
        netif_stop_queue(dev);
        err = NETDEV_TX_BUSY;
        goto out;
    }

    if (netif_msg_pktdata(greth))
        greth_print_tx_packet(skb);

    if (unlikely(skb->len > MAX_FRAME_SIZE)) {
        dev->stats.tx_errors++;
        goto out;
    }

    /* Save skb pointer. */
    greth->tx_skbuff[greth->tx_next] = skb;

    /* Linear buf */
    if (nr_frags != 0)
        status = GRETH_TXBD_MORE;
    else
        status = GRETH_BD_IE;

    if (skb->ip_summed == CHECKSUM_PARTIAL)
        status |= GRETH_TXBD_CSALL;
    status |= skb_headlen(skb) & GRETH_BD_LEN;
    if (greth->tx_next == GRETH_TXBD_NUM_MASK)
        status |= GRETH_BD_WR;

    bdp = greth->tx_bd_base + greth->tx_next;
    greth_write_bd(&bdp->stat, status);
    dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

    if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
        goto map_error;

    greth_write_bd(&bdp->addr, dma_addr);

    curr_tx = NEXT_TX(greth->tx_next);

    /* Frags */
    for (i = 0; i < nr_frags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        greth->tx_skbuff[curr_tx] = NULL;
        bdp = greth->tx_bd_base + curr_tx;

        status = GRETH_BD_EN;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
            status |= GRETH_TXBD_CSALL;
        status |= skb_frag_size(frag) & GRETH_BD_LEN;

        /* Wrap around descriptor ring */
        if (curr_tx == GRETH_TXBD_NUM_MASK)
            status |= GRETH_BD_WR;

        /* More fragments left */
        if (i < nr_frags - 1)
            status |= GRETH_TXBD_MORE;
        else
            status |= GRETH_BD_IE; /* enable IRQ on last fragment */

        greth_write_bd(&bdp->stat, status);

        dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
                        DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
            goto frag_map_error;

        greth_write_bd(&bdp->addr, dma_addr);

        curr_tx = NEXT_TX(curr_tx);
    }

    wmb();

    /* Enable the descriptor chain by enabling the first descriptor */
    bdp = greth->tx_bd_base + greth->tx_next;
    greth_write_bd(&bdp->stat,
               greth_read_bd(&bdp->stat) | GRETH_BD_EN);

    spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
    greth->tx_next = curr_tx;
    greth_enable_tx_and_irq(greth);
    spin_unlock_irqrestore(&greth->devlock, flags);

    return NETDEV_TX_OK;

frag_map_error:
    /* Unmap SKB mappings that succeeded and disable descriptor */
    for (i = 0; greth->tx_next + i != curr_tx; i++) {
        bdp = greth->tx_bd_base + greth->tx_next + i;
        dma_unmap_single(greth->dev,
                 greth_read_bd(&bdp->addr),
                 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
                 DMA_TO_DEVICE);
        greth_write_bd(&bdp->stat, 0);
    }
map_error:
    if (net_ratelimit())
        dev_warn(greth->dev, "Could not create TX DMA mapping\n");
    dev_kfree_skb(skb);
out:
    return err;
}

static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct greth_private *greth;
    u32 status, ctrl;
    irqreturn_t retval = IRQ_NONE;

    greth = netdev_priv(dev);

    spin_lock(&greth->devlock);

    /* Get the interrupt events that caused us to be here. */
    status = GRETH_REGLOAD(greth->regs->status);

    /* We must also check that the interrupts are enabled: the INT_TX and
     * INT_RX status flags may be set regardless of whether the IRQ is
     * enabled or not, which matters especially when the IRQ is shared.
     */
    ctrl = GRETH_REGLOAD(greth->regs->control);

    /* Handle rx and tx interrupts through poll */
    if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
        ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
        retval = IRQ_HANDLED;

        /* Disable interrupts and schedule poll() */
        greth_disable_irqs(greth);
        napi_schedule(&greth->napi);
    }

    spin_unlock(&greth->devlock);

    return retval;
}

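/* Reclaim completed TX descriptors for the 10/100 MAC. The TX interrupt
 * status is cleared before each descriptor read, so a completion that
 * races with the check re-raises the status instead of being lost.
 */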
static void greth_clean_tx(struct net_device *dev)
{
    struct greth_private *greth;
    struct greth_bd *bdp;
    u32 stat;

    greth = netdev_priv(dev);

    while (1) {
        bdp = greth->tx_bd_base + greth->tx_last;
        GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
        mb();
        stat = greth_read_bd(&bdp->stat);

        if (unlikely(stat & GRETH_BD_EN))
            break;

        if (greth->tx_free == GRETH_TXBD_NUM)
            break;

        /* Check status for errors */
        if (unlikely(stat & GRETH_TXBD_STATUS)) {
            dev->stats.tx_errors++;
            if (stat & GRETH_TXBD_ERR_AL)
                dev->stats.tx_aborted_errors++;
            if (stat & GRETH_TXBD_ERR_UE)
                dev->stats.tx_fifo_errors++;
        }
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
        greth->tx_last = NEXT_TX(greth->tx_last);
        greth->tx_free++;
    }

    if (greth->tx_free > 0) {
        netif_wake_queue(dev);
    }
}

static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
    /* Check status for errors */
    if (unlikely(stat & GRETH_TXBD_STATUS)) {
        dev->stats.tx_errors++;
        if (stat & GRETH_TXBD_ERR_AL)
            dev->stats.tx_aborted_errors++;
        if (stat & GRETH_TXBD_ERR_UE)
            dev->stats.tx_fifo_errors++;
        if (stat & GRETH_TXBD_ERR_LC)
            dev->stats.tx_aborted_errors++;
    }
    dev->stats.tx_packets++;
}

static void greth_clean_tx_gbit(struct net_device *dev)
{
    struct greth_private *greth;
    struct greth_bd *bdp, *bdp_last_frag;
    struct sk_buff *skb = NULL;
    u32 stat;
    int nr_frags, i;
    u16 tx_last;

    greth = netdev_priv(dev);
    tx_last = greth->tx_last;

    while (tx_last != greth->tx_next) {

        skb = greth->tx_skbuff[tx_last];

        nr_frags = skb_shinfo(skb)->nr_frags;

        /* We only clean fully completed SKBs */
        bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);

        GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
        mb();
        stat = greth_read_bd(&bdp_last_frag->stat);

        if (stat & GRETH_BD_EN)
            break;

        greth->tx_skbuff[tx_last] = NULL;

        greth_update_tx_stats(dev, stat);
        dev->stats.tx_bytes += skb->len;

        bdp = greth->tx_bd_base + tx_last;

        tx_last = NEXT_TX(tx_last);

        dma_unmap_single(greth->dev,
                 greth_read_bd(&bdp->addr),
                 skb_headlen(skb),
                 DMA_TO_DEVICE);

        for (i = 0; i < nr_frags; i++) {
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
            bdp = greth->tx_bd_base + tx_last;

            dma_unmap_page(greth->dev,
                       greth_read_bd(&bdp->addr),
                       skb_frag_size(frag),
                       DMA_TO_DEVICE);

            tx_last = NEXT_TX(tx_last);
        }
        dev_kfree_skb(skb);
    }
    if (skb) { /* skb is set only if the above while loop was entered */
        wmb();
        greth->tx_last = tx_last;

        if (netif_queue_stopped(dev) &&
            (greth_num_free_bds(tx_last, greth->tx_next) >
            (MAX_SKB_FRAGS+1)))
            netif_wake_queue(dev);
    }
}

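/* RX path for the 10/100 MAC: frames are copied out of the premapped
 * DMA buffers into freshly allocated SKBs, and each descriptor is
 * handed straight back to the hardware.
 */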
static int greth_rx(struct net_device *dev, int limit)
{
    struct greth_private *greth;
    struct greth_bd *bdp;
    struct sk_buff *skb;
    int pkt_len;
    int bad, count;
    u32 status, dma_addr;
    unsigned long flags;

    greth = netdev_priv(dev);

    for (count = 0; count < limit; ++count) {

        bdp = greth->rx_bd_base + greth->rx_cur;
        GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
        mb();
        status = greth_read_bd(&bdp->stat);

        if (unlikely(status & GRETH_BD_EN)) {
            break;
        }

        dma_addr = greth_read_bd(&bdp->addr);
        bad = 0;

        /* Check status for errors. */
        if (unlikely(status & GRETH_RXBD_STATUS)) {
            if (status & GRETH_RXBD_ERR_FT) {
                dev->stats.rx_length_errors++;
                bad = 1;
            }
            if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
                dev->stats.rx_frame_errors++;
                bad = 1;
            }
            if (status & GRETH_RXBD_ERR_CRC) {
                dev->stats.rx_crc_errors++;
                bad = 1;
            }
        }
        if (unlikely(bad)) {
            dev->stats.rx_errors++;

        } else {

            pkt_len = status & GRETH_BD_LEN;

            skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

            if (unlikely(skb == NULL)) {

                if (net_ratelimit())
                    dev_warn(&dev->dev, "low on memory - packet dropped\n");

                dev->stats.rx_dropped++;

            } else {
                skb_reserve(skb, NET_IP_ALIGN);

                dma_sync_single_for_cpu(greth->dev,
                            dma_addr,
                            pkt_len,
                            DMA_FROM_DEVICE);

                if (netif_msg_pktdata(greth))
                    greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

                skb_put_data(skb, phys_to_virt(dma_addr),
                         pkt_len);

                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_bytes += pkt_len;
                dev->stats.rx_packets++;
                netif_receive_skb(skb);
            }
        }

        status = GRETH_BD_EN | GRETH_BD_IE;
        if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
            status |= GRETH_BD_WR;
        }

        wmb();
        greth_write_bd(&bdp->stat, status);

        dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

        spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
        greth_enable_rx(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);

        greth->rx_cur = NEXT_RX(greth->rx_cur);
    }

    return count;
}

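/* Returns 1 if the hardware fully checksummed the frame: any IP
 * fragment or a reported IP/UDP/TCP checksum error means the stack
 * must verify the checksum itself.
 */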
static inline int hw_checksummed(u32 status)
{
    if (status & GRETH_RXBD_IP_FRAG)
        return 0;

    if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
        return 0;

    if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
        return 0;

    if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
        return 0;

    return 1;
}

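/* RX path for the gigabit MAC: received SKBs are passed up without
 * copying and replaced in the ring by a newly allocated SKB. If either
 * the allocation or the DMA mapping fails, the old SKB is recycled and
 * the frame is counted as dropped.
 */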
static int greth_rx_gbit(struct net_device *dev, int limit)
{
    struct greth_private *greth;
    struct greth_bd *bdp;
    struct sk_buff *skb, *newskb;
    int pkt_len;
    int bad, count = 0;
    u32 status, dma_addr;
    unsigned long flags;

    greth = netdev_priv(dev);

    for (count = 0; count < limit; ++count) {

        bdp = greth->rx_bd_base + greth->rx_cur;
        skb = greth->rx_skbuff[greth->rx_cur];
        GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
        mb();
        status = greth_read_bd(&bdp->stat);
        bad = 0;

        if (status & GRETH_BD_EN)
            break;

        /* Check status for errors. */
        if (unlikely(status & GRETH_RXBD_STATUS)) {

            if (status & GRETH_RXBD_ERR_FT) {
                dev->stats.rx_length_errors++;
                bad = 1;
            } else if (status &
                   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
                dev->stats.rx_frame_errors++;
                bad = 1;
            } else if (status & GRETH_RXBD_ERR_CRC) {
                dev->stats.rx_crc_errors++;
                bad = 1;
            }
        }

        /* Allocate new skb to replace current, not needed if the
         * current skb can be reused */
        if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
            skb_reserve(newskb, NET_IP_ALIGN);

            dma_addr = dma_map_single(greth->dev,
                              newskb->data,
                              MAX_FRAME_SIZE + NET_IP_ALIGN,
                              DMA_FROM_DEVICE);

            if (!dma_mapping_error(greth->dev, dma_addr)) {
                /* Process the incoming frame. */
                pkt_len = status & GRETH_BD_LEN;

                dma_unmap_single(greth->dev,
                         greth_read_bd(&bdp->addr),
                         MAX_FRAME_SIZE + NET_IP_ALIGN,
                         DMA_FROM_DEVICE);

                if (netif_msg_pktdata(greth))
                    greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

                skb_put(skb, pkt_len);

                if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                    skb_checksum_none_assert(skb);

                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += pkt_len;
                netif_receive_skb(skb);

                greth->rx_skbuff[greth->rx_cur] = newskb;
                greth_write_bd(&bdp->addr, dma_addr);
            } else {
                if (net_ratelimit())
                    dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
                dev_kfree_skb(newskb);
                /* reusing current skb, so it is a drop */
                dev->stats.rx_dropped++;
            }
        } else if (bad) {
            /* Bad Frame transfer, the skb is reused */
            dev->stats.rx_dropped++;
        } else {
            /* Failed to allocate a new skb. Not ideal, but the current
             * "filled" skb is reused, as if the transfer had failed. One
             * could argue that RX descriptor handling should be split
             * into cleaning and filling, as in the TX part of the driver.
             */
            if (net_ratelimit())
                dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
            /* reusing current skb, so it is a drop */
            dev->stats.rx_dropped++;
        }

        status = GRETH_BD_EN | GRETH_BD_IE;
        if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
            status |= GRETH_BD_WR;
        }

        wmb();
        greth_write_bd(&bdp->stat, status);
        spin_lock_irqsave(&greth->devlock, flags);
        greth_enable_rx(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);
        greth->rx_cur = NEXT_RX(greth->rx_cur);
    }

    return count;
}

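/* NAPI poll: cleans the TX ring and receives up to @budget frames.
 * Before completing, it re-enables interrupts and re-checks the status
 * register under the lock; if more events arrived in that window, it
 * loops instead of risking a missed interrupt.
 */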
static int greth_poll(struct napi_struct *napi, int budget)
{
    struct greth_private *greth;
    int work_done = 0;
    unsigned long flags;
    u32 mask, ctrl;

    greth = container_of(napi, struct greth_private, napi);

restart_txrx_poll:
    if (greth->gbit_mac) {
        greth_clean_tx_gbit(greth->netdev);
        work_done += greth_rx_gbit(greth->netdev, budget - work_done);
    } else {
        if (netif_queue_stopped(greth->netdev))
            greth_clean_tx(greth->netdev);
        work_done += greth_rx(greth->netdev, budget - work_done);
    }

    if (work_done < budget) {

        spin_lock_irqsave(&greth->devlock, flags);

        ctrl = GRETH_REGLOAD(greth->regs->control);
        if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
            (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
            GRETH_REGSAVE(greth->regs->control,
                    ctrl | GRETH_TXI | GRETH_RXI);
            mask = GRETH_INT_RX | GRETH_INT_RE |
                   GRETH_INT_TX | GRETH_INT_TE;
        } else {
            GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
            mask = GRETH_INT_RX | GRETH_INT_RE;
        }

        if (GRETH_REGLOAD(greth->regs->status) & mask) {
            GRETH_REGSAVE(greth->regs->control, ctrl);
            spin_unlock_irqrestore(&greth->devlock, flags);
            goto restart_txrx_poll;
        } else {
            napi_complete_done(napi, work_done);
            spin_unlock_irqrestore(&greth->devlock, flags);
        }
    }

    return work_done;
}

static int greth_set_mac_add(struct net_device *dev, void *p)
{
    struct sockaddr *addr = p;
    struct greth_private *greth;
    struct greth_regs *regs;

    greth = netdev_priv(dev);
    regs = greth->regs;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    eth_hw_addr_set(dev, addr->sa_data);
    GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
    GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
              dev->dev_addr[4] << 8 | dev->dev_addr[5]);

    return 0;
}

static u32 greth_hash_get_index(__u8 *addr)
{
    return (ether_crc(6, addr)) & 0x3F;
}

static void greth_set_hash_filter(struct net_device *dev)
{
    struct netdev_hw_addr *ha;
    struct greth_private *greth = netdev_priv(dev);
    struct greth_regs *regs = greth->regs;
    u32 mc_filter[2];
    unsigned int bitnr;

    mc_filter[0] = mc_filter[1] = 0;

    netdev_for_each_mc_addr(ha, dev) {
        bitnr = greth_hash_get_index(ha->addr);
        mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
    }

    GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
    GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
}

static void greth_set_multicast_list(struct net_device *dev)
{
    int cfg;
    struct greth_private *greth = netdev_priv(dev);
    struct greth_regs *regs = greth->regs;

    cfg = GRETH_REGLOAD(regs->control);
    if (dev->flags & IFF_PROMISC)
        cfg |= GRETH_CTRL_PR;
    else
        cfg &= ~GRETH_CTRL_PR;

    if (greth->multicast) {
        if (dev->flags & IFF_ALLMULTI) {
            GRETH_REGSAVE(regs->hash_msb, -1);
            GRETH_REGSAVE(regs->hash_lsb, -1);
            cfg |= GRETH_CTRL_MCEN;
            GRETH_REGSAVE(regs->control, cfg);
            return;
        }

        if (netdev_mc_empty(dev)) {
            cfg &= ~GRETH_CTRL_MCEN;
            GRETH_REGSAVE(regs->control, cfg);
            return;
        }

        /* Setup multicast filter */
        greth_set_hash_filter(dev);
        cfg |= GRETH_CTRL_MCEN;
    }
    GRETH_REGSAVE(regs->control, cfg);
}

static u32 greth_get_msglevel(struct net_device *dev)
{
    struct greth_private *greth = netdev_priv(dev);

    return greth->msg_enable;
}

static void greth_set_msglevel(struct net_device *dev, u32 value)
{
    struct greth_private *greth = netdev_priv(dev);

    greth->msg_enable = value;
}

static int greth_get_regs_len(struct net_device *dev)
{
    return sizeof(struct greth_regs);
}

static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct greth_private *greth = netdev_priv(dev);

    strlcpy(info->driver, dev_driver_string(greth->dev),
        sizeof(info->driver));
    strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
}

static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
    int i;
    struct greth_private *greth = netdev_priv(dev);
    u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
    u32 *buff = p;

    for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
        buff[i] = greth_read_bd(&greth_regs[i]);
}

static const struct ethtool_ops greth_ethtool_ops = {
    .get_msglevel       = greth_get_msglevel,
    .set_msglevel       = greth_set_msglevel,
    .get_drvinfo        = greth_get_drvinfo,
    .get_regs_len       = greth_get_regs_len,
    .get_regs           = greth_get_regs,
    .get_link           = ethtool_op_get_link,
    .get_link_ksettings = phy_ethtool_get_link_ksettings,
    .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static struct net_device_ops greth_netdev_ops = {
    .ndo_open            = greth_open,
    .ndo_stop            = greth_close,
    .ndo_start_xmit      = greth_start_xmit,
    .ndo_set_mac_address = greth_set_mac_add,
    .ndo_validate_addr   = eth_validate_addr,
};

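/* Busy-wait for the MDIO interface to go idle, giving up after roughly
 * 40 ms (4*HZ/100 jiffies). Returns 1 on success and 0 on timeout.
 */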
static inline int wait_for_mdio(struct greth_private *greth)
{
    unsigned long timeout = jiffies + 4*HZ/100;

    while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
        if (time_after(jiffies, timeout))
            return 0;
    }
    return 1;
}

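/* MDIO bus accessors. The mdio register packs the frame as
 * data << 16 | phy << 11 | reg << 6 | op, where op is 2 for a read and
 * 1 for a write; GRETH_MII_NVALID set after a read means no valid data.
 */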
static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
    struct greth_private *greth = bus->priv;
    int data;

    if (!wait_for_mdio(greth))
        return -EBUSY;

    GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);

    if (!wait_for_mdio(greth))
        return -EBUSY;

    if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
        data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
        return data;
    } else {
        return -1;
    }
}

static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
    struct greth_private *greth = bus->priv;

    if (!wait_for_mdio(greth))
        return -EBUSY;

    GRETH_REGSAVE(greth->regs->mdio,
              ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);

    if (!wait_for_mdio(greth))
        return -EBUSY;

    return 0;
}

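/* adjust_link callback invoked by the PHY library. Mirrors the PHY's
 * negotiated speed and duplex into the MAC control register under the
 * device lock and logs link transitions.
 */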
static void greth_link_change(struct net_device *dev)
{
    struct greth_private *greth = netdev_priv(dev);
    struct phy_device *phydev = dev->phydev;
    unsigned long flags;
    int status_change = 0;
    u32 ctrl;

    spin_lock_irqsave(&greth->devlock, flags);

    if (phydev->link) {

        if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
            ctrl = GRETH_REGLOAD(greth->regs->control) &
                   ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);

            if (phydev->duplex)
                ctrl |= GRETH_CTRL_FD;

            if (phydev->speed == SPEED_100)
                ctrl |= GRETH_CTRL_SP;
            else if (phydev->speed == SPEED_1000)
                ctrl |= GRETH_CTRL_GB;

            GRETH_REGSAVE(greth->regs->control, ctrl);
            greth->speed = phydev->speed;
            greth->duplex = phydev->duplex;
            status_change = 1;
        }
    }

    if (phydev->link != greth->link) {
        if (!phydev->link) {
            greth->speed = 0;
            greth->duplex = -1;
        }
        greth->link = phydev->link;

        status_change = 1;
    }

    spin_unlock_irqrestore(&greth->devlock, flags);

    if (status_change) {
        if (phydev->link)
            pr_debug("%s: link up (%d/%s)\n",
                dev->name, phydev->speed,
                DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
        else
            pr_debug("%s: link down\n", dev->name);
    }
}

static int greth_mdio_probe(struct net_device *dev)
{
    struct greth_private *greth = netdev_priv(dev);
    struct phy_device *phy = NULL;
    int ret;

    /* Find the first PHY */
    phy = phy_find_first(greth->mdio);

    if (!phy) {
        if (netif_msg_probe(greth))
            dev_err(&dev->dev, "no PHY found\n");
        return -ENXIO;
    }

    ret = phy_connect_direct(dev, phy, &greth_link_change,
                 greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII);
    if (ret) {
        if (netif_msg_ifup(greth))
            dev_err(&dev->dev, "could not attach to PHY\n");
        return ret;
    }

    if (greth->gbit_mac)
        phy_set_max_speed(phy, SPEED_1000);
    else
        phy_set_max_speed(phy, SPEED_100);

    linkmode_copy(phy->advertising, phy->supported);

    greth->link = 0;
    greth->speed = 0;
    greth->duplex = -1;

    return 0;
}

static int greth_mdio_init(struct greth_private *greth)
{
    int ret;
    unsigned long timeout;
    struct net_device *ndev = greth->netdev;

    greth->mdio = mdiobus_alloc();
    if (!greth->mdio)
        return -ENOMEM;

    greth->mdio->name = "greth-mdio";
    snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
    greth->mdio->read = greth_mdio_read;
    greth->mdio->write = greth_mdio_write;
    greth->mdio->priv = greth;

    ret = mdiobus_register(greth->mdio);
    if (ret)
        goto error;

    ret = greth_mdio_probe(greth->netdev);
    if (ret) {
        if (netif_msg_probe(greth))
            dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
        goto unreg_mdio;
    }

    phy_start(ndev->phydev);

    /* If Ethernet debug link is used make autoneg happen right away */
    if (greth->edcl && greth_edcl == 1) {
        phy_start_aneg(ndev->phydev);
        timeout = jiffies + 6*HZ;
        while (!phy_aneg_done(ndev->phydev) &&
               time_before(jiffies, timeout)) {
        }
        phy_read_status(ndev->phydev);
        greth_link_change(greth->netdev);
    }

    return 0;

unreg_mdio:
    mdiobus_unregister(greth->mdio);
error:
    mdiobus_free(greth->mdio);
    return ret;
}

/* Initialize the GRETH MAC */
static int greth_of_probe(struct platform_device *ofdev)
{
    struct net_device *dev;
    struct greth_private *greth;
    struct greth_regs *regs;

    int i;
    int err;
    int tmp;
    u8 addr[ETH_ALEN];
    unsigned long timeout;

    dev = alloc_etherdev(sizeof(struct greth_private));

    if (dev == NULL)
        return -ENOMEM;

    greth = netdev_priv(dev);
    greth->netdev = dev;
    greth->dev = &ofdev->dev;

    if (greth_debug > 0)
        greth->msg_enable = greth_debug;
    else
        greth->msg_enable = GRETH_DEF_MSG_ENABLE;

    spin_lock_init(&greth->devlock);

    greth->regs = of_ioremap(&ofdev->resource[0], 0,
                 resource_size(&ofdev->resource[0]),
                 "grlib-greth regs");

    if (greth->regs == NULL) {
        if (netif_msg_probe(greth))
            dev_err(greth->dev, "ioremap failure.\n");
        err = -EIO;
        goto error1;
    }

    regs = greth->regs;
    greth->irq = ofdev->archdata.irqs[0];

    dev_set_drvdata(greth->dev, dev);
    SET_NETDEV_DEV(dev, greth->dev);

    if (netif_msg_probe(greth))
        dev_dbg(greth->dev, "resetting controller.\n");

    /* Reset the controller. */
    GRETH_REGSAVE(regs->control, GRETH_RESET);

    /* Wait for MAC to reset itself */
    timeout = jiffies + HZ/100;
    while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
        if (time_after(jiffies, timeout)) {
            err = -EIO;
            if (netif_msg_probe(greth))
                dev_err(greth->dev, "timeout when waiting for reset.\n");
            goto error2;
        }
    }

    /* Get default PHY address */
    greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;

    /* Check if we have GBIT capable MAC */
    tmp = GRETH_REGLOAD(regs->control);
    greth->gbit_mac = (tmp >> 27) & 1;

    /* Check for multicast capability */
    greth->multicast = (tmp >> 25) & 1;

    greth->edcl = (tmp >> 31) & 1;

    /* If we have EDCL we disable the EDCL speed-duplex FSM so
     * it doesn't interfere with the software */
    if (greth->edcl != 0)
        GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);

    /* Check if MAC can handle MDIO interrupts */
    greth->mdio_int_en = (tmp >> 26) & 1;

    err = greth_mdio_init(greth);
    if (err) {
        if (netif_msg_probe(greth))
            dev_err(greth->dev, "failed to register MDIO bus\n");
        goto error2;
    }

    /* Allocate TX descriptor ring in coherent memory */
    greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
                           &greth->tx_bd_base_phys,
                           GFP_KERNEL);
    if (!greth->tx_bd_base) {
        err = -ENOMEM;
        goto error3;
    }

    /* Allocate RX descriptor ring in coherent memory */
    greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
                           &greth->rx_bd_base_phys,
                           GFP_KERNEL);
    if (!greth->rx_bd_base) {
        err = -ENOMEM;
        goto error4;
    }

    /* Get MAC address from: module param, OF property or ID prom */
    for (i = 0; i < 6; i++) {
        if (macaddr[i] != 0)
            break;
    }
    if (i == 6) {
        err = of_get_mac_address(ofdev->dev.of_node, addr);
        if (!err) {
            for (i = 0; i < 6; i++)
                macaddr[i] = (unsigned int) addr[i];
        } else {
#ifdef CONFIG_SPARC
            for (i = 0; i < 6; i++)
                macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
#endif
        }
    }

    for (i = 0; i < 6; i++)
        addr[i] = macaddr[i];
    eth_hw_addr_set(dev, addr);

    macaddr[5]++;

    if (!is_valid_ether_addr(&dev->dev_addr[0])) {
        if (netif_msg_probe(greth))
            dev_err(greth->dev, "no valid ethernet address, aborting.\n");
        err = -EINVAL;
        goto error5;
    }

    GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
    GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
              dev->dev_addr[4] << 8 | dev->dev_addr[5]);

    /* Clear all pending interrupts except PHY irq */
    GRETH_REGSAVE(regs->status, 0xFF);

    if (greth->gbit_mac) {
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
            NETIF_F_RXCSUM;
        dev->features = dev->hw_features | NETIF_F_HIGHDMA;
        greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
    }

    if (greth->multicast) {
        greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
        dev->flags |= IFF_MULTICAST;
    } else {
        dev->flags &= ~IFF_MULTICAST;
    }

    dev->netdev_ops = &greth_netdev_ops;
    dev->ethtool_ops = &greth_ethtool_ops;

    err = register_netdev(dev);
    if (err) {
        if (netif_msg_probe(greth))
            dev_err(greth->dev, "netdevice registration failed.\n");
        goto error5;
    }

    /* setup NAPI */
    netif_napi_add(dev, &greth->napi, greth_poll, 64);

    return 0;

error5:
    dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
error4:
    dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
error3:
    mdiobus_unregister(greth->mdio);
error2:
    of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
error1:
    free_netdev(dev);
    return err;
}

static int greth_of_remove(struct platform_device *of_dev)
{
    struct net_device *ndev = platform_get_drvdata(of_dev);
    struct greth_private *greth = netdev_priv(ndev);

    /* Free descriptor areas */
    dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);

    dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);

    if (ndev->phydev)
        phy_stop(ndev->phydev);
    mdiobus_unregister(greth->mdio);

    unregister_netdev(ndev);

    of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));

    free_netdev(ndev);

    return 0;
}

static const struct of_device_id greth_of_match[] = {
    {
     .name = "GAISLER_ETHMAC",
    },
    {
     .name = "01_01d",
    },
    {},
};

MODULE_DEVICE_TABLE(of, greth_of_match);

static struct platform_driver greth_of_driver = {
    .driver = {
        .name = "grlib-greth",
        .of_match_table = greth_of_match,
    },
    .probe = greth_of_probe,
    .remove = greth_of_remove,
};

module_platform_driver(greth_of_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");