// SPDX-License-Identifier: GPL-2.0
/* Driver for SGI's IOC3 based Ethernet cards as found on PCI cards.
 *
 * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
 *
 * References:
 *  o IOC3 ASIC specification 4.51, 1996-04-18
 *  o IEEE 802.3 specification, 2000 edition
 *  o DP83840A Specification, National Semiconductor, March 1997
 *
 * To do:
 *
 *  o Use prefetching for large packets.  What is a good lower limit for
 *    prefetching?
 *  o Use hardware checksums.
 *  o Which PHYs might possibly be attached to the IOC3 in real life,
 *    which workarounds are required for them?  Do we ever have Lucent's?
 *  o For the 2.5 branch kill the mii-tool ioctls.
 */

#define IOC3_NAME   "ioc3-eth"
#define IOC3_VERSION    "2.6.3-4"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/crc16.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/nvmem-consumer.h>

#include <net/ip.h>

#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>

#define CRC16_INIT  0
#define CRC16_VALID 0xb001

/* Number of RX buffers.  This is tunable in the range of 16 <= x < 512.
 * The value must be a power of two.
 */
#define RX_BUFFS        64
#define RX_RING_ENTRIES     512     /* fixed in hardware */
#define RX_RING_MASK        (RX_RING_ENTRIES - 1)
#define RX_RING_SIZE        (RX_RING_ENTRIES * sizeof(u64))

/* 128 TX buffers (not tunable) */
#define TX_RING_ENTRIES     128
#define TX_RING_MASK        (TX_RING_ENTRIES - 1)
#define TX_RING_SIZE        (TX_RING_ENTRIES * sizeof(struct ioc3_etxd))

/* IOC3 does dma transfers in 128 byte blocks */
#define IOC3_DMA_XFER_LEN   128UL

/* Every RX buffer starts with 8 byte descriptor data */
#define RX_OFFSET       (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN)
#define RX_BUF_SIZE     (13 * IOC3_DMA_XFER_LEN)

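/* ETCSR transmit control values: the low bits hold the inter-packet gap
 * and the two shifted fields hold the IPGR1/IPGR2 carrier-deference
 * timers (presumably in media bit times).  Half duplex uses shorter
 * IPGR values than full duplex.
 */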
#define ETCSR_FD   ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21)
#define ETCSR_HD   ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21)

/* Private per NIC data of the driver.  */
struct ioc3_private {
    struct ioc3_ethregs *regs;
    struct device *dma_dev;
    u32 *ssram;
    unsigned long *rxr;     /* pointer to receiver ring */
    void *tx_ring;
    struct ioc3_etxd *txr;
    dma_addr_t rxr_dma;
    dma_addr_t txr_dma;
    struct sk_buff *rx_skbs[RX_RING_ENTRIES];
    struct sk_buff *tx_skbs[TX_RING_ENTRIES];
    int rx_ci;          /* RX consumer index */
    int rx_pi;          /* RX producer index */
    int tx_ci;          /* TX consumer index */
    int tx_pi;          /* TX producer index */
    int txqlen;
    u32 emcr, ehar_h, ehar_l;
    spinlock_t ioc3_lock;
    struct mii_if_info mii;

    /* Members used by autonegotiation  */
    struct timer_list ioc3_timer;
};

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev, unsigned int txqueue);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static void ioc3_start(struct ioc3_private *ip);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);
static int ioc3_alloc_rx_bufs(struct net_device *dev);
static void ioc3_free_rx_bufs(struct ioc3_private *ip);
static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);

static const struct ethtool_ops ioc3_ethtool_ops;

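/* How many bytes must be skipped to round @addr up to the next
 * IOC3_DMA_XFER_LEN (128 byte) boundary: this is (-addr) mod 128,
 * computed via the two's complement identity -addr == ~addr + 1.
 */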
static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
    return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL);
}

static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb,
                 struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma)
{
    struct sk_buff *new_skb;
    dma_addr_t d;
    int offset;

    new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC);
    if (!new_skb)
        return -ENOMEM;

    /* ensure buffer is aligned to IOC3_DMA_XFER_LEN */
    offset = aligned_rx_skb_addr((unsigned long)new_skb->data);
    if (offset)
        skb_reserve(new_skb, offset);

    d = dma_map_single(ip->dma_dev, new_skb->data,
               RX_BUF_SIZE, DMA_FROM_DEVICE);

    if (dma_mapping_error(ip->dma_dev, d)) {
        dev_kfree_skb_any(new_skb);
        return -ENOMEM;
    }
    *rxb_dma = d;
    *rxb = (struct ioc3_erxbuf *)new_skb->data;
    skb_reserve(new_skb, RX_OFFSET);
    *skb = new_skb;

    return 0;
}

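/* On Xtalk bridge based systems a 64-bit PCI DMA address carries
 * attribute bits in its upper part (barrier, prefetch and similar
 * hints); ioc3_map() folds the requested attribute into the bus
 * address.  Without the bridge the address is used unmodified.
 */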
#ifdef CONFIG_PCI_XTALK_BRIDGE
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
    return (addr & ~PCI64_ATTR_BAR) | attr;
}

#define ERBAR_VAL   (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT)
#else
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
    return addr;
}

#define ERBAR_VAL   0
#endif

static int ioc3eth_nvmem_match(struct device *dev, const void *data)
{
    const char *name = dev_name(dev);
    const char *prefix = data;
    int prefix_len;

    prefix_len = strlen(prefix);
    if (strlen(name) < (prefix_len + 3))
        return 0;

    if (memcmp(prefix, name, prefix_len) != 0)
        return 0;

    /* found an nvmem device attached to our ioc3;
     * now check for the one-wire family codes 09, 89 and 91
     */
    if (memcmp(name + prefix_len, "09-", 3) == 0)
        return 1;
    if (memcmp(name + prefix_len, "89-", 3) == 0)
        return 1;
    if (memcmp(name + prefix_len, "91-", 3) == 0)
        return 1;

    return 0;
}

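/* The board's MAC address lives in a one-wire serial number PROM hooked
 * up to the IOC3.  We read the first 16 bytes, validate them with a
 * CRC-16 over the first 13 bytes, and pull the address out in reverse
 * byte order (offsets 10 down to 5).
 */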
static int ioc3eth_get_mac_addr(struct resource *res, u8 mac_addr[6])
{
    struct nvmem_device *nvmem;
    char prefix[24];
    u8 prom[16];
    int ret;
    int i;

    snprintf(prefix, sizeof(prefix), "ioc3-%012llx-",
         res->start & ~0xffff);

    nvmem = nvmem_device_find(prefix, ioc3eth_nvmem_match);
    if (IS_ERR(nvmem))
        return PTR_ERR(nvmem);

    ret = nvmem_device_read(nvmem, 0, 16, prom);
    nvmem_device_put(nvmem);
    if (ret < 0)
        return ret;

    /* check if the content is valid */
    if (prom[0] != 0x0a ||
        crc16(CRC16_INIT, prom, 13) != CRC16_VALID)
        return -EINVAL;

    for (i = 0; i < 6; i++)
        mac_addr[i] = prom[10 - i];

    return 0;
}

static void __ioc3_set_mac_address(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);

    writel((dev->dev_addr[5] <<  8) |
           dev->dev_addr[4],
           &ip->regs->emar_h);
    writel((dev->dev_addr[3] << 24) |
           (dev->dev_addr[2] << 16) |
           (dev->dev_addr[1] <<  8) |
           dev->dev_addr[0],
           &ip->regs->emar_l);
}

static int ioc3_set_mac_address(struct net_device *dev, void *addr)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct sockaddr *sa = addr;

    eth_hw_addr_set(dev, sa->sa_data);

    spin_lock_irq(&ip->ioc3_lock);
    __ioc3_set_mac_address(dev);
    spin_unlock_irq(&ip->ioc3_lock);

    return 0;
}

/* Caller must hold the ioc3_lock even for MII readers.  The lock is also
 * used to protect the transmitter side but it's low contention.
 */
static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct ioc3_ethregs *regs = ip->regs;

    while (readl(&regs->micr) & MICR_BUSY)
        ;
    writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG,
           &regs->micr);
    while (readl(&regs->micr) & MICR_BUSY)
        ;

    return readl(&regs->midr_r) & MIDR_DATA_MASK;
}

static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct ioc3_ethregs *regs = ip->regs;

    while (readl(&regs->micr) & MICR_BUSY)
        ;
    writel(data, &regs->midr_w);
    writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr);
    while (readl(&regs->micr) & MICR_BUSY)
        ;
}

static int ioc3_mii_init(struct ioc3_private *ip);

static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct ioc3_ethregs *regs = ip->regs;

    dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK;
    return &dev->stats;
}

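/* RX checksum offload, as implemented below: the chip hands us a plain
 * 1's complement sum over the whole received frame, including the MAC
 * header and the trailing FCS.  To verify a TCP/UDP checksum we add the
 * missing pseudo header and then cancel the unwanted parts by adding
 * their 1's complement (subtraction in 1's complement arithmetic).  If
 * the packet was consistent the folded result comes out as 0xffff.
 */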
static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len)
{
    struct ethhdr *eh = eth_hdr(skb);
    unsigned int proto;
    unsigned char *cp;
    struct iphdr *ih;
    u32 csum, ehsum;
    u16 *ew;

    /* Did hardware handle the checksum at all?  The cases we can handle
     * are:
     *
     * - TCP and UDP checksums of IPv4 only.
     * - IPv6 would be doable but we keep that for later ...
     * - Only unfragmented packets.  Did somebody already tell you
     *   fragmentation is evil?
     * - don't care about packet size.  Worst case when processing a
     *   malformed packet we'll try to access the packet at ip header +
     *   64 bytes which is still inside the skb.  Even in the unlikely
     *   case where the checksum is right the higher layers will still
     *   drop the packet as appropriate.
     */
    if (eh->h_proto != htons(ETH_P_IP))
        return;

    ih = (struct iphdr *)((char *)eh + ETH_HLEN);
    if (ip_is_fragment(ih))
        return;

    proto = ih->protocol;
    if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
        return;

    /* Same as tx - compute csum of pseudo header  */
    csum = hwsum +
           (ih->tot_len - (ih->ihl << 2)) +
           htons((u16)ih->protocol) +
           (ih->saddr >> 16) + (ih->saddr & 0xffff) +
           (ih->daddr >> 16) + (ih->daddr & 0xffff);

    /* Sum up ethernet dest addr, src addr and protocol  */
    ew = (u16 *)eh;
    ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];

    ehsum = (ehsum & 0xffff) + (ehsum >> 16);
    ehsum = (ehsum & 0xffff) + (ehsum >> 16);

    csum += 0xffff ^ ehsum;

    /* In the next step we also subtract the 1's complement
     * checksum of the trailing ethernet CRC.
     */
    cp = (unsigned char *)eh + len; /* points at trailing CRC */
    if (len & 1) {
        csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]);
        csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]);
    } else {
        csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]);
        csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]);
    }

    csum = (csum & 0xffff) + (csum >> 16);
    csum = (csum & 0xffff) + (csum >> 16);

    if (csum == 0xffff)
        skb->ip_summed = CHECKSUM_UNNECESSARY;
}

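/* RX ring handling: rx_ci is the entry we expect the chip to fill next,
 * rx_pi the slot where we queue freshly allocated buffers.  Each 64-bit
 * ring word holds the (attribute-tagged) DMA address of an ioc3_erxbuf;
 * the chip sets the ERXBUF_V valid bit in the buffer's w0 word once it
 * has written a frame, so we walk entries until we hit one that is not
 * valid yet.
 */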
static inline void ioc3_rx(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct sk_buff *skb, *new_skb;
    int rx_entry, n_entry, len;
    struct ioc3_erxbuf *rxb;
    unsigned long *rxr;
    dma_addr_t d;
    u32 w0, err;

    rxr = ip->rxr;      /* Ring base */
    rx_entry = ip->rx_ci;               /* RX consume index */
    n_entry = ip->rx_pi;

    skb = ip->rx_skbs[rx_entry];
    rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
    w0 = be32_to_cpu(rxb->w0);

    while (w0 & ERXBUF_V) {
        err = be32_to_cpu(rxb->err);        /* It's valid ...  */
        if (err & ERXBUF_GOODPKT) {
            len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
            skb_put(skb, len);
            skb->protocol = eth_type_trans(skb, dev);

            if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) {
                /* Ouch, drop this packet and recycle its skb
                 * to keep the ring filled.
                 */
                dev->stats.rx_dropped++;
                new_skb = skb;
                d = rxr[rx_entry];
                goto next;
            }

            if (likely(dev->features & NETIF_F_RXCSUM))
                ioc3_tcpudp_checksum(skb,
                             w0 & ERXBUF_IPCKSUM_MASK,
                             len);

            dma_unmap_single(ip->dma_dev, rxr[rx_entry],
                     RX_BUF_SIZE, DMA_FROM_DEVICE);

            netif_rx(skb);

            ip->rx_skbs[rx_entry] = NULL;   /* Poison  */

            dev->stats.rx_packets++;        /* Statistics */
            dev->stats.rx_bytes += len;
        } else {
            /* The frame is invalid and the skb never
             * reached the network layer so we can just
             * recycle it.
             */
            new_skb = skb;
            d = rxr[rx_entry];
            dev->stats.rx_errors++;
        }
        if (err & ERXBUF_CRCERR)    /* Statistics */
            dev->stats.rx_crc_errors++;
        if (err & ERXBUF_FRAMERR)
            dev->stats.rx_frame_errors++;

next:
        ip->rx_skbs[n_entry] = new_skb;
        rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
        rxb->w0 = 0;                /* Clear valid flag */
        n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */

        /* Now go on to the next ring entry.  */
        rx_entry = (rx_entry + 1) & RX_RING_MASK;
        skb = ip->rx_skbs[rx_entry];
        rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
        w0 = be32_to_cpu(rxb->w0);
    }
    writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir);
    ip->rx_pi = n_entry;
    ip->rx_ci = rx_entry;
}

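/* TX completion: ETCIR holds the byte offset of the next descriptor the
 * chip will fetch.  A struct ioc3_etxd is 128 bytes, so shifting the
 * offset right by 7 turns it into a ring index; everything between our
 * software consumer index and that point has been transmitted.
 */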
static inline void ioc3_tx(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct ioc3_ethregs *regs = ip->regs;
    unsigned long packets, bytes;
    int tx_entry, o_entry;
    struct sk_buff *skb;
    u32 etcir;

    spin_lock(&ip->ioc3_lock);
    etcir = readl(&regs->etcir);

    tx_entry = (etcir >> 7) & TX_RING_MASK;
    o_entry = ip->tx_ci;
    packets = 0;
    bytes = 0;

    while (o_entry != tx_entry) {
        packets++;
        skb = ip->tx_skbs[o_entry];
        bytes += skb->len;
        dev_consume_skb_irq(skb);
        ip->tx_skbs[o_entry] = NULL;

        o_entry = (o_entry + 1) & TX_RING_MASK; /* Next */

        etcir = readl(&regs->etcir);        /* More pkts sent?  */
        tx_entry = (etcir >> 7) & TX_RING_MASK;
    }

    dev->stats.tx_packets += packets;
    dev->stats.tx_bytes += bytes;
    ip->txqlen -= packets;

    if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES)
        netif_wake_queue(dev);

    ip->tx_ci = o_entry;
    spin_unlock(&ip->ioc3_lock);
}

/* Deal with fatal IOC3 errors.  This condition might be caused by
 * hardware or software problems, so we should try to recover
 * more gracefully if this ever happens.  In theory we might be flooded
 * with such error interrupts if something really goes wrong, so we might
 * also consider taking the interface down.
 */
static void ioc3_error(struct net_device *dev, u32 eisr)
{
    struct ioc3_private *ip = netdev_priv(dev);

    spin_lock(&ip->ioc3_lock);

    if (eisr & EISR_RXOFLO)
        net_err_ratelimited("%s: RX overflow.\n", dev->name);
    if (eisr & EISR_RXBUFOFLO)
        net_err_ratelimited("%s: RX buffer overflow.\n", dev->name);
    if (eisr & EISR_RXMEMERR)
        net_err_ratelimited("%s: RX PCI error.\n", dev->name);
    if (eisr & EISR_RXPARERR)
        net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name);
    if (eisr & EISR_TXBUFUFLO)
        net_err_ratelimited("%s: TX buffer underflow.\n", dev->name);
    if (eisr & EISR_TXMEMERR)
        net_err_ratelimited("%s: TX PCI error.\n", dev->name);

    ioc3_stop(ip);
    ioc3_free_rx_bufs(ip);
    ioc3_clean_tx_ring(ip);

    ioc3_init(dev);
    if (ioc3_alloc_rx_bufs(dev)) {
        netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
        spin_unlock(&ip->ioc3_lock);
        return;
    }
    ioc3_start(ip);
    ioc3_mii_init(ip);

    netif_wake_queue(dev);

    spin_unlock(&ip->ioc3_lock);
}

/* The interrupt handler does all of the Rx thread work and cleans up
 * after the Tx thread.
 */
static irqreturn_t ioc3_interrupt(int irq, void *dev_id)
{
    struct ioc3_private *ip = netdev_priv(dev_id);
    struct ioc3_ethregs *regs = ip->regs;
    u32 eisr;

    eisr = readl(&regs->eisr);
    writel(eisr, &regs->eisr);
    readl(&regs->eisr);             /* Flush */

    if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
            EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
        ioc3_error(dev_id, eisr);
    if (eisr & EISR_RXTIMERINT)
        ioc3_rx(dev_id);
    if (eisr & EISR_TXEXPLICIT)
        ioc3_tx(dev_id);

    return IRQ_HANDLED;
}

static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
    struct ioc3_ethregs *regs = ip->regs;

    spin_lock_irq(&ip->ioc3_lock);

    if (ip->mii.full_duplex) {
        writel(ETCSR_FD, &regs->etcsr);
        ip->emcr |= EMCR_DUPLEX;
    } else {
        writel(ETCSR_HD, &regs->etcsr);
        ip->emcr &= ~EMCR_DUPLEX;
    }
    writel(ip->emcr, &regs->emcr);

    spin_unlock_irq(&ip->ioc3_lock);
}

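/* Link supervision: a self-rearming timer polls the PHY every 1.2
 * seconds.  mii_check_media() logs link changes and updates
 * ip->mii.full_duplex, which we then push into ETCSR/EMCR.
 */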
static void ioc3_timer(struct timer_list *t)
{
    struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);

    /* Print the link status if it has changed */
    mii_check_media(&ip->mii, 1, 0);
    ioc3_setup_duplex(ip);

    ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */
    add_timer(&ip->ioc3_timer);
}

/* Try to find a PHY.  There is no apparent relation between the MII addresses
 * in the SGI documentation and what we find in reality, so we simply probe
 * for the PHY.
 */
static int ioc3_mii_init(struct ioc3_private *ip)
{
    u16 word;
    int i;

    for (i = 0; i < 32; i++) {
        word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);

        if (word != 0xffff && word != 0x0000) {
            ip->mii.phy_id = i;
            return 0;
        }
    }
    ip->mii.phy_id = -1;
    return -ENODEV;
}

static void ioc3_mii_start(struct ioc3_private *ip)
{
    ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10;  /* 1.2 sec. */
    add_timer(&ip->ioc3_timer);
}

static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry)
{
    struct ioc3_etxd *desc;
    u32 cmd, bufcnt, len;

    desc = &ip->txr[entry];
    cmd = be32_to_cpu(desc->cmd);
    bufcnt = be32_to_cpu(desc->bufcnt);
    if (cmd & ETXD_B1V) {
        len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT;
        dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1),
                 len, DMA_TO_DEVICE);
    }
    if (cmd & ETXD_B2V) {
        len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT;
        dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2),
                 len, DMA_TO_DEVICE);
    }
}

static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
{
    struct sk_buff *skb;
    int i;

    for (i = 0; i < TX_RING_ENTRIES; i++) {
        skb = ip->tx_skbs[i];
        if (skb) {
            ioc3_tx_unmap(ip, i);
            ip->tx_skbs[i] = NULL;
            dev_kfree_skb_any(skb);
        }
        ip->txr[i].cmd = 0;
    }
    ip->tx_pi = 0;
    ip->tx_ci = 0;
}

static void ioc3_free_rx_bufs(struct ioc3_private *ip)
{
    int rx_entry, n_entry;
    struct sk_buff *skb;

    n_entry = ip->rx_ci;
    rx_entry = ip->rx_pi;

    while (n_entry != rx_entry) {
        skb = ip->rx_skbs[n_entry];
        if (skb) {
            dma_unmap_single(ip->dma_dev,
                     be64_to_cpu(ip->rxr[n_entry]),
                     RX_BUF_SIZE, DMA_FROM_DEVICE);
            dev_kfree_skb_any(skb);
        }
        n_entry = (n_entry + 1) & RX_RING_MASK;
    }
}

static int ioc3_alloc_rx_bufs(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct ioc3_erxbuf *rxb;
    dma_addr_t d;
    int i;

    /* Now the rx buffers.  The RX ring may be larger but
     * we only allocate RX_BUFFS buffers for now.  Need to tune
     * this for performance and memory later.
     */
    for (i = 0; i < RX_BUFFS; i++) {
        if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d))
            return -ENOMEM;

        rxb->w0 = 0;    /* Clear valid flag */
        ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
    }
    ip->rx_ci = 0;
    ip->rx_pi = RX_BUFFS;

    return 0;
}

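/* SSRAM size discovery: provisionally configure the chip for the large
 * (128 kbyte) SSRAM, write complementary test patterns into the two
 * 64 kbyte halves and read them back.  If the second half aliases the
 * first, only the small SSRAM is fitted and EMCR_BUFSIZ is cleared
 * again.  Parity checking is enabled in either case.
 */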
static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
    struct ioc3_ethregs *regs = ip->regs;
    u32 *ssram0 = &ip->ssram[0x0000];
    u32 *ssram1 = &ip->ssram[0x4000];
    u32 pattern = 0x5555;

    /* Assume the larger size SSRAM and enable parity checking */
    writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr);
    readl(&regs->emcr); /* Flush */

    writel(pattern, ssram0);
    writel(~pattern & IOC3_SSRAM_DM, ssram1);

    if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern ||
        (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
        /* set ssram size to 64 KB */
        ip->emcr |= EMCR_RAMPAR;
        writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr);
    } else {
        ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR;
    }
}

static void ioc3_init(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct ioc3_ethregs *regs = ip->regs;

    del_timer_sync(&ip->ioc3_timer);    /* Kill if running  */

    writel(EMCR_RST, &regs->emcr);      /* Reset        */
    readl(&regs->emcr);         /* Flush WB     */
    udelay(4);              /* Give it time ... */
    writel(0, &regs->emcr);
    readl(&regs->emcr);

    /* Misc registers  */
    writel(ERBAR_VAL, &regs->erbar);
    readl(&regs->etcdc);            /* Clear on read */
    writel(15, &regs->ercsr);       /* RX low watermark  */
    writel(0, &regs->ertr);         /* Interrupt immediately */
    __ioc3_set_mac_address(dev);
    writel(ip->ehar_h, &regs->ehar_h);
    writel(ip->ehar_l, &regs->ehar_l);
    writel(42, &regs->ersr);        /* XXX should be random */
}

static void ioc3_start(struct ioc3_private *ip)
{
    struct ioc3_ethregs *regs = ip->regs;
    unsigned long ring;

    /* Now the rx ring base, consume & produce registers.  */
    ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC);
    writel(ring >> 32, &regs->erbr_h);
    writel(ring & 0xffffffff, &regs->erbr_l);
    writel(ip->rx_ci << 3, &regs->ercir);
    writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir);

    ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC);

    ip->txqlen = 0;                 /* nothing queued  */

    /* Now the tx ring base, consume & produce registers.  */
    writel(ring >> 32, &regs->etbr_h);
    writel(ring & 0xffffffff, &regs->etbr_l);
    writel(ip->tx_pi << 7, &regs->etpir);
    writel(ip->tx_ci << 7, &regs->etcir);
    readl(&regs->etcir);                /* Flush */

    ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
            EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
    writel(ip->emcr, &regs->emcr);
    writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
           EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
           EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier);
    readl(&regs->eier);
}

static inline void ioc3_stop(struct ioc3_private *ip)
{
    struct ioc3_ethregs *regs = ip->regs;

    writel(0, &regs->emcr);         /* Shut up */
    writel(0, &regs->eier);         /* Disable interrupts */
    readl(&regs->eier);         /* Flush */
}

static int ioc3_open(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);

    ip->ehar_h = 0;
    ip->ehar_l = 0;

    ioc3_init(dev);
    if (ioc3_alloc_rx_bufs(dev)) {
        netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
        return -ENOMEM;
    }
    ioc3_start(ip);
    ioc3_mii_start(ip);

    netif_start_queue(dev);
    return 0;
}

static int ioc3_close(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);

    del_timer_sync(&ip->ioc3_timer);

    netif_stop_queue(dev);

    ioc3_stop(ip);

    ioc3_free_rx_bufs(ip);
    ioc3_clean_tx_ring(ip);

    return 0;
}

static const struct net_device_ops ioc3_netdev_ops = {
    .ndo_open       = ioc3_open,
    .ndo_stop       = ioc3_close,
    .ndo_start_xmit     = ioc3_start_xmit,
    .ndo_tx_timeout     = ioc3_timeout,
    .ndo_get_stats      = ioc3_get_stats,
    .ndo_set_rx_mode    = ioc3_set_multicast_list,
    .ndo_eth_ioctl      = ioc3_ioctl,
    .ndo_validate_addr  = eth_validate_addr,
    .ndo_set_mac_address    = ioc3_set_mac_address,
};

static int ioc3eth_probe(struct platform_device *pdev)
{
    u32 sw_physid1, sw_physid2, vendor, model, rev;
    struct ioc3_private *ip;
    struct net_device *dev;
    struct resource *regs;
    u8 mac_addr[6];
    int err;

    regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!regs) {
        dev_err(&pdev->dev, "Invalid resource\n");
        return -EINVAL;
    }
    /* get mac addr from one wire prom */
    if (ioc3eth_get_mac_addr(regs, mac_addr))
        return -EPROBE_DEFER; /* not available yet */

    dev = alloc_etherdev(sizeof(struct ioc3_private));
    if (!dev)
        return -ENOMEM;

    SET_NETDEV_DEV(dev, &pdev->dev);

    ip = netdev_priv(dev);
    ip->dma_dev = pdev->dev.parent;
    ip->regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(ip->regs)) {
        err = PTR_ERR(ip->regs);
        goto out_free;
    }

    ip->ssram = devm_platform_ioremap_resource(pdev, 1);
    if (IS_ERR(ip->ssram)) {
        err = PTR_ERR(ip->ssram);
        goto out_free;
    }

    dev->irq = platform_get_irq(pdev, 0);
    if (dev->irq < 0) {
        err = dev->irq;
        goto out_free;
    }

    if (devm_request_irq(&pdev->dev, dev->irq, ioc3_interrupt,
                 IRQF_SHARED, "ioc3-eth", dev)) {
        dev_err(&pdev->dev, "Can't get irq %d\n", dev->irq);
        err = -ENODEV;
        goto out_free;
    }

    spin_lock_init(&ip->ioc3_lock);
    timer_setup(&ip->ioc3_timer, ioc3_timer, 0);

    ioc3_stop(ip);

    /* Allocate rx ring.  4kb = 512 entries, must be 4kb aligned */
    ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma,
                     GFP_KERNEL);
    if (!ip->rxr) {
        pr_err("ioc3-eth: rx ring allocation failed\n");
        err = -ENOMEM;
        goto out_stop;
    }

    /* Allocate tx rings.  16kb = 128 bufs, must be 16kb aligned  */
    ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
                     &ip->txr_dma, GFP_KERNEL);
    if (!ip->tx_ring) {
        pr_err("ioc3-eth: tx ring allocation failed\n");
        err = -ENOMEM;
        goto out_stop;
    }
    /* Align TX ring */
    ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);
    ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K);

    ioc3_init(dev);

    ip->mii.phy_id_mask = 0x1f;
    ip->mii.reg_num_mask = 0x1f;
    ip->mii.dev = dev;
    ip->mii.mdio_read = ioc3_mdio_read;
    ip->mii.mdio_write = ioc3_mdio_write;

    ioc3_mii_init(ip);

    if (ip->mii.phy_id == -1) {
        netdev_err(dev, "Didn't find a PHY, goodbye.\n");
        err = -ENODEV;
        goto out_stop;
    }

    ioc3_mii_start(ip);
    ioc3_ssram_disc(ip);
    eth_hw_addr_set(dev, mac_addr);

    /* The IOC3-specific entries in the device structure. */
    dev->watchdog_timeo = 5 * HZ;
    dev->netdev_ops     = &ioc3_netdev_ops;
    dev->ethtool_ops    = &ioc3_ethtool_ops;
    dev->hw_features    = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
    dev->features       = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;

    sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
    sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);

    err = register_netdev(dev);
    if (err)
        goto out_stop;

    /* remove() fetches the netdev via drvdata, so set it here */
    platform_set_drvdata(pdev, dev);

    mii_check_media(&ip->mii, 1, 1);
    ioc3_setup_duplex(ip);

    vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
    model  = (sw_physid2 >> 4) & 0x3f;
    rev    = sw_physid2 & 0xf;
    netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n",
            ip->mii.phy_id, vendor, model, rev);
    netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n",
            ip->emcr & EMCR_BUFSIZ ? 128 : 64);

    return 0;

out_stop:
    del_timer_sync(&ip->ioc3_timer);
    if (ip->rxr)
        dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
                  ip->rxr_dma);
    if (ip->tx_ring)
        dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
                  ip->tx_ring, ip->txr_dma);
out_free:
    free_netdev(dev);
    return err;
}

static int ioc3eth_remove(struct platform_device *pdev)
{
    struct net_device *dev = platform_get_drvdata(pdev);
    struct ioc3_private *ip = netdev_priv(dev);

    unregister_netdev(dev);
    del_timer_sync(&ip->ioc3_timer);

    dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
    dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
              ip->tx_ring, ip->txr_dma);

    free_netdev(dev);

    return 0;
}

static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct ioc3_etxd *desc;
    unsigned long data;
    unsigned int len;
    int produce;
    u32 w0 = 0;

    /* IOC3 has a fairly simple minded checksumming hardware which simply
     * adds up the 1's complement checksum for the entire packet and
     * inserts it at an offset which can be specified in the descriptor
     * into the transmit packet.  This means we have to compensate for the
     * MAC header which should not be summed and the TCP/UDP pseudo headers
     * manually.
     */
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        const struct iphdr *ih = ip_hdr(skb);
        const int proto = ih->protocol;
        unsigned int csoff;
        u32 csum, ehsum;
        u16 *eh;

        /* The MAC header.  skb->mac seems the logical approach
         * to find the MAC header - except it's a NULL pointer ...
         */
        eh = (u16 *)skb->data;

        /* Sum up dest addr, src addr and protocol  */
        ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];

        /* Skip IP header; its sum is always zero and was
         * already filled in by ip_output.c
         */
        csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
                      ih->tot_len - (ih->ihl << 2),
                      proto, csum_fold(ehsum));

        csum = (csum & 0xffff) + (csum >> 16);  /* Fold again */
        csum = (csum & 0xffff) + (csum >> 16);

        csoff = ETH_HLEN + (ih->ihl << 2);
        if (proto == IPPROTO_UDP) {
            csoff += offsetof(struct udphdr, check);
            udp_hdr(skb)->check = csum;
        }
        if (proto == IPPROTO_TCP) {
            csoff += offsetof(struct tcphdr, check);
            tcp_hdr(skb)->check = csum;
        }

        w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
    }

    spin_lock_irq(&ip->ioc3_lock);

    data = (unsigned long)skb->data;
    len = skb->len;

    produce = ip->tx_pi;
    desc = &ip->txr[produce];

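    /* Three transmit strategies, chosen by size and alignment:
     *
     * 1. Up to 104 bytes are copied straight into the descriptor's
     *    inline data area, no DMA mapping needed.
     * 2. A buffer whose first and last byte differ in address bit 14
     *    straddles a 16 kB boundary, which a single DMA segment
     *    apparently must not cross, so it is split at that boundary
     *    and described with two buffer pointers (B1/B2).
     * 3. Everything else goes out as a single mapped buffer (B1 only).
     */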
    if (len <= 104) {
        /* Short packet, let's copy it directly into the ring.  */
        skb_copy_from_linear_data(skb, desc->data, skb->len);
        if (len < ETH_ZLEN) {
            /* Very short packet, pad with zeros at the end. */
            memset(desc->data + len, 0, ETH_ZLEN - len);
            len = ETH_ZLEN;
        }
        desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
        desc->bufcnt = cpu_to_be32(len);
    } else if ((data ^ (data + len - 1)) & 0x4000) {
        unsigned long b2 = (data | 0x3fffUL) + 1UL;
        unsigned long s1 = b2 - data;
        unsigned long s2 = data + len - b2;
        dma_addr_t d1, d2;

        desc->cmd    = cpu_to_be32(len | ETXD_INTWHENDONE |
                       ETXD_B1V | ETXD_B2V | w0);
        desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
                       (s2 << ETXD_B2CNT_SHIFT));
        d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE);
        if (dma_mapping_error(ip->dma_dev, d1))
            goto drop_packet;
        d2 = dma_map_single(ip->dma_dev, (void *)b2, s2, DMA_TO_DEVICE);
        if (dma_mapping_error(ip->dma_dev, d2)) {
            dma_unmap_single(ip->dma_dev, d1, s1, DMA_TO_DEVICE);
            goto drop_packet;
        }
        desc->p1     = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF));
        desc->p2     = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF));
    } else {
        dma_addr_t d;

        /* Normal sized packet that doesn't cross a page boundary. */
        desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
        desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
        d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(ip->dma_dev, d))
            goto drop_packet;
        desc->p1     = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF));
    }

    mb(); /* make sure all descriptor changes are visible */

    ip->tx_skbs[produce] = skb;         /* Remember skb */
    produce = (produce + 1) & TX_RING_MASK;
    ip->tx_pi = produce;
    writel(produce << 7, &ip->regs->etpir);     /* Fire ... */

    ip->txqlen++;

    if (ip->txqlen >= (TX_RING_ENTRIES - 1))
        netif_stop_queue(dev);

    spin_unlock_irq(&ip->ioc3_lock);

    return NETDEV_TX_OK;

drop_packet:
    dev_kfree_skb_any(skb);
    dev->stats.tx_dropped++;

    spin_unlock_irq(&ip->ioc3_lock);

    return NETDEV_TX_OK;
}

static void ioc3_timeout(struct net_device *dev, unsigned int txqueue)
{
    struct ioc3_private *ip = netdev_priv(dev);

    netdev_err(dev, "transmit timed out, resetting\n");

    spin_lock_irq(&ip->ioc3_lock);

    ioc3_stop(ip);
    ioc3_free_rx_bufs(ip);
    ioc3_clean_tx_ring(ip);

    ioc3_init(dev);
    if (ioc3_alloc_rx_bufs(dev)) {
        netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
        spin_unlock_irq(&ip->ioc3_lock);
        return;
    }
    ioc3_start(ip);
    ioc3_mii_init(ip);
    ioc3_mii_start(ip);

    spin_unlock_irq(&ip->ioc3_lock);

    netif_wake_queue(dev);
}

/* Given a multicast ethernet address, this routine calculates the
 * address's bit index in the logical address filter mask
 */
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
    unsigned int temp = 0;
    int bits;
    u32 crc;

    crc = ether_crc_le(ETH_ALEN, addr);

    crc &= 0x3f;    /* bit reverse lowest 6 bits for hash index */
    for (bits = 6; --bits >= 0; ) {
        temp <<= 1;
        temp |= (crc & 0x1);
        crc >>= 1;
    }

    return temp;
}

static void ioc3_get_drvinfo(struct net_device *dev,
                 struct ethtool_drvinfo *info)
{
    strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
    strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
    strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
        sizeof(info->bus_info));
}

static int ioc3_get_link_ksettings(struct net_device *dev,
                   struct ethtool_link_ksettings *cmd)
{
    struct ioc3_private *ip = netdev_priv(dev);

    spin_lock_irq(&ip->ioc3_lock);
    mii_ethtool_get_link_ksettings(&ip->mii, cmd);
    spin_unlock_irq(&ip->ioc3_lock);

    return 0;
}

static int ioc3_set_link_ksettings(struct net_device *dev,
                   const struct ethtool_link_ksettings *cmd)
{
    struct ioc3_private *ip = netdev_priv(dev);
    int rc;

    spin_lock_irq(&ip->ioc3_lock);
    rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
    spin_unlock_irq(&ip->ioc3_lock);

    return rc;
}

static int ioc3_nway_reset(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);
    int rc;

    spin_lock_irq(&ip->ioc3_lock);
    rc = mii_nway_restart(&ip->mii);
    spin_unlock_irq(&ip->ioc3_lock);

    return rc;
}

static u32 ioc3_get_link(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);
    int rc;

    spin_lock_irq(&ip->ioc3_lock);
    rc = mii_link_ok(&ip->mii);
    spin_unlock_irq(&ip->ioc3_lock);

    return rc;
}

static const struct ethtool_ops ioc3_ethtool_ops = {
    .get_drvinfo        = ioc3_get_drvinfo,
    .nway_reset     = ioc3_nway_reset,
    .get_link       = ioc3_get_link,
    .get_link_ksettings = ioc3_get_link_ksettings,
    .set_link_ksettings = ioc3_set_link_ksettings,
};

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct ioc3_private *ip = netdev_priv(dev);
    int rc;

    spin_lock_irq(&ip->ioc3_lock);
    rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
    spin_unlock_irq(&ip->ioc3_lock);

    return rc;
}

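/* The EHAR registers implement a 64-bit multicast hash filter: each
 * multicast address selects one of the 64 bits via ioc3_hash(), and a
 * set bit lets matching frames through.  With more than 64 multicast
 * addresses (or IFF_ALLMULTI) hashing buys nothing, so the filter is
 * simply opened completely.
 */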
static void ioc3_set_multicast_list(struct net_device *dev)
{
    struct ioc3_private *ip = netdev_priv(dev);
    struct ioc3_ethregs *regs = ip->regs;
    struct netdev_hw_addr *ha;
    u64 ehar = 0;

    spin_lock_irq(&ip->ioc3_lock);

    if (dev->flags & IFF_PROMISC) {         /* Set promiscuous.  */
        ip->emcr |= EMCR_PROMISC;
        writel(ip->emcr, &regs->emcr);
        readl(&regs->emcr);
    } else {
        ip->emcr &= ~EMCR_PROMISC;
        writel(ip->emcr, &regs->emcr);      /* Clear promiscuous. */
        readl(&regs->emcr);

        if ((dev->flags & IFF_ALLMULTI) ||
            (netdev_mc_count(dev) > 64)) {
            /* Too many for hashing to make sense or we want all
             * multicast packets anyway, so skip computing all the
             * hashes and just accept all packets.
             */
            ip->ehar_h = 0xffffffff;
            ip->ehar_l = 0xffffffff;
        } else {
            netdev_for_each_mc_addr(ha, dev) {
                ehar |= (1UL << ioc3_hash(ha->addr));
            }
            ip->ehar_h = ehar >> 32;
            ip->ehar_l = ehar & 0xffffffff;
        }
        writel(ip->ehar_h, &regs->ehar_h);
        writel(ip->ehar_l, &regs->ehar_l);
    }

    spin_unlock_irq(&ip->ioc3_lock);
}

static struct platform_driver ioc3eth_driver = {
    .probe  = ioc3eth_probe,
    .remove = ioc3eth_remove,
    .driver = {
        .name = "ioc3-eth",
    }
};

module_platform_driver(ioc3eth_driver);

MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");