// SPDX-License-Identifier: GPL-2.0-or-later
/*  D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng <edward_peng@dlink.com.tw>.
    Created 03-May-2001, based on Linux' sundance.c.

*/

#include "dl2k.h"
#include <linux/dma-mapping.h>

#define dw32(reg, val)  iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)  iowrite16(val, ioaddr + (reg))
#define dw8(reg, val)   iowrite8(val, ioaddr + (reg))
#define dr32(reg)   ioread32(ioaddr + (reg))
#define dr16(reg)   ioread16(ioaddr + (reg))
#define dr8(reg)    ioread8(ioaddr + (reg))
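/* These accessors assume a local 'ioaddr' variable is in scope; each
 * function below derives it from np->ioaddr (or from np->eeprom_addr
 * for EEPROM access).
 */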

#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow = -1;
static int rx_flow = -1;
static int copy_thresh;
static int rx_coalesce = 10;  /* Rx frame count each interrupt */
static int rx_timeout = 200;  /* Rx DMA wait time in 640ns increments */
static int tx_coalesce = 16;  /* HW xmit count each TxDMAComplete */


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);  /* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);   /* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */

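/* Illustrative module usage (the values here are examples, not
 * defaults):
 *
 *   modprobe dl2k media=auto mtu=1500 rx_coalesce=10 rx_timeout=200
 *
 * The array parameters (mtu, media, vlan, jumbo) take one value per
 * adapter, up to MAX_UNITS entries.
 */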

/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete | \
       UpdateStats | LinkEvent)

static void dl2k_enable_int(struct netdev_private *np)
{
    void __iomem *ioaddr = np->ioaddr;

    dw16(IntEnable, DEFAULT_INTR);
}

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (struct timer_list *t);
static void rio_tx_timeout (struct net_device *dev, unsigned int txqueue);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (struct netdev_private *, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
              u16 data);

static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
    .ndo_open       = rio_open,
    .ndo_start_xmit = start_xmit,
    .ndo_stop       = rio_close,
    .ndo_get_stats      = get_stats,
    .ndo_validate_addr  = eth_validate_addr,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_set_rx_mode    = set_multicast,
    .ndo_eth_ioctl      = rio_ioctl,
    .ndo_tx_timeout     = rio_tx_timeout,
};

static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct net_device *dev;
    struct netdev_private *np;
    static int card_idx;
    int chip_idx = ent->driver_data;
    int err, irq;
    void __iomem *ioaddr;
    void *ring_space;
    dma_addr_t ring_dma;

    err = pci_enable_device (pdev);
    if (err)
        return err;

    irq = pdev->irq;
    err = pci_request_regions (pdev, "dl2k");
    if (err)
        goto err_out_disable;

    pci_set_master (pdev);

    err = -ENOMEM;

    dev = alloc_etherdev (sizeof (*np));
    if (!dev)
        goto err_out_res;
    SET_NETDEV_DEV(dev, &pdev->dev);

    np = netdev_priv(dev);

    /* IO registers range. */
    ioaddr = pci_iomap(pdev, 0, 0);
    if (!ioaddr)
        goto err_out_dev;
    np->eeprom_addr = ioaddr;

#ifdef MEM_MAPPING
    /* MM registers range. */
    ioaddr = pci_iomap(pdev, 1, 0);
    if (!ioaddr)
        goto err_out_iounmap;
#endif
    np->ioaddr = ioaddr;
    np->chip_id = chip_idx;
    np->pdev = pdev;
    spin_lock_init (&np->tx_lock);
    spin_lock_init (&np->rx_lock);

    /* Parse manual configuration */
    np->an_enable = 1;
    np->tx_coalesce = 1;
    if (card_idx < MAX_UNITS) {
        if (media[card_idx] != NULL) {
            np->an_enable = 0;
            if (strcmp (media[card_idx], "auto") == 0 ||
                strcmp (media[card_idx], "autosense") == 0 ||
                strcmp (media[card_idx], "0") == 0 ) {
                np->an_enable = 2;
            } else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
                strcmp (media[card_idx], "4") == 0) {
                np->speed = 100;
                np->full_duplex = 1;
            } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
                   strcmp (media[card_idx], "3") == 0) {
                np->speed = 100;
                np->full_duplex = 0;
            } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
                   strcmp (media[card_idx], "2") == 0) {
                np->speed = 10;
                np->full_duplex = 1;
            } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
                   strcmp (media[card_idx], "1") == 0) {
                np->speed = 10;
                np->full_duplex = 0;
            } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
                 strcmp (media[card_idx], "6") == 0) {
                np->speed = 1000;
                np->full_duplex = 1;
            } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
                 strcmp (media[card_idx], "5") == 0) {
                np->speed = 1000;
                np->full_duplex = 0;
            } else {
                np->an_enable = 1;
            }
        }
        if (jumbo[card_idx] != 0) {
            np->jumbo = 1;
            dev->mtu = MAX_JUMBO;
        } else {
            np->jumbo = 0;
            if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
                dev->mtu = mtu[card_idx];
        }
        np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
            vlan[card_idx] : 0;
        if (rx_coalesce > 0 && rx_timeout > 0) {
            np->rx_coalesce = rx_coalesce;
            np->rx_timeout = rx_timeout;
            np->coalesce = 1;
        }
        np->tx_flow = (tx_flow == 0) ? 0 : 1;
        np->rx_flow = (rx_flow == 0) ? 0 : 1;

        if (tx_coalesce < 1)
            tx_coalesce = 1;
        else if (tx_coalesce > TX_RING_SIZE-1)
            tx_coalesce = TX_RING_SIZE - 1;
    }
    dev->netdev_ops = &netdev_ops;
    dev->watchdog_timeo = TX_TIMEOUT;
    dev->ethtool_ops = &ethtool_ops;
#if 0
    dev->features = NETIF_F_IP_CSUM;
#endif
    /* MTU range: 68 - 1536 or 8000 */
    dev->min_mtu = ETH_MIN_MTU;
    dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE;

    pci_set_drvdata (pdev, dev);

    ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
                    GFP_KERNEL);
    if (!ring_space)
        goto err_out_iounmap;
    np->tx_ring = ring_space;
    np->tx_ring_dma = ring_dma;

    ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
                    GFP_KERNEL);
    if (!ring_space)
        goto err_out_unmap_tx;
    np->rx_ring = ring_space;
    np->rx_ring_dma = ring_dma;

    /* Parse eeprom data */
    parse_eeprom (dev);

    /* Find PHY address */
    err = find_miiphy (dev);
    if (err)
        goto err_out_unmap_rx;

    /* Fiber device? */
    np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
    np->link_status = 0;
    /* Set media and reset PHY */
    if (np->phy_media) {
        /* default Auto-Negotiation for fiber devices */
        if (np->an_enable == 2) {
            np->an_enable = 1;
        }
    } else {
        /* Auto-Negotiation is mandatory for 1000BASE-T,
           IEEE 802.3ab Annex 28D page 14 */
        if (np->speed == 1000)
            np->an_enable = 1;
    }

    err = register_netdev (dev);
    if (err)
        goto err_out_unmap_rx;

    card_idx++;

    printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
        dev->name, np->name, dev->dev_addr, irq);
    if (tx_coalesce > 1)
        printk(KERN_INFO "tx_coalesce:\t%d packets\n",
                tx_coalesce);
    if (np->coalesce)
        printk(KERN_INFO
               "rx_coalesce:\t%d packets\n"
               "rx_timeout: \t%d ns\n",
                np->rx_coalesce, np->rx_timeout*640);
    if (np->vlan)
        printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
    return 0;

err_out_unmap_rx:
    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
              np->rx_ring_dma);
err_out_unmap_tx:
    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
              np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
    pci_iounmap(pdev, np->ioaddr);
#endif
    pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
    free_netdev (dev);
err_out_res:
    pci_release_regions (pdev);
err_out_disable:
    pci_disable_device (pdev);
    return err;
}

static int
find_miiphy (struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    int i, phy_found = 0;

    np->phy_addr = 1;

    for (i = 31; i >= 0; i--) {
        int mii_status = mii_read (dev, i, 1);
        if (mii_status != 0xffff && mii_status != 0x0000) {
            np->phy_addr = i;
            phy_found++;
        }
    }
    if (!phy_found) {
        printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
        return -ENODEV;
    }
    return 0;
}

static int
parse_eeprom (struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    int i, j;
    u8 sromdata[256];
    u8 *psib;
    u32 crc;
    PSROM_t psrom = (PSROM_t) sromdata;

    int cid, next;

    for (i = 0; i < 128; i++)
        ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));

    if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {  /* D-Link Only */
        /* Check CRC */
        crc = ~ether_crc_le (256 - 4, sromdata);
        if (psrom->crc != cpu_to_le32(crc)) {
            printk (KERN_ERR "%s: EEPROM data CRC error.\n",
                    dev->name);
            return -1;
        }
    }

    /* Set MAC address */
    eth_hw_addr_set(dev, psrom->mac_addr);

    if (np->chip_id == CHIP_IP1000A) {
        np->led_mode = psrom->led_mode;
        return 0;
    }

    if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
        return 0;
    }

    /* Parse Software Information Block */
    i = 0x30;
    psib = (u8 *) sromdata;
    do {
        cid = psib[i++];
        next = psib[i++];
        if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
            printk (KERN_ERR "Cell data error\n");
            return -1;
        }
        switch (cid) {
        case 0: /* Format version */
            break;
        case 1: /* End of cell */
            return 0;
        case 2: /* Duplex Polarity */
            np->duplex_polarity = psib[i];
            dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
            break;
        case 3: /* Wake Polarity */
            np->wake_polarity = psib[i];
            break;
        case 9: /* Adapter description */
            j = (next - i > 255) ? 255 : next - i;
            memcpy (np->name, &(psib[i]), j);
            break;
        case 4:
        case 5:
        case 6:
        case 7:
        case 8: /* Reserved */
            break;
        default:    /* Unknown cell */
            return -1;
        }
        i = next;
    } while (1);

    return 0;
}

static void rio_set_led_mode(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    u32 mode;

    if (np->chip_id != CHIP_IP1000A)
        return;

    mode = dr32(ASICCtrl);
    mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

    if (np->led_mode & 0x01)
        mode |= IPG_AC_LED_MODE;
    if (np->led_mode & 0x02)
        mode |= IPG_AC_LED_MODE_BIT_1;
    if (np->led_mode & 0x08)
        mode |= IPG_AC_LED_SPEED;

    dw32(ASICCtrl, mode);
}

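/* A descriptor's fraginfo field packs the buffer DMA address into
 * bits 0..47 and the fragment length into bits 48..63 (note the
 * "<< 48" length writers elsewhere in this file); desc_to_dma()
 * recovers the address by masking off the length bits.
 */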
static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
    return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}

static void free_list(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    struct sk_buff *skb;
    int i;

    /* Free all the skbuffs in the queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        skb = np->rx_skbuff[i];
        if (skb) {
            dma_unmap_single(&np->pdev->dev,
                     desc_to_dma(&np->rx_ring[i]),
                     skb->len, DMA_FROM_DEVICE);
            dev_kfree_skb(skb);
            np->rx_skbuff[i] = NULL;
        }
        np->rx_ring[i].status = 0;
        np->rx_ring[i].fraginfo = 0;
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        skb = np->tx_skbuff[i];
        if (skb) {
            dma_unmap_single(&np->pdev->dev,
                     desc_to_dma(&np->tx_ring[i]),
                     skb->len, DMA_TO_DEVICE);
            dev_kfree_skb(skb);
            np->tx_skbuff[i] = NULL;
        }
    }
}

static void rio_reset_ring(struct netdev_private *np)
{
    int i;

    np->cur_rx = 0;
    np->cur_tx = 0;
    np->old_rx = 0;
    np->old_tx = 0;

    for (i = 0; i < TX_RING_SIZE; i++)
        np->tx_ring[i].status = cpu_to_le64(TFDDone);

    for (i = 0; i < RX_RING_SIZE; i++)
        np->rx_ring[i].status = 0;
}

/* Allocate and initialize Tx and Rx descriptors */
static int alloc_list(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    int i;

    rio_reset_ring(np);
    np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

    /* Initialize Tx descriptors; TFDListPtr is set in start_xmit(). */
    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_skbuff[i] = NULL;
        np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
                          ((i + 1) % TX_RING_SIZE) *
                          sizeof(struct netdev_desc));
    }
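    /* The next_desc links above chain entry i to entry
     * (i + 1) % TX_RING_SIZE, so the descriptors form a circular
     * list that the DMA engine can follow without CPU help.
     */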

    /* Initialize Rx descriptors & allocate buffers */
    for (i = 0; i < RX_RING_SIZE; i++) {
        /* Allocate a fixed-size skbuff */
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
        np->rx_skbuff[i] = skb;
        if (!skb) {
            free_list(dev);
            return -ENOMEM;
        }

        np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
                        ((i + 1) % RX_RING_SIZE) *
                        sizeof(struct netdev_desc));
        /* Rubicon now supports 40 bits of addressing space. */
        np->rx_ring[i].fraginfo =
            cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
                           np->rx_buf_sz, DMA_FROM_DEVICE));
        np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
    }

    return 0;
}

static void rio_hw_init(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    int i;
    u16 macctrl;

    /* Reset all logic functions */
    dw16(ASICCtrl + 2,
         GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
    mdelay(10);

    rio_set_led_mode(dev);

    /* DebugCtrl bits 4, 5 and 9 must be set */
    dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

    if (np->chip_id == CHIP_IP1000A &&
        (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
        /* PHY magic taken from ipg driver, undocumented registers */
        mii_write(dev, np->phy_addr, 31, 0x0001);
        mii_write(dev, np->phy_addr, 27, 0x01e0);
        mii_write(dev, np->phy_addr, 31, 0x0002);
        mii_write(dev, np->phy_addr, 27, 0xeb8e);
        mii_write(dev, np->phy_addr, 31, 0x0000);
        mii_write(dev, np->phy_addr, 30, 0x005e);
        /* advertise 1000BASE-T half & full duplex, prefer MASTER */
        mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
    }

    if (np->phy_media)
        mii_set_media_pcs(dev);
    else
        mii_set_media(dev);

    /* Jumbo frame */
    if (np->jumbo != 0)
        dw16(MaxFrameSize, MAX_JUMBO+14);

    /* Set RFDListPtr */
    dw32(RFDListPtr0, np->rx_ring_dma);
    dw32(RFDListPtr1, 0);

    /* Set station address */
    /* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
     * too. However, it doesn't work on IP1000A so we use 16-bit access.
     */
    for (i = 0; i < 3; i++)
        dw16(StationAddr0 + 2 * i,
             cpu_to_le16(((const u16 *)dev->dev_addr)[i]));

    set_multicast (dev);
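    /* RxDMAIntCtrl packs the Rx coalescing knobs: frame count in the
     * low 16 bits and wait time (in 640 ns units) in the next 16, so
     * the defaults (10, 200) presumably mean an interrupt after 10
     * frames or ~128 us of Rx DMA wait, whichever comes first.
     */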
    if (np->coalesce) {
        dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
    }
    /* Set RIO to poll every N*320nsec. */
    dw8(RxDMAPollPeriod, 0x20);
    dw8(TxDMAPollPeriod, 0xff);
    dw8(RxDMABurstThresh, 0x30);
    dw8(RxDMAUrgentThresh, 0x30);
    dw32(RmonStatMask, 0x0007ffff);
    /* clear statistics */
    clear_stats (dev);

    /* VLAN supported */
    if (np->vlan) {
        /* priority field in RxDMAIntCtrl  */
        dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
        /* VLANId */
        dw16(VLANId, np->vlan);
        /* Length/Type should be 0x8100 */
        dw32(VLANTag, 0x8100 << 16 | np->vlan);
        /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
           VLAN information is tagged via the TFC's VID and CFI fields. */
        dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
    }

    /* Start Tx/Rx */
    dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

    macctrl = 0;
    macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
    macctrl |= (np->full_duplex) ? DuplexSelect : 0;
    macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
    macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
    dw16(MACCtrl, macctrl);
}

static void rio_hw_stop(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;

    /* Disable interrupts */
    dw16(IntEnable, 0);

    /* Stop Tx and Rx logics */
    dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
}

static int rio_open(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    const int irq = np->pdev->irq;
    int i;

    i = alloc_list(dev);
    if (i)
        return i;

    rio_hw_init(dev);

    i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
    if (i) {
        rio_hw_stop(dev);
        free_list(dev);
        return i;
    }

    timer_setup(&np->timer, rio_timer, 0);
    np->timer.expires = jiffies + 1 * HZ;
    add_timer(&np->timer);

    netif_start_queue (dev);

    dl2k_enable_int(np);
    return 0;
}

static void
rio_timer (struct timer_list *t)
{
    struct netdev_private *np = from_timer(np, t, timer);
    struct net_device *dev = pci_get_drvdata(np->pdev);
    unsigned int entry;
    int next_tick = 1*HZ;
    unsigned long flags;

    spin_lock_irqsave(&np->rx_lock, flags);
    /* Recover from rx ring exhaustion */
    if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
        printk(KERN_INFO "Try to recover rx ring exhausted...\n");
        /* Re-allocate skbuffs to fill the descriptor ring */
        for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
            struct sk_buff *skb;
            entry = np->old_rx % RX_RING_SIZE;
            /* Dropped packets don't need to be re-allocated */
            if (np->rx_skbuff[entry] == NULL) {
                skb = netdev_alloc_skb_ip_align(dev,
                                np->rx_buf_sz);
                if (skb == NULL) {
                    np->rx_ring[entry].fraginfo = 0;
                    printk (KERN_INFO
                        "%s: Still unable to re-allocate Rx skbuff.#%d\n",
                        dev->name, entry);
                    break;
                }
                np->rx_skbuff[entry] = skb;
                np->rx_ring[entry].fraginfo =
                    cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
                                np->rx_buf_sz, DMA_FROM_DEVICE));
            }
            np->rx_ring[entry].fraginfo |=
                cpu_to_le64((u64)np->rx_buf_sz << 48);
            np->rx_ring[entry].status = 0;
        } /* end for */
    } /* end if */
    spin_unlock_irqrestore (&np->rx_lock, flags);
    np->timer.expires = jiffies + next_tick;
    add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;

    printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
        dev->name, dr32(TxStatus));
    rio_free_tx(dev, 0);
    dev->if_port = 0;
    netif_trans_update(dev); /* prevent tx timeout */
}

static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    struct netdev_desc *txdesc;
    unsigned entry;
    u64 tfc_vlan_tag = 0;

    if (np->link_status == 0) { /* Link Down */
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
    }
    entry = np->cur_tx % TX_RING_SIZE;
    np->tx_skbuff[entry] = skb;
    txdesc = &np->tx_ring[entry];

#if 0
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        txdesc->status |=
            cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
                 IPChecksumEnable);
    }
#endif
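    /* tfc_vlan_tag layout (from the shifts below): the 12-bit VID
     * sits at bits 32..43 and the 3-bit user priority at bits
     * 45..47; bit 44 would be the 802.1Q CFI bit.
     */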
    if (np->vlan) {
        tfc_vlan_tag = VLANTagInsert |
            ((u64)np->vlan << 32) |
            ((u64)skb->priority << 45);
    }
    txdesc->fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
                               skb->len, DMA_TO_DEVICE));
    txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

    /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode.
     * Workaround: always use one descriptor in 10Mbps mode. */
    if (entry % np->tx_coalesce == 0 || np->speed == 10)
        txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                          WordAlignDisable |
                          TxDMAIndicate |
                          (1 << FragCountShift));
    else
        txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
                          WordAlignDisable |
                          (1 << FragCountShift));

    /* TxDMAPollNow */
    dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
    /* Schedule ISR */
    dw32(CountDown, 10000);
    np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
    if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
            < TX_QUEUE_LEN - 1 && np->speed != 10) {
        /* do nothing */
    } else if (!netif_queue_stopped(dev)) {
        netif_stop_queue (dev);
    }

    /* The first TFDListPtr */
    if (!dr32(TFDListPtr0)) {
        dw32(TFDListPtr0, np->tx_ring_dma +
             entry * sizeof (struct netdev_desc));
        dw32(TFDListPtr1, 0);
    }

    return NETDEV_TX_OK;
}

static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    unsigned int_status;
    int cnt = max_intrloop;
    int handled = 0;

    while (1) {
        int_status = dr16(IntStatus);
        dw16(IntStatus, int_status);
        int_status &= DEFAULT_INTR;
        if (int_status == 0 || --cnt < 0)
            break;
        handled = 1;
        /* Processing received packets */
        if (int_status & RxDMAComplete)
            receive_packet (dev);
        /* TxDMAComplete interrupt */
        if ((int_status & (TxDMAComplete|IntRequested))) {
            int tx_status;
            tx_status = dr32(TxStatus);
            if (tx_status & 0x01)
                tx_error (dev, tx_status);
            /* Free used tx skbuffs */
            rio_free_tx (dev, 1);
        }

        /* Handle uncommon events */
        if (int_status &
            (HostError | LinkEvent | UpdateStats))
            rio_error (dev, int_status);
    }
    if (np->cur_tx != np->old_tx)
        dw32(CountDown, 100);
    return IRQ_RETVAL(handled);
}

static void
rio_free_tx (struct net_device *dev, int irq)
{
    struct netdev_private *np = netdev_priv(dev);
    int entry = np->old_tx % TX_RING_SIZE;
    int tx_use = 0;
    unsigned long flag = 0;

    if (irq)
        spin_lock(&np->tx_lock);
    else
        spin_lock_irqsave(&np->tx_lock, flag);

    /* Free used tx skbuffs */
    while (entry != np->cur_tx) {
        struct sk_buff *skb;

        if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
            break;
        skb = np->tx_skbuff[entry];
        dma_unmap_single(&np->pdev->dev,
                 desc_to_dma(&np->tx_ring[entry]), skb->len,
                 DMA_TO_DEVICE);
        if (irq)
            dev_consume_skb_irq(skb);
        else
            dev_kfree_skb(skb);

        np->tx_skbuff[entry] = NULL;
        entry = (entry + 1) % TX_RING_SIZE;
        tx_use++;
    }
    if (irq)
        spin_unlock(&np->tx_lock);
    else
        spin_unlock_irqrestore(&np->tx_lock, flag);
    np->old_tx = entry;

    /* If the ring is no longer full, wake the queue. */

    if (netif_queue_stopped(dev) &&
        ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
        < TX_QUEUE_LEN - 1 || np->speed == 10)) {
        netif_wake_queue (dev);
    }
}

static void
tx_error (struct net_device *dev, int tx_status)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    int frame_id;
    int i;

    frame_id = (tx_status >> 16) & 0xffff;  /* FrameId lives in the upper 16 bits */
    printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
        dev->name, tx_status, frame_id);
    dev->stats.tx_errors++;
    /* Transmit Underrun */
    if (tx_status & 0x10) {
        dev->stats.tx_fifo_errors++;
        dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
        /* Transmit Underrun: need to set TxReset, DMAReset, FIFOReset */
        dw16(ASICCtrl + 2,
             TxReset | DMAReset | FIFOReset | NetworkReset);
        /* Wait for the ResetBusy bit to clear */
        for (i = 50; i > 0; i--) {
            if (!(dr16(ASICCtrl + 2) & ResetBusy))
                break;
            mdelay (1);
        }
        rio_set_led_mode(dev);
        rio_free_tx (dev, 1);
        /* Reset TFDListPtr */
        dw32(TFDListPtr0, np->tx_ring_dma +
             np->old_tx * sizeof (struct netdev_desc));
        dw32(TFDListPtr1, 0);

        /* Let TxStartThresh stay default value */
    }
    /* Late Collision */
    if (tx_status & 0x04) {
        dev->stats.tx_fifo_errors++;
        /* TxReset and clear FIFO */
        dw16(ASICCtrl + 2, TxReset | FIFOReset);
        /* Wait for the reset to complete */
        for (i = 50; i > 0; i--) {
            if (!(dr16(ASICCtrl + 2) & ResetBusy))
                break;
            mdelay (1);
        }
        rio_set_led_mode(dev);
        /* Let TxStartThresh stay default value */
    }
    /* Maximum Collisions */
    if (tx_status & 0x08)
        dev->stats.collisions++;
    /* Restart the Tx */
    dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}

static int
receive_packet (struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    int entry = np->cur_rx % RX_RING_SIZE;
    int cnt = 30;

    /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
    while (1) {
        struct netdev_desc *desc = &np->rx_ring[entry];
        int pkt_len;
        u64 frame_status;

        if (!(desc->status & cpu_to_le64(RFDDone)) ||
            !(desc->status & cpu_to_le64(FrameStart)) ||
            !(desc->status & cpu_to_le64(FrameEnd)))
            break;

        /* Chip omits the CRC. */
        frame_status = le64_to_cpu(desc->status);
        pkt_len = frame_status & 0xffff;
        if (--cnt < 0)
            break;
        /* Update rx error statistics, drop packet. */
        if (frame_status & RFS_Errors) {
            dev->stats.rx_errors++;
            if (frame_status & (RxRuntFrame | RxLengthError))
                dev->stats.rx_length_errors++;
            if (frame_status & RxFCSError)
                dev->stats.rx_crc_errors++;
            if (frame_status & RxAlignmentError && np->speed != 1000)
                dev->stats.rx_frame_errors++;
            if (frame_status & RxFIFOOverrun)
                dev->stats.rx_fifo_errors++;
        } else {
            struct sk_buff *skb;

            /* Small skbuffs for short packets */
            if (pkt_len > copy_thresh) {
                dma_unmap_single(&np->pdev->dev,
                         desc_to_dma(desc),
                         np->rx_buf_sz,
                         DMA_FROM_DEVICE);
                skb_put (skb = np->rx_skbuff[entry], pkt_len);
                np->rx_skbuff[entry] = NULL;
            } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
                dma_sync_single_for_cpu(&np->pdev->dev,
                            desc_to_dma(desc),
                            np->rx_buf_sz,
                            DMA_FROM_DEVICE);
                skb_copy_to_linear_data (skb,
                          np->rx_skbuff[entry]->data,
                          pkt_len);
                skb_put (skb, pkt_len);
                dma_sync_single_for_device(&np->pdev->dev,
                               desc_to_dma(desc),
                               np->rx_buf_sz,
                               DMA_FROM_DEVICE);
            }
            skb->protocol = eth_type_trans (skb, dev);
#if 0
            /* Checksum done by hw, but csum value unavailable. */
            if (np->pdev->pci_rev_id >= 0x0c &&
                !(frame_status & (TCPError | UDPError | IPError))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
            }
#endif
            netif_rx (skb);
        }
        entry = (entry + 1) % RX_RING_SIZE;
    }
    spin_lock(&np->rx_lock);
    np->cur_rx = entry;
    /* Re-allocate skbuffs to fill the descriptor ring */
    entry = np->old_rx;
    while (entry != np->cur_rx) {
        struct sk_buff *skb;
        /* Dropped packets don't need to be re-allocated */
        if (np->rx_skbuff[entry] == NULL) {
            skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
            if (skb == NULL) {
                np->rx_ring[entry].fraginfo = 0;
                printk (KERN_INFO
                    "%s: receive_packet: "
                    "Unable to re-allocate Rx skbuff.#%d\n",
                    dev->name, entry);
                break;
            }
            np->rx_skbuff[entry] = skb;
            np->rx_ring[entry].fraginfo =
                cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
                               np->rx_buf_sz, DMA_FROM_DEVICE));
        }
        np->rx_ring[entry].fraginfo |=
            cpu_to_le64((u64)np->rx_buf_sz << 48);
        np->rx_ring[entry].status = 0;
        entry = (entry + 1) % RX_RING_SIZE;
    }
    np->old_rx = entry;
    spin_unlock(&np->rx_lock);
    return 0;
}

static void
rio_error (struct net_device *dev, int int_status)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    u16 macctrl;

    /* Link change event */
    if (int_status & LinkEvent) {
        if (mii_wait_link (dev, 10) == 0) {
            printk (KERN_INFO "%s: Link up\n", dev->name);
            if (np->phy_media)
                mii_get_media_pcs (dev);
            else
                mii_get_media (dev);
            if (np->speed == 1000)
                np->tx_coalesce = tx_coalesce;
            else
                np->tx_coalesce = 1;
            macctrl = 0;
            macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
            macctrl |= (np->full_duplex) ? DuplexSelect : 0;
            macctrl |= (np->tx_flow) ?
                TxFlowControlEnable : 0;
            macctrl |= (np->rx_flow) ?
                RxFlowControlEnable : 0;
            dw16(MACCtrl, macctrl);
            np->link_status = 1;
            netif_carrier_on(dev);
        } else {
            printk (KERN_INFO "%s: Link down\n", dev->name);
            np->link_status = 0;
            netif_carrier_off(dev);
        }
    }

    /* UpdateStats statistics registers */
    if (int_status & UpdateStats) {
        get_stats (dev);
    }

    /* PCI Error: a catastrophic error related to the bus interface
       occurred; set GlobalReset and HostReset to reset. */
    if (int_status & HostError) {
        printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
            dev->name, int_status);
        dw16(ASICCtrl + 2, GlobalReset | HostReset);
        mdelay (500);
        rio_set_led_mode(dev);
    }
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
    int i;
#endif
    unsigned int stat_reg;

    /* All statistics registers need to be acknowledged,
       otherwise a statistics overflow could cause problems */

    dev->stats.rx_packets += dr32(FramesRcvOk);
    dev->stats.tx_packets += dr32(FramesXmtOk);
    dev->stats.rx_bytes += dr32(OctetRcvOk);
    dev->stats.tx_bytes += dr32(OctetXmtOk);

    dev->stats.multicast = dr32(McstFramesRcvdOk);
    dev->stats.collisions += dr32(SingleColFrames)
                 +  dr32(MultiColFrames);

    /* detailed tx errors */
    stat_reg = dr16(FramesAbortXSColls);
    dev->stats.tx_aborted_errors += stat_reg;
    dev->stats.tx_errors += stat_reg;

    stat_reg = dr16(CarrierSenseErrors);
    dev->stats.tx_carrier_errors += stat_reg;
    dev->stats.tx_errors += stat_reg;

    /* Clear all other statistics registers. */
    dr32(McstOctetXmtOk);
    dr16(BcstFramesXmtdOk);
    dr32(McstFramesXmtdOk);
    dr16(BcstFramesRcvdOk);
    dr16(MacControlFramesRcvd);
    dr16(FrameTooLongErrors);
    dr16(InRangeLengthErrors);
    dr16(FramesCheckSeqErrors);
    dr16(FramesLostRxErrors);
    dr32(McstOctetXmtOk);
    dr32(BcstOctetXmtOk);
    dr32(McstFramesXmtdOk);
    dr32(FramesWDeferredXmt);
    dr32(LateCollisions);
    dr16(BcstFramesXmtdOk);
    dr16(MacControlFramesXmtd);
    dr16(FramesWEXDeferal);

#ifdef MEM_MAPPING
    for (i = 0x100; i <= 0x150; i += 4)
        dr32(i);
#endif
    dr16(TxJumboFrames);
    dr16(RxJumboFrames);
    dr16(TCPCheckSumErrors);
    dr16(UDPCheckSumErrors);
    dr16(IPCheckSumErrors);
    return &dev->stats;
}

static int
clear_stats (struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
    int i;
#endif

    /* All statistics registers need to be acknowledged,
       otherwise a statistics overflow could cause problems */
    dr32(FramesRcvOk);
    dr32(FramesXmtOk);
    dr32(OctetRcvOk);
    dr32(OctetXmtOk);

    dr32(McstFramesRcvdOk);
    dr32(SingleColFrames);
    dr32(MultiColFrames);
    dr32(LateCollisions);
    /* detailed rx errors */
    dr16(FrameTooLongErrors);
    dr16(InRangeLengthErrors);
    dr16(FramesCheckSeqErrors);
    dr16(FramesLostRxErrors);

    /* detailed tx errors */
    dr16(FramesAbortXSColls);
    dr16(CarrierSenseErrors);

    /* Clear all other statistics registers. */
    dr32(McstOctetXmtOk);
    dr16(BcstFramesXmtdOk);
    dr32(McstFramesXmtdOk);
    dr16(BcstFramesRcvdOk);
    dr16(MacControlFramesRcvd);
    dr32(McstOctetXmtOk);
    dr32(BcstOctetXmtOk);
    dr32(McstFramesXmtdOk);
    dr32(FramesWDeferredXmt);
    dr16(BcstFramesXmtdOk);
    dr16(MacControlFramesXmtd);
    dr16(FramesWEXDeferal);
#ifdef MEM_MAPPING
    for (i = 0x100; i <= 0x150; i += 4)
        dr32(i);
#endif
    dr16(TxJumboFrames);
    dr16(RxJumboFrames);
    dr16(TCPCheckSumErrors);
    dr16(UDPCheckSumErrors);
    dr16(IPCheckSumErrors);
    return 0;
}

static void
set_multicast (struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    u32 hash_table[2];
    u16 rx_mode = 0;

    hash_table[0] = hash_table[1] = 0;
    /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
    hash_table[1] |= 0x02000000;
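    /* Worked example: index 0x39 = 57 selects bit 57 % 32 = 25 of
     * hash_table[57 / 32] = hash_table[1], i.e. exactly the
     * 0x02000000 set above.
     */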
    if (dev->flags & IFF_PROMISC) {
        /* Receive all frames promiscuously. */
        rx_mode = ReceiveAllFrames;
    } else if ((dev->flags & IFF_ALLMULTI) ||
            (netdev_mc_count(dev) > multicast_filter_limit)) {
        /* Receive broadcast and multicast frames */
        rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
    } else if (!netdev_mc_empty(dev)) {
        struct netdev_hw_addr *ha;
        /* Receive broadcast frames and multicast frames filtering
           by Hashtable */
        rx_mode =
            ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
        netdev_for_each_mc_addr(ha, dev) {
            int bit, index = 0;
            int crc = ether_crc_le(ETH_ALEN, ha->addr);
            /* The bit-reversed upper 6 bits of the CRC are used
               as an index into the hash table */
            for (bit = 0; bit < 6; bit++)
                if (crc & (1 << (31 - bit)))
                    index |= (1 << bit);
            hash_table[index / 32] |= (1 << (index % 32));
        }
    } else {
        rx_mode = ReceiveBroadcast | ReceiveUnicast;
    }
    if (np->vlan) {
        /* ReceiveVLANMatch field in ReceiveMode */
        rx_mode |= ReceiveVLANMatch;
    }

    dw32(HashTable0, hash_table[0]);
    dw32(HashTable1, hash_table[1]);
    dw16(ReceiveMode, rx_mode);
}

static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct netdev_private *np = netdev_priv(dev);

    strlcpy(info->driver, "dl2k", sizeof(info->driver));
    strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int rio_get_link_ksettings(struct net_device *dev,
                  struct ethtool_link_ksettings *cmd)
{
    struct netdev_private *np = netdev_priv(dev);
    u32 supported, advertising;

    if (np->phy_media) {
        /* fiber device */
        supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
        advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
        cmd->base.port = PORT_FIBRE;
    } else {
        /* copper device */
        supported = SUPPORTED_10baseT_Half |
            SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
            | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
            SUPPORTED_Autoneg | SUPPORTED_MII;
        advertising = ADVERTISED_10baseT_Half |
            ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
            ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
            ADVERTISED_Autoneg | ADVERTISED_MII;
        cmd->base.port = PORT_MII;
    }
    if (np->link_status) {
        cmd->base.speed = np->speed;
        cmd->base.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
    } else {
        cmd->base.speed = SPEED_UNKNOWN;
        cmd->base.duplex = DUPLEX_UNKNOWN;
    }
    if (np->an_enable)
        cmd->base.autoneg = AUTONEG_ENABLE;
    else
        cmd->base.autoneg = AUTONEG_DISABLE;

    cmd->base.phy_address = np->phy_addr;

    ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                        supported);
    ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                        advertising);

    return 0;
}

static int rio_set_link_ksettings(struct net_device *dev,
                  const struct ethtool_link_ksettings *cmd)
{
    struct netdev_private *np = netdev_priv(dev);
    u32 speed = cmd->base.speed;
    u8 duplex = cmd->base.duplex;

    netif_carrier_off(dev);
    if (cmd->base.autoneg == AUTONEG_ENABLE) {
        if (np->an_enable) {
            return 0;
        } else {
            np->an_enable = 1;
            mii_set_media(dev);
            return 0;
        }
    } else {
        np->an_enable = 0;
        if (np->speed == 1000) {
            speed = SPEED_100;
            duplex = DUPLEX_FULL;
            printk("Warning!! Can't disable Auto-Negotiation at 1000Mbps; falling back to manual 100Mbps, full duplex.\n");
        }
        switch (speed) {
        case SPEED_10:
            np->speed = 10;
            np->full_duplex = (duplex == DUPLEX_FULL);
            break;
        case SPEED_100:
            np->speed = 100;
            np->full_duplex = (duplex == DUPLEX_FULL);
            break;
        case SPEED_1000: /* not supported */
        default:
            return -EINVAL;
        }
        mii_set_media(dev);
    }
    return 0;
}

static u32 rio_get_link(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    return np->link_status;
}

static const struct ethtool_ops ethtool_ops = {
    .get_drvinfo = rio_get_drvinfo,
    .get_link = rio_get_link,
    .get_link_ksettings = rio_get_link_ksettings,
    .set_link_ksettings = rio_set_link_ksettings,
};

static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
    int phy_addr;
    struct netdev_private *np = netdev_priv(dev);
    struct mii_ioctl_data *miidata = if_mii(rq);

    phy_addr = np->phy_addr;
    switch (cmd) {
    case SIOCGMIIPHY:
        miidata->phy_id = phy_addr;
        break;
    case SIOCGMIIREG:
        miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
        break;
    case SIOCSMIIREG:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
        break;
    default:
        return -EOPNOTSUPP;
    }
    return 0;
}

#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instructions to read/write the EEPROM to avoid failures on some machines */
static int read_eeprom(struct netdev_private *np, int eep_addr)
{
    void __iomem *ioaddr = np->eeprom_addr;
    int i = 1000;

    dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
    while (i-- > 0) {
        if (!(dr16(EepromCtrl) & EEP_BUSY))
            return dr16(EepromData);
    }
    return 0;
}

enum phy_ctrl_bits {
    MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
    MII_DUPLEX = 0x08,
};

#define mii_delay() dr8(PhyCtrl)
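
/* The MII management interface is bit-banged through PhyCtrl:
 * MII_DATA1 drives the data line, MII_CLK toggles the clock, and
 * each mii_delay() read-back is there to enforce the hold time
 * between transitions.
 */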
static void
mii_sendbit (struct net_device *dev, u32 data)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;

    data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
    dw8(PhyCtrl, data);
    mii_delay ();
    dw8(PhyCtrl, data | MII_CLK);
    mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    u8 data;

    data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
    dw8(PhyCtrl, data);
    mii_delay ();
    dw8(PhyCtrl, data | MII_CLK);
    mii_delay ();
    return (dr8(PhyCtrl) >> 1) & 1;
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
    int i;

    for (i = len - 1; i >= 0; i--) {
        mii_sendbit (dev, data & (1 << i));
    }
}

static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
    u32 cmd;
    int i;
    u32 retval = 0;

    /* Preamble */
    mii_send_bits (dev, 0xffffffff, 32);
    /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
    /* ST,OP = 0110'b for read operation */
    cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
    mii_send_bits (dev, cmd, 14);
    /* Turnaround */
    if (mii_getbit (dev))
        goto err_out;
    /* Read data */
    for (i = 0; i < 16; i++) {
        retval |= mii_getbit (dev);
        retval <<= 1;
    }
    /* End cycle */
    mii_getbit (dev);
    return (retval >> 1) & 0xffff;

err_out:
    return 0;
}
1456 static int
1457 mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
1458 {
1459     u32 cmd;
1460 
1461     /* Preamble */
1462     mii_send_bits (dev, 0xffffffff, 32);
1463     /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1464     /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
1465     cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
1466     mii_send_bits (dev, cmd, 32);
1467     /* End cycle */
1468     mii_getbit (dev);
1469     return 0;
1470 }
1471 static int
1472 mii_wait_link (struct net_device *dev, int wait)
1473 {
1474     __u16 bmsr;
1475     int phy_addr;
1476     struct netdev_private *np;
1477 
1478     np = netdev_priv(dev);
1479     phy_addr = np->phy_addr;
1480 
1481     do {
1482         bmsr = mii_read (dev, phy_addr, MII_BMSR);
1483         if (bmsr & BMSR_LSTATUS)
1484             return 0;
1485         mdelay (1);
1486     } while (--wait > 0);
1487     return -1;
1488 }
1489 static int
1490 mii_get_media (struct net_device *dev)
1491 {
1492     __u16 negotiate;
1493     __u16 bmsr;
1494     __u16 mscr;
1495     __u16 mssr;
1496     int phy_addr;
1497     struct netdev_private *np;
1498 
1499     np = netdev_priv(dev);
1500     phy_addr = np->phy_addr;
1501 
1502     bmsr = mii_read (dev, phy_addr, MII_BMSR);
1503     if (np->an_enable) {
1504         if (!(bmsr & BMSR_ANEGCOMPLETE)) {
1505             /* Auto-Negotiation not completed */
1506             return -1;
1507         }
1508         negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
1509             mii_read (dev, phy_addr, MII_LPA);
1510         mscr = mii_read (dev, phy_addr, MII_CTRL1000);
1511         mssr = mii_read (dev, phy_addr, MII_STAT1000);
1512         if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
1513             np->speed = 1000;
1514             np->full_duplex = 1;
1515             printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
1516         } else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
1517             np->speed = 1000;
1518             np->full_duplex = 0;
1519             printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
1520         } else if (negotiate & ADVERTISE_100FULL) {
1521             np->speed = 100;
1522             np->full_duplex = 1;
1523             printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
1524         } else if (negotiate & ADVERTISE_100HALF) {
1525             np->speed = 100;
1526             np->full_duplex = 0;
1527             printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
1528         } else if (negotiate & ADVERTISE_10FULL) {
1529             np->speed = 10;
1530             np->full_duplex = 1;
1531             printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
1532         } else if (negotiate & ADVERTISE_10HALF) {
1533             np->speed = 10;
1534             np->full_duplex = 0;
1535             printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
1536         }
1537         if (negotiate & ADVERTISE_PAUSE_CAP) {
1538             np->tx_flow &= 1;
1539             np->rx_flow &= 1;
1540         } else if (negotiate & ADVERTISE_PAUSE_ASYM) {
1541             np->tx_flow = 0;
1542             np->rx_flow &= 1;
1543         }
1544         /* else tx_flow, rx_flow = user select  */
1545     } else {
1546         __u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
1547         switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
1548         case BMCR_SPEED1000:
1549             printk (KERN_INFO "Operating at 1000 Mbps, ");
1550             break;
1551         case BMCR_SPEED100:
1552             printk (KERN_INFO "Operating at 100 Mbps, ");
1553             break;
1554         case 0:
1555             printk (KERN_INFO "Operating at 10 Mbps, ");
1556         }
1557         if (bmcr & BMCR_FULLDPLX) {
1558             printk (KERN_CONT "Full duplex\n");
1559         } else {
1560             printk (KERN_CONT "Half duplex\n");
1561         }
1562     }
1563     if (np->tx_flow)
1564         printk(KERN_INFO "Enable Tx Flow Control\n");
1565     else
1566         printk(KERN_INFO "Disable Tx Flow Control\n");
1567     if (np->rx_flow)
1568         printk(KERN_INFO "Enable Rx Flow Control\n");
1569     else
1570         printk(KERN_INFO "Disable Rx Flow Control\n");
1571 
1572     return 0;
1573 }
1574 
1575 static int
1576 mii_set_media (struct net_device *dev)
1577 {
1578     __u16 pscr;
1579     __u16 bmcr;
1580     __u16 bmsr;
1581     __u16 anar;
1582     int phy_addr;
1583     struct netdev_private *np;
1584     np = netdev_priv(dev);
1585     phy_addr = np->phy_addr;
1586 
1587     /* Does user set speed? */
1588     if (np->an_enable) {
1589         /* Advertise capabilities */
1590         bmsr = mii_read (dev, phy_addr, MII_BMSR);
1591         anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
1592             ~(ADVERTISE_100FULL | ADVERTISE_10FULL |
1593               ADVERTISE_100HALF | ADVERTISE_10HALF |
1594               ADVERTISE_100BASE4);
1595         if (bmsr & BMSR_100FULL)
1596             anar |= ADVERTISE_100FULL;
1597         if (bmsr & BMSR_100HALF)
1598             anar |= ADVERTISE_100HALF;
1599         if (bmsr & BMSR_100BASE4)
1600             anar |= ADVERTISE_100BASE4;
1601         if (bmsr & BMSR_10FULL)
1602             anar |= ADVERTISE_10FULL;
1603         if (bmsr & BMSR_10HALF)
1604             anar |= ADVERTISE_10HALF;
1605         anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1606         mii_write (dev, phy_addr, MII_ADVERTISE, anar);
1607 
1608         /* Enable Auto crossover */
1609         pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
1610         pscr |= 3 << 5; /* bits [6:5] = 11b: enable automatic MDI/MDI-X crossover */
1611         mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
1612 
1613         /* Soft reset PHY */
1614         mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
1615         bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
1616         mii_write (dev, phy_addr, MII_BMCR, bmcr);
1617         mdelay(1);
1618     } else {
1619         /* Force speed setting */
1620         /* 1) Disable Auto crossover */
1621         pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
1622         pscr &= ~(3 << 5);
1623         mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
1624 
1625         /* 2) PHY Reset */
1626         bmcr = mii_read (dev, phy_addr, MII_BMCR);
1627         bmcr |= BMCR_RESET;
1628         mii_write (dev, phy_addr, MII_BMCR, bmcr);
1629 
1630         /* 3) Power Down */
1631         bmcr = 0x1940;  /* BMCR_ANENABLE | BMCR_PDOWN | BMCR_FULLDPLX | BMCR_SPEED1000; must be 0x1940 */
1632         mii_write (dev, phy_addr, MII_BMCR, bmcr);
1633         mdelay (100);   /* give the PHY time to power down */
1634 
1635         /* 4) Advertise nothing */
1636         mii_write (dev, phy_addr, MII_ADVERTISE, 0);
1637 
1638         /* 5) Set media and Power Up */
1639         bmcr = BMCR_PDOWN;
1640         if (np->speed == 100) {
1641             bmcr |= BMCR_SPEED100;
1642             printk (KERN_INFO "Manual 100 Mbps, ");
1643         } else if (np->speed == 10) {
1644             printk (KERN_INFO "Manual 10 Mbps, ");
1645         }
1646         if (np->full_duplex) {
1647             bmcr |= BMCR_FULLDPLX;
1648             printk (KERN_CONT "Full duplex\n");
1649         } else {
1650             printk (KERN_CONT "Half duplex\n");
1651         }
1652 #if 0
1653         /* Set 1000BaseT Master/Slave setting */
1654         mscr = mii_read (dev, phy_addr, MII_CTRL1000);
1655         mscr |= MII_MSCR_CFG_ENABLE;
1656         mscr &= ~MII_MSCR_CFG_VALUE;
1657 #endif
1658         mii_write (dev, phy_addr, MII_BMCR, bmcr);
1659         mdelay(10);
1660     }
1661     return 0;
1662 }
1663 
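/* Report the fiber (PCS) link state.  PCS autonegotiation decides only
 * duplex and pause; the speed on this media is always 1000 Mbps. */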
1664 static int
1665 mii_get_media_pcs (struct net_device *dev)
1666 {
1667     __u16 negotiate;
1668     __u16 bmsr;
1669     int phy_addr;
1670     struct netdev_private *np;
1671 
1672     np = netdev_priv(dev);
1673     phy_addr = np->phy_addr;
1674 
1675     bmsr = mii_read (dev, phy_addr, PCS_BMSR);
1676     if (np->an_enable) {
1677         if (!(bmsr & BMSR_ANEGCOMPLETE)) {
1678             /* Auto-Negotiation not completed */
1679             return -1;
1680         }
1681         negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
1682             mii_read (dev, phy_addr, PCS_ANLPAR);
1683         np->speed = 1000;
1684         if (negotiate & PCS_ANAR_FULL_DUPLEX) {
1685             printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
1686             np->full_duplex = 1;
1687         } else {
1688             printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
1689             np->full_duplex = 0;
1690         }
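        /* Same simplified pause resolution as in mii_get_media() above:
         * symmetric pause keeps the user's choice, asymmetric pause
         * disables Tx flow control only. */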
1691         if (negotiate & PCS_ANAR_PAUSE) {
1692             np->tx_flow &= 1;
1693             np->rx_flow &= 1;
1694         } else if (negotiate & PCS_ANAR_ASYMMETRIC) {
1695             np->tx_flow = 0;
1696             np->rx_flow &= 1;
1697         }
1698         /* else: leave tx_flow and rx_flow as the user selected */
1699     } else {
1700         __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
1701         printk (KERN_INFO "Operating at 1000 Mbps, ");
1702         if (bmcr & BMCR_FULLDPLX) {
1703             printk (KERN_CONT "Full duplex\n");
1704         } else {
1705             printk (KERN_CONT "Half duplex\n");
1706         }
1707     }
1708     if (np->tx_flow)
1709         printk(KERN_INFO "Enable Tx Flow Control\n");
1710     else
1711         printk(KERN_INFO "Disable Tx Flow Control\n");
1712     if (np->rx_flow)
1713         printk(KERN_INFO "Enable Rx Flow Control\n");
1714     else
1715         printk(KERN_INFO "Disable Rx Flow Control\n");
1716 
1717     return 0;
1718 }
1719 
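/* Configure the PCS for fiber media: advertise the duplex abilities
 * found in the extended status register and restart autonegotiation,
 * or force duplex with a plain BMCR write when autoneg is off. */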
1720 static int
1721 mii_set_media_pcs (struct net_device *dev)
1722 {
1723     __u16 bmcr;
1724     __u16 esr;
1725     __u16 anar;
1726     int phy_addr;
1727     struct netdev_private *np;
1728     np = netdev_priv(dev);
1729     phy_addr = np->phy_addr;
1730 
1731     /* Auto-Negotiation? */
1732     if (np->an_enable) {
1733         /* Advertise capabilities */
1734         esr = mii_read (dev, phy_addr, PCS_ESR);
1735         anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
1736             ~PCS_ANAR_HALF_DUPLEX &
1737             ~PCS_ANAR_FULL_DUPLEX;
1738         if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
1739             anar |= PCS_ANAR_HALF_DUPLEX;
1740         if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
1741             anar |= PCS_ANAR_FULL_DUPLEX;
1742         anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
1743         mii_write (dev, phy_addr, MII_ADVERTISE, anar);
1744 
1745         /* Soft reset PHY */
1746         mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
1747         bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
1748         mii_write (dev, phy_addr, MII_BMCR, bmcr);
1749         mdelay(1);
1750     } else {
1751         /* Force speed setting */
1752         /* PHY Reset */
1753         bmcr = BMCR_RESET;
1754         mii_write (dev, phy_addr, MII_BMCR, bmcr);
1755         mdelay(10);
1756         if (np->full_duplex) {
1757             bmcr = BMCR_FULLDPLX;
1758             printk (KERN_INFO "Manual full duplex\n");
1759         } else {
1760             bmcr = 0;
1761             printk (KERN_INFO "Manual half duplex\n");
1762         }
1763         mii_write (dev, phy_addr, MII_BMCR, bmcr);
1764         mdelay(10);
1765 
1766         /* Advertise nothing */
1767         mii_write (dev, phy_addr, MII_ADVERTISE, 0);
1768     }
1769     return 0;
1770 }
1771 
1772 
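/* Teardown mirrors rio_open in reverse: stop the queue and the
 * hardware first, then release the IRQ and timer, and finally free
 * the descriptor rings and their buffers. */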
1773 static int
1774 rio_close (struct net_device *dev)
1775 {
1776     struct netdev_private *np = netdev_priv(dev);
1777     struct pci_dev *pdev = np->pdev;
1778 
1779     netif_stop_queue (dev);
1780 
1781     rio_hw_stop(dev);
1782 
1783     free_irq(pdev->irq, dev);
1784     del_timer_sync (&np->timer);
1785 
1786     free_list(dev);
1787 
1788     return 0;
1789 }
1790 
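/* PCI remove: the netdev is unregistered before its DMA rings are
 * freed and the BARs unmapped, so no further traffic can touch
 * freed memory. */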
1791 static void
1792 rio_remove1 (struct pci_dev *pdev)
1793 {
1794     struct net_device *dev = pci_get_drvdata (pdev);
1795 
1796     if (dev) {
1797         struct netdev_private *np = netdev_priv(dev);
1798 
1799         unregister_netdev (dev);
1800         dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
1801                   np->rx_ring_dma);
1802         dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
1803                   np->tx_ring_dma);
1804 #ifdef MEM_MAPPING
1805         pci_iounmap(pdev, np->ioaddr);
1806 #endif
1807         pci_iounmap(pdev, np->eeprom_addr);
1808         free_netdev (dev);
1809         pci_release_regions (pdev);
1810         pci_disable_device (pdev);
1811     }
1812 }
1813 
1814 #ifdef CONFIG_PM_SLEEP
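/* System sleep support.  Suspend quiesces the NIC much like rio_close
 * but keeps the rings allocated, so resume only has to reset the ring
 * state, re-run the hardware init path and re-enable interrupts. */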
1815 static int rio_suspend(struct device *device)
1816 {
1817     struct net_device *dev = dev_get_drvdata(device);
1818     struct netdev_private *np = netdev_priv(dev);
1819 
1820     if (!netif_running(dev))
1821         return 0;
1822 
1823     netif_device_detach(dev);
1824     del_timer_sync(&np->timer);
1825     rio_hw_stop(dev);
1826 
1827     return 0;
1828 }
1829 
1830 static int rio_resume(struct device *device)
1831 {
1832     struct net_device *dev = dev_get_drvdata(device);
1833     struct netdev_private *np = netdev_priv(dev);
1834 
1835     if (!netif_running(dev))
1836         return 0;
1837 
1838     rio_reset_ring(np);
1839     rio_hw_init(dev);
1840     np->timer.expires = jiffies + 1 * HZ;
1841     add_timer(&np->timer);
1842     netif_device_attach(dev);
1843     dl2k_enable_int(np);
1844 
1845     return 0;
1846 }
1847 
1848 static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
1849 #define RIO_PM_OPS    (&rio_pm_ops)
1850 
1851 #else
1852 
1853 #define RIO_PM_OPS  NULL
1854 
1855 #endif /* CONFIG_PM_SLEEP */
1856 
1857 static struct pci_driver rio_driver = {
1858     .name       = "dl2k",
1859     .id_table   = rio_pci_tbl,
1860     .probe      = rio_probe1,
1861     .remove     = rio_remove1,
1862     .driver.pm  = RIO_PM_OPS,
1863 };
1864 
1865 module_pci_driver(rio_driver);
1866 
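/* Illustrative module load (the values shown are the driver defaults
 * declared above, not tuning advice; see the document referenced below
 * for the accepted media strings and the per-unit array syntax):
 *
 *   modprobe dl2k rx_coalesce=10 rx_timeout=200 tx_coalesce=16
 */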
1867 /* Read Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst. */