Back to home page

OSCL-LXR

 
 

    


0001 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
0002 /*
0003     Written/copyright 1997-2001 by Donald Becker.
0004 
0005     This software may be used and distributed according to the terms of
0006     the GNU General Public License (GPL), incorporated herein by reference.
0007     Drivers based on or derived from this code fall under the GPL and must
0008     retain the authorship, copyright and license notice.  This file is not
0009     a complete program and may only be used when the entire operating
0010     system is licensed under the GPL.
0011 
0012     This driver is for the SMC83c170/175 "EPIC" series, as used on the
0013     SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
0014 
0015     The author may be reached as becker@scyld.com, or C/O
0016     Scyld Computing Corporation
0017     410 Severn Ave., Suite 210
0018     Annapolis MD 21403
0019 
0020     Information and updates available at
0021     http://www.scyld.com/network/epic100.html
0022     [this link no longer provides anything useful -jgarzik]
0023 
0024     ---------------------------------------------------------------------
0025 
0026 */
0027 
#define DRV_NAME        "epic100"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sept 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;           /* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8     /* More are supported, limit only on options */
/* Per-card overrides, indexed by probe order; -1 means "not set". */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
0045 
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    256
#define TX_QUEUE_LEN    240     /* Limit ring entries actually used.  */
#define RX_RING_SIZE    256
/* Byte sizes of the descriptor rings.  Fully parenthesized so the
   macros expand safely inside larger expressions (the old unbracketed
   form would misgroup under e.g. a preceding unary or '/' operator). */
#define TX_TOTAL_SIZE   ((TX_RING_SIZE) * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE   ((RX_RING_SIZE) * sizeof(struct epic_rx_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer.*/

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1        /* 0-3, 0==32, 64,96, or 3==128 bytes  */
0069 
0070 #include <linux/module.h>
0071 #include <linux/kernel.h>
0072 #include <linux/string.h>
0073 #include <linux/timer.h>
0074 #include <linux/errno.h>
0075 #include <linux/ioport.h>
0076 #include <linux/interrupt.h>
0077 #include <linux/pci.h>
0078 #include <linux/delay.h>
0079 #include <linux/netdevice.h>
0080 #include <linux/etherdevice.h>
0081 #include <linux/skbuff.h>
0082 #include <linux/init.h>
0083 #include <linux/spinlock.h>
0084 #include <linux/ethtool.h>
0085 #include <linux/mii.h>
0086 #include <linux/crc32.h>
0087 #include <linux/bitops.h>
0088 #include <asm/io.h>
0089 #include <linux/uaccess.h>
0090 #include <asm/byteorder.h>
0091 
/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

/* Module parameters; mode 0 keeps them invisible in sysfs. */
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
/* NOTE(review): "0-5" here disagrees with the 'debug' definition above,
   whose comment says "..7 verbose" - confirm the real maximum. */
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
0110 
0111 /*
0112                 Theory of Operation
0113 
0114 I. Board Compatibility
0115 
0116 This device driver is designed for the SMC "EPIC/100", the SMC
0117 single-chip Ethernet controllers for PCI.  This chip is used on
0118 the SMC EtherPower II boards.
0119 
0120 II. Board-specific settings
0121 
0122 PCI bus devices are configured by the system at boot time, so no jumpers
0123 need to be set on the board.  The system BIOS will assign the
0124 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
0125 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
0126 interrupt lines.
0127 
0128 III. Driver operation
0129 
0130 IIIa. Ring buffers
0131 
0132 IVb. References
0133 
0134 http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
0135 http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
0136 http://scyld.com/expert/NWay.html
0137 http://www.national.com/pf/DP/DP83840A.html
0138 
0139 IVc. Errata
0140 
0141 */
0142 
0143 
/* Capability bits kept in epic_chip_info.drv_flags, one set per chip. */
enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100   /* Size of the chip's register window. */
#define USE_IO_OPS 1

/* BAR 0 is the port-I/O window, BAR 1 the memory-mapped one. */
#ifdef USE_IO_OPS
#define EPIC_BAR    0
#else
#define EPIC_BAR    1
#endif

/* Index into pci_id_tbl[], carried in pci_device_id.driver_data. */
typedef enum {
    SMSC_83C170_0,
    SMSC_83C170,
    SMSC_83C175,
} chip_t;
0160 
0161 
/* Static per-chip-model description: printable name plus capability flags. */
struct epic_chip_info {
    const char *name;
        int drv_flags;                          /* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
    { "SMSC EPIC/100 83c170",   TYPE2_INTR | NO_MII | MII_PWRDWN },
    { "SMSC EPIC/100 83c170",   TYPE2_INTR },
    { "SMSC EPIC/C 83c175",     TYPE2_INTR | MII_PWRDWN },
};
0174 
0175 
/* PCI match table; driver_data is the chip_t index into pci_id_tbl. */
static const struct pci_device_id epic_pci_tbl[] = {
    { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
    { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
    { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
      PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
    { 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);

/* Register accessors; each expansion expects a local 'ioaddr' in scope. */
#define ew16(reg, val)  iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)  iowrite32(val, ioaddr + (reg))
#define er8(reg)    ioread8(ioaddr + (reg))
#define er16(reg)   ioread16(ioaddr + (reg))
#define er32(reg)   ioread32(ioaddr + (reg))
0190 
/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,                      /* MAC address. */
  MC0=80,                       /* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
    TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
    PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
    RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
    TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
    RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
/* Bits written to the COMMAND register. */
enum CommandBits {
    StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
    StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved 0xffffffff  /* Chip failed or removed (CardBus) */

/* Events handled in the NAPI poll loop vs. directly in the interrupt. */
#define EpicNapiEvent   (TxEmpty | TxDone | \
             RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)

/* Maps the low 4 bits of dev->if_port to an MII BMCR value;
   0 means "leave autonegotiation alone". */
static const u16 media2miictl[16] = {
    0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
    0, 0, 0, 0,  0, 0, 0, 0 };
0225 
/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

/* One transmit descriptor; 'next' is the bus address of the successor. */
struct epic_tx_desc {
    u32 txstatus;
    u32 bufaddr;
    u32 buflength;
    u32 next;
};

/* One receive descriptor; same layout convention as epic_tx_desc. */
struct epic_rx_desc {
    u32 rxstatus;
    u32 bufaddr;
    u32 buflength;
    u32 next;
};

/* Set in the status word while the chip (not the host) owns the descriptor. */
enum desc_status_bits {
    DescOwn=0x8000,
};
0250 
#define PRIV_ALIGN  15  /* Required alignment mask */
/* Per-device driver state, hung off the net_device as netdev_priv(). */
struct epic_private {
    struct epic_rx_desc *rx_ring;       /* DMA-coherent descriptor rings. */
    struct epic_tx_desc *tx_ring;
    /* The saved address of a sent-in-place packet/buffer, for skfree(). */
    struct sk_buff* tx_skbuff[TX_RING_SIZE];
    /* The addresses of receive-in-place skbuffs. */
    struct sk_buff* rx_skbuff[RX_RING_SIZE];

    dma_addr_t tx_ring_dma;             /* Bus addresses of the rings. */
    dma_addr_t rx_ring_dma;

    /* Ring pointers. */
    spinlock_t lock;                /* Group with Tx control cache line. */
    spinlock_t napi_lock;
    struct napi_struct napi;
    unsigned int cur_tx, dirty_tx;      /* Producer/consumer ring indices. */

    unsigned int cur_rx, dirty_rx;
    u32 irq_mask;                       /* Interrupt sources we unmask. */
    unsigned int rx_buf_sz;             /* Based on MTU+slack. */

    void __iomem *ioaddr;               /* Mapped register window. */
    struct pci_dev *pci_dev;            /* PCI bus location. */
    int chip_id, chip_flags;

    struct timer_list timer;            /* Media selection timer. */
    int tx_threshold;
    unsigned char mc_filter[8];         /* Multicast hash filter shadow. */
    signed char phys[4];                /* MII device addresses. */
    u16 advertising;                    /* NWay media advertisement */
    int mii_phy_cnt;
    u32 ethtool_ops_nesting;
    struct mii_if_info mii;
    unsigned int tx_full:1;             /* The Tx queue is full. */
    unsigned int default_port:4;        /* Last dev->if_port value. */
};
0288 
/* Forward declarations for the net_device_ops table below. */
static int epic_open(struct net_device *dev);
static int read_eeprom(struct epic_private *, int);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(struct timer_list *t);
static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
                   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

/* Standard network-stack entry points for this driver. */
static const struct net_device_ops epic_netdev_ops = {
    .ndo_open       = epic_open,
    .ndo_stop       = epic_close,
    .ndo_start_xmit     = epic_start_xmit,
    .ndo_tx_timeout     = epic_tx_timeout,
    .ndo_get_stats      = epic_get_stats,
    .ndo_set_rx_mode    = set_rx_mode,
    .ndo_eth_ioctl      = netdev_ioctl,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
};
0319 
/* PCI probe: enable the device, map its registers, allocate the netdev
 * and DMA rings, read the MAC address, scan for MII transceivers and
 * register the interface.  Errors unwind through the goto chain at the
 * bottom in reverse acquisition order. */
static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    static int card_idx = -1;           /* Probe counter, indexes options[]. */
    void __iomem *ioaddr;
    int chip_idx = (int) ent->driver_data;
    struct net_device *dev;
    struct epic_private *ep;
    int i, ret, option = 0, duplex = 0;
    __le16 addr[ETH_ALEN / 2];
    void *ring_space;
    dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
    pr_info_once("%s%s\n", version, version2);
#endif

    card_idx++;

    ret = pci_enable_device(pdev);
    if (ret)
        goto out;

    if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
        dev_err(&pdev->dev, "no PCI region space\n");
        ret = -ENODEV;
        goto err_out_disable;
    }

    pci_set_master(pdev);

    ret = pci_request_regions(pdev, DRV_NAME);
    if (ret < 0)
        goto err_out_disable;

    /* Default error code for the allocation failures below. */
    ret = -ENOMEM;

    dev = alloc_etherdev(sizeof (*ep));
    if (!dev)
        goto err_out_free_res;

    SET_NETDEV_DEV(dev, &pdev->dev);

    ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
    if (!ioaddr) {
        dev_err(&pdev->dev, "ioremap failed\n");
        goto err_out_free_netdev;
    }

    pci_set_drvdata(pdev, dev);
    ep = netdev_priv(dev);
    ep->ioaddr = ioaddr;
    ep->mii.dev = dev;
    ep->mii.mdio_read = mdio_read;
    ep->mii.mdio_write = mdio_write;
    ep->mii.phy_id_mask = 0x1f;
    ep->mii.reg_num_mask = 0x1f;

    ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
                    GFP_KERNEL);
    if (!ring_space)
        goto err_out_iounmap;
    ep->tx_ring = ring_space;
    ep->tx_ring_dma = ring_dma;

    ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
                    GFP_KERNEL);
    if (!ring_space)
        goto err_out_unmap_tx;
    ep->rx_ring = ring_space;
    ep->rx_ring_dma = ring_dma;

    /* dev->mem_start (set via kernel command line) overrides the
       options[]/full_duplex[] module parameters. */
    if (dev->mem_start) {
        option = dev->mem_start;
        duplex = (dev->mem_start & 16) ? 1 : 0;
    } else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
        if (options[card_idx] >= 0)
            option = options[card_idx];
        if (full_duplex[card_idx] >= 0)
            duplex = full_duplex[card_idx];
    }

    spin_lock_init(&ep->lock);
    spin_lock_init(&ep->napi_lock);

    /* Bring the chip out of low-power mode. */
    ew32(GENCTL, 0x4200);
    /* Magic?!  If we don't set this bit the MII interface won't work. */
    /* This magic is documented in SMSC app note 7.15 */
    for (i = 16; i > 0; i--)
        ew32(TEST1, 0x0008);

    /* Turn on the MII transceiver. */
    ew32(MIICfg, 0x12);
    if (chip_idx == 1)
        ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
    ew32(GENCTL, 0x0200);

    /* Note: the '175 does not have a serial EEPROM. */
    /* The six MAC address bytes live in three 16-bit LAN0 registers. */
    for (i = 0; i < 3; i++)
        addr[i] = cpu_to_le16(er16(LAN0 + i*4));
    eth_hw_addr_set(dev, (u8 *)addr);

    if (debug > 2) {
        dev_dbg(&pdev->dev, "EEPROM contents:\n");
        for (i = 0; i < 64; i++)
            pr_cont(" %4.4x%s", read_eeprom(ep, i),
                   i % 16 == 15 ? "\n" : "");
    }

    ep->pci_dev = pdev;
    ep->chip_id = chip_idx;
    ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
    ep->irq_mask =
        (ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
         | CntFull | TxUnderrun | EpicNapiEvent;

    /* Find the connected MII xcvrs.
       Doing this in open() would allow detecting external xcvrs later, but
       takes much time and no cards have external MII. */
    {
        int phy, phy_idx = 0;
        /* sizeof(ep->phys) is the element count only because phys is a
           char array - keep that in mind if the type ever changes. */
        for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
            int mii_status = mdio_read(dev, phy, MII_BMSR);
            if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                ep->phys[phy_idx++] = phy;
                dev_info(&pdev->dev,
                    "MII transceiver #%d control "
                    "%4.4x status %4.4x.\n",
                    phy, mdio_read(dev, phy, 0), mii_status);
            }
        }
        ep->mii_phy_cnt = phy_idx;
        if (phy_idx != 0) {
            phy = ep->phys[0];
            ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
            dev_info(&pdev->dev,
                "Autonegotiation advertising %4.4x link "
                   "partner %4.4x.\n",
                   ep->mii.advertising, mdio_read(dev, phy, 5));
        } else if ( ! (ep->chip_flags & NO_MII)) {
            dev_warn(&pdev->dev,
                "***WARNING***: No MII transceiver found!\n");
            /* Use the known PHY address of the EPII. */
            ep->phys[0] = 3;
        }
        ep->mii.phy_id = ep->phys[0];
    }

    /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
    if (ep->chip_flags & MII_PWRDWN)
        ew32(NVCTL, er32(NVCTL) & ~0x483c);
    ew32(GENCTL, 0x0008);

    /* The lower four bits are the media type. */
    if (duplex) {
        ep->mii.force_media = ep->mii.full_duplex = 1;
        dev_info(&pdev->dev, "Forced full duplex requested.\n");
    }
    dev->if_port = ep->default_port = option;

    /* The Epic-specific entries in the device structure. */
    dev->netdev_ops = &epic_netdev_ops;
    dev->ethtool_ops = &netdev_ethtool_ops;
    dev->watchdog_timeo = TX_TIMEOUT;
    netif_napi_add(dev, &ep->napi, epic_poll, 64);

    ret = register_netdev(dev);
    if (ret < 0)
        goto err_out_unmap_rx;

    netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
            pci_id_tbl[chip_idx].name,
            (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
            dev->dev_addr);

out:
    return ret;

/* Unwind in reverse order of acquisition. */
err_out_unmap_rx:
    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
              ep->rx_ring_dma);
err_out_unmap_tx:
    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
              ep->tx_ring_dma);
err_out_iounmap:
    pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
    free_netdev(dev);
err_out_free_res:
    pci_release_regions(pdev);
err_out_disable:
    pci_disable_device(pdev);
    goto out;
}
0515 
/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
#define EE_CS           0x02    /* EEPROM chip select. */
#define EE_DATA_WRITE   0x08    /* EEPROM chip data in. */
#define EE_WRITE_0      0x01
#define EE_WRITE_1      0x09
#define EE_DATA_READ    0x10    /* EEPROM chip data out. */
#define EE_ENB          (0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()  er32(EECTL)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD    (5 << 6)
#define EE_READ64_CMD   (6 << 6)
#define EE_READ256_CMD  (6 << 8)
#define EE_ERASE_CMD    (7 << 6)
0538 
/* Mask all interrupt sources.  Does not flush the possibly posted PCI
   write; callers needing that follow up with __epic_pci_commit(). */
static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
    void __iomem *ioaddr = ep->ioaddr;

    ew32(INTMASK, 0x00000000);
}
0545 
/* Flush a posted PCI write by reading INTMASK back.  Only needed for
   memory-mapped I/O; port I/O is never posted, hence the #ifndef. */
static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
    er32(INTMASK);
#endif
}
0552 
/* Mask the NAPI-handled interrupt sources while the poll loop runs,
   then flush the write so the mask takes effect before returning. */
static inline void epic_napi_irq_off(struct net_device *dev,
                     struct epic_private *ep)
{
    void __iomem *ioaddr = ep->ioaddr;

    ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
    __epic_pci_commit(ioaddr);
}
0561 
/* Re-enable the NAPI-handled interrupt sources after polling. */
static inline void epic_napi_irq_on(struct net_device *dev,
                    struct epic_private *ep)
{
    void __iomem *ioaddr = ep->ioaddr;

    /* No need to commit possible posted write */
    ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}
0570 
/* Read one 16-bit word at 'location' from the serial EEPROM by
   bit-banging EECTL.  EECTL bit 0x40 reports which EEPROM size is
   attached, selecting the 64x16 vs 256x16 read command/address width. */
static int read_eeprom(struct epic_private *ep, int location)
{
    void __iomem *ioaddr = ep->ioaddr;
    int i;
    int retval = 0;
    int read_cmd = location |
        (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

    /* Pulse chip select to begin a fresh transaction. */
    ew32(EECTL, EE_ENB & ~EE_CS);
    ew32(EECTL, EE_ENB);

    /* Shift the read command bits out. */
    for (i = 12; i >= 0; i--) {
        short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
        ew32(EECTL, EE_ENB | dataval);
        eeprom_delay();                 /* flush the write to the bus */
        ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
        eeprom_delay();
    }
    ew32(EECTL, EE_ENB);

    /* Clock the 16 data bits in, most significant bit first. */
    for (i = 16; i > 0; i--) {
        ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
        eeprom_delay();
        retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
        ew32(EECTL, EE_ENB);
        eeprom_delay();
    }

    /* Terminate the EEPROM access. */
    ew32(EECTL, EE_ENB & ~EE_CS);
    return retval;
}
0604 
#define MII_READOP      1
#define MII_WRITEOP     2
/* Read an MII register; returns 0xffff if the operation never
   completes.  Polls MIICtrl until the chip clears the read-op bit. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;
    int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
    int i;

    ew32(MIICtrl, read_cmd);
    /* Typical operation takes 25 loops. */
    for (i = 400; i > 0; i--) {
        barrier();
        if ((er32(MIICtrl) & MII_READOP) == 0) {
            /* Work around read failure bug: retry when PHY 1,
               low registers come back all-ones. */
            if (phy_id == 1 && location < 6 &&
                er16(MIIData) == 0xffff) {
                ew32(MIICtrl, read_cmd);
                continue;
            }
            return er16(MIIData);
        }
    }
    return 0xffff;
}
0630 
/* Write an MII register, then poll until the chip clears the write-op
   bit.  A timeout is silently ignored - there is nothing to report to
   callers, matching the original driver behavior. */
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;
    int i;

    ew16(MIIData, value);
    ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
    for (i = 10000; i > 0; i--) {
        barrier();
        if ((er32(MIICtrl) & MII_WRITEOP) == 0)
            break;
    }
}
0645 
0646 
/* ndo_open: reset the chip, claim the IRQ, build the rings, program the
 * MAC address and media settings, start Rx and unmask interrupts.
 * Returns 0 on success or the request_irq() error.  The register write
 * order below is deliberate - see the in-line comments. */
static int epic_open(struct net_device *dev)
{
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;
    const int irq = ep->pci_dev->irq;
    int rc, i;

    /* Soft reset the chip. */
    ew32(GENCTL, 0x4001);

    napi_enable(&ep->napi);
    rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
    if (rc) {
        napi_disable(&ep->napi);
        return rc;
    }

    epic_init_ring(dev);

    ew32(GENCTL, 0x4000);
    /* This magic is documented in SMSC app note 7.15 */
    for (i = 16; i > 0; i--)
        ew32(TEST1, 0x0008);

    /* Pull the chip out of low-power mode, enable interrupts, and set for
       PCI read multiple.  The MIIcfg setting and strange write order are
       required by the details of which bits are reset and the transceiver
       wiring on the Ositech CardBus card.
    */
#if 0
    ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
    if (ep->chip_flags & MII_PWRDWN)
        ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

    /* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
    ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
    er32(GENCTL);
    ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
    ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
    er32(GENCTL);
    ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

    udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

    /* Program the station address, 16 bits per LAN0 register. */
    for (i = 0; i < 3; i++)
        ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

    ep->tx_threshold = TX_FIFO_THRESH;
    ew32(TxThresh, ep->tx_threshold);

    /* Forced media (low 4 bits of if_port) vs. autonegotiated duplex. */
    if (media2miictl[dev->if_port & 15]) {
        if (ep->mii_phy_cnt)
            mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
        if (dev->if_port == 1) {
            if (debug > 1)
                netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
                        mdio_read(dev, ep->phys[0], MII_BMSR));
        }
    } else {
        int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
        if (mii_lpa != 0xffff) {
            if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
                ep->mii.full_duplex = 1;
            else if (! (mii_lpa & LPA_LPACK))
                mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
            if (debug > 1)
                netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
                        ep->mii.full_duplex ? "full"
                                : "half",
                        ep->phys[0], mii_lpa);
        }
    }

    ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
    ew32(PRxCDAR, ep->rx_ring_dma);
    ew32(PTxCDAR, ep->tx_ring_dma);

    /* Start the chip's Rx process. */
    set_rx_mode(dev);
    ew32(COMMAND, StartRx | RxQueued);

    netif_start_queue(dev);

    /* Enable interrupts by setting the interrupt mask. */
    ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
         ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
         TxUnderrun);

    if (debug > 1) {
        netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
               ioaddr, irq, er32(GENCTL),
               ep->mii.full_duplex ? "full" : "half");
    }

    /* Set the timer to switch to check for link beat and perhaps switch
       to an alternate media type. */
    timer_setup(&ep->timer, epic_timer, 0);
    ep->timer.expires = jiffies + 3*HZ;
    add_timer(&ep->timer);

    return rc;
}
0753 
0754 /* Reset the chip to recover from a PCI transaction error.
0755    This may occur at interrupt time. */
/* Quiesce the chip: stop the queue, mask interrupts, halt DMA, fold the
   hardware error counters into dev->stats and drain the Rx ring.
   May be called at interrupt time to recover from PCI errors. */
static void epic_pause(struct net_device *dev)
{
    struct net_device_stats *stats = &dev->stats;
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;

    netif_stop_queue (dev);

    /* Disable interrupts by clearing the interrupt mask. */
    ew32(INTMASK, 0x00000000);
    /* Stop the chip's Tx and Rx DMA processes. */
    ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

    /* Update the error counts. */
    /* 0xffff means the chip has been removed (CardBus) - skip it then. */
    if (er16(COMMAND) != 0xffff) {
        stats->rx_missed_errors += er8(MPCNT);
        stats->rx_frame_errors  += er8(ALICNT);
        stats->rx_crc_errors    += er8(CRCCNT);
    }

    /* Remove the packets on the Rx queue. */
    epic_rx(dev, RX_RING_SIZE);
}
0779 
/* Soft-reset and fully reprogram the chip, resuming the rings from the
   current cur_rx/dirty_tx positions rather than from slot 0.  Used by
   the Tx-timeout and error-recovery paths. */
static void epic_restart(struct net_device *dev)
{
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;
    int i;

    /* Soft reset the chip. */
    ew32(GENCTL, 0x4001);

    netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
           ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
    udelay(1);

    /* This magic is documented in SMSC app note 7.15 */
    for (i = 16; i > 0; i--)
        ew32(TEST1, 0x0008);

    /* Re-enable descriptor byteswap on big-endian hosts (see epic_open). */
#ifdef __BIG_ENDIAN
    ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
    ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
    ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
    if (ep->chip_flags & MII_PWRDWN)
        ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

    /* Reprogram the station address. */
    for (i = 0; i < 3; i++)
        ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

    ep->tx_threshold = TX_FIFO_THRESH;
    ew32(TxThresh, ep->tx_threshold);
    ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
    /* Point the chip at the descriptors where it left off. */
    ew32(PRxCDAR, ep->rx_ring_dma +
         (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
    ew32(PTxCDAR, ep->tx_ring_dma +
         (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

    /* Start the chip's Rx process. */
    set_rx_mode(dev);
    ew32(COMMAND, StartRx | RxQueued);

    /* Enable interrupts by setting the interrupt mask. */
    ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
         ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
         TxUnderrun);

    netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
           er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
0829 
0830 static void check_media(struct net_device *dev)
0831 {
0832     struct epic_private *ep = netdev_priv(dev);
0833     void __iomem *ioaddr = ep->ioaddr;
0834     int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
0835     int negotiated = mii_lpa & ep->mii.advertising;
0836     int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
0837 
0838     if (ep->mii.force_media)
0839         return;
0840     if (mii_lpa == 0xffff)      /* Bogus read */
0841         return;
0842     if (ep->mii.full_duplex != duplex) {
0843         ep->mii.full_duplex = duplex;
0844         netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
0845                 ep->mii.full_duplex ? "full" : "half",
0846                 ep->phys[0], mii_lpa);
0847         ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
0848     }
0849 }
0850 
0851 static void epic_timer(struct timer_list *t)
0852 {
0853     struct epic_private *ep = from_timer(ep, t, timer);
0854     struct net_device *dev = ep->mii.dev;
0855     void __iomem *ioaddr = ep->ioaddr;
0856     int next_tick = 5*HZ;
0857 
0858     if (debug > 3) {
0859         netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
0860                er32(TxSTAT));
0861         netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
0862                er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
0863     }
0864 
0865     check_media(dev);
0866 
0867     ep->timer.expires = jiffies + next_tick;
0868     add_timer(&ep->timer);
0869 }
0870 
/* net_device watchdog hook: the stack decided transmission has stalled.
   A plain Tx FIFO underflow only needs the transmitter kicked; anything
   else gets a full chip restart.  Finally make sure the queue can drain. */
static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;

    if (debug > 0) {
        netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
                er16(TxSTAT));
        if (debug > 1) {
            netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
                   ep->dirty_tx, ep->cur_tx);
        }
    }
    if (er16(TxSTAT) & 0x10) {      /* Tx FIFO underflow. */
        dev->stats.tx_fifo_errors++;
        ew32(COMMAND, RestartTx);
    } else {
        /* Unknown stall: reinitialize the chip and requeue pending Tx. */
        epic_restart(dev);
        ew32(COMMAND, TxQueued);
    }

    netif_trans_update(dev); /* prevent tx timeout */
    dev->stats.tx_errors++;
    /* Restart the queue unless the ring really is full. */
    if (!ep->tx_full)
        netif_wake_queue(dev);
}
0897 
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
    struct epic_private *ep = netdev_priv(dev);
    int i;

    /* Reset all ring bookkeeping to "empty". */
    ep->tx_full = 0;
    ep->dirty_tx = ep->cur_tx = 0;
    ep->cur_rx = ep->dirty_rx = 0;
    /* A standard MTU fits in PKT_BUF_SZ; larger MTUs get 32 bytes slack. */
    ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

    /* Initialize all Rx descriptors.  Ownership is not yet given to
       the chip (rxstatus = 0); that only happens below once a buffer
       has actually been attached. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        ep->rx_ring[i].rxstatus = 0;
        ep->rx_ring[i].buflength = ep->rx_buf_sz;
        ep->rx_ring[i].next = ep->rx_ring_dma +
                      (i+1)*sizeof(struct epic_rx_desc);
        ep->rx_skbuff[i] = NULL;
    }
    /* Mark the last entry as wrapping the ring. */
    ep->rx_ring[i-1].next = ep->rx_ring_dma;

    /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
        ep->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;      /* epic_rx()'s refill loop will retry later */
        skb_reserve(skb, 2);    /* 16 byte align the IP header. */
        ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev,
                            skb->data,
                            ep->rx_buf_sz,
                            DMA_FROM_DEVICE);
        /* Hand the descriptor to the chip only after bufaddr is set. */
        ep->rx_ring[i].rxstatus = DescOwn;
    }
    /* Wraps negative when not every slot got a buffer, which keeps the
       refill loop in epic_rx() working on the missing entries. */
    ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    /* The Tx buffer descriptor is filled in as needed, but we
       do need to clear the ownership bit. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        ep->tx_skbuff[i] = NULL;
        ep->tx_ring[i].txstatus = 0x0000;
        ep->tx_ring[i].next = ep->tx_ring_dma +
            (i+1)*sizeof(struct epic_tx_desc);
    }
    /* Close the Tx ring into a loop as well. */
    ep->tx_ring[i-1].next = ep->tx_ring_dma;
}
0945 
/* ndo_start_xmit: place one skb on the Tx ring and kick the chip.
   Ring accesses are serialized against completion processing with
   ep->lock. */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;
    int entry, free_count;
    u32 ctrl_word;
    unsigned long flags;

    /* Pad runt frames up to the Ethernet minimum; skb_padto() frees
       the skb on failure, so simply report the packet as handled. */
    if (skb_padto(skb, ETH_ZLEN))
        return NETDEV_TX_OK;

    /* Caution: the write order is important here, set the field with the
       "ownership" bit last. */

    /* Calculate the next Tx descriptor entry. */
    spin_lock_irqsave(&ep->lock, flags);
    free_count = ep->cur_tx - ep->dirty_tx;    /* descriptors in flight */
    entry = ep->cur_tx % TX_RING_SIZE;

    ep->tx_skbuff[entry] = skb;
    ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
                            skb->data, skb->len,
                            DMA_TO_DEVICE);
    /* Request a Tx-done interrupt only occasionally, and always when
       the ring is about to fill, to limit interrupt load. */
    if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
        ctrl_word = 0x100000; /* No interrupt */
    } else if (free_count == TX_QUEUE_LEN/2) {
        ctrl_word = 0x140000; /* Tx-done intr. */
    } else if (free_count < TX_QUEUE_LEN - 1) {
        ctrl_word = 0x100000; /* No Tx-done intr. */
    } else {
        /* Leave room for an additional entry. */
        ctrl_word = 0x140000; /* Tx-done intr. */
        ep->tx_full = 1;
    }
    ep->tx_ring[entry].buflength = ctrl_word | skb->len;
    /* Writing DescOwn into txstatus hands the descriptor to the chip;
       this store must therefore come after every other field above. */
    ep->tx_ring[entry].txstatus =
        ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
                | DescOwn;

    ep->cur_tx++;
    if (ep->tx_full)
        netif_stop_queue(dev);

    spin_unlock_irqrestore(&ep->lock, flags);
    /* Trigger an immediate transmit demand. */
    ew32(COMMAND, TxQueued);

    if (debug > 4)
        netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
               skb->len, entry, ctrl_word, er32(TxSTAT));

    return NETDEV_TX_OK;
}
0999 
/* Account a failed transmit in the interface statistics, classifying
   the failure by the Tx descriptor status bits.
   @status: status word taken from the completed Tx descriptor. */
static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
              int status)
{
    struct net_device_stats *stats = &dev->stats;

#ifndef final_version
    /* There was an major error, log it. */
    if (debug > 1)
        netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
               status);
#endif
    stats->tx_errors++;
    /* NOTE(review): bit meanings below follow the stat each one feeds;
       confirm against the 83c170 descriptor documentation. */
    if (status & 0x1050)
        stats->tx_aborted_errors++;
    if (status & 0x0008)
        stats->tx_carrier_errors++;
    if (status & 0x0040)
        stats->tx_window_errors++;
    if (status & 0x0010)
        stats->tx_fifo_errors++;
}
1021 
/* Reclaim completed Tx descriptors: update statistics, unmap the DMA
   buffers and free the skbs, then wake the queue if it had been
   stopped because the ring was full. */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
    unsigned int dirty_tx, cur_tx;

    /*
     * Note: if this lock becomes a problem we can narrow the locked
     * region at the cost of occasionally grabbing the lock more times.
     */
    cur_tx = ep->cur_tx;
    for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
        struct sk_buff *skb;
        int entry = dirty_tx % TX_RING_SIZE;
        int txstatus = ep->tx_ring[entry].txstatus;

        if (txstatus & DescOwn)
            break;  /* It still hasn't been Txed */

        if (likely(txstatus & 0x0001)) {
            /* Success path; the collision count sits in bits 8-11. */
            dev->stats.collisions += (txstatus >> 8) & 15;
            dev->stats.tx_packets++;
            dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
        } else
            epic_tx_error(dev, ep, txstatus);

        /* Free the original skb. */
        skb = ep->tx_skbuff[entry];
        dma_unmap_single(&ep->pci_dev->dev,
                 ep->tx_ring[entry].bufaddr, skb->len,
                 DMA_TO_DEVICE);
        dev_consume_skb_irq(skb);
        ep->tx_skbuff[entry] = NULL;
    }

#ifndef final_version
    /* Sanity check: dirty_tx must never lag cur_tx by more than the
       ring size; if it does, resynchronize and warn. */
    if (cur_tx - dirty_tx > TX_RING_SIZE) {
        netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                dirty_tx, cur_tx, ep->tx_full);
        dirty_tx += TX_RING_SIZE;
    }
#endif
    ep->dirty_tx = dirty_tx;
    if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
        /* The ring is no longer full, allow new TX entries. */
        ep->tx_full = 0;
        netif_wake_queue(dev);
    }
}
1069 
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;
    unsigned int handled = 0;
    int status;

    status = er32(INTSTAT);
    /* Acknowledge all of the current interrupt sources ASAP. */
    ew32(INTSTAT, status & EpicNormalEvent);

    if (debug > 4) {
        netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
               status, er32(INTSTAT));
    }

    /* Not our interrupt (the IRQ line may be shared). */
    if ((status & IntrSummary) == 0)
        goto out;

    handled = 1;

    /* Normal Rx/Tx work is deferred to epic_poll(): mask the NAPI
       interrupt sources and schedule the poll routine. */
    if (status & EpicNapiEvent) {
        spin_lock(&ep->napi_lock);
        if (napi_schedule_prep(&ep->napi)) {
            epic_napi_irq_off(dev, ep);
            __napi_schedule(&ep->napi);
        }
        spin_unlock(&ep->napi_lock);
    }
    status &= ~EpicNapiEvent;

    /* Check uncommon events all at once. */
    if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
        struct net_device_stats *stats = &dev->stats;

        /* Presumably the card was removed and the bus reads all-ones. */
        if (status == EpicRemoved)
            goto out;

        /* Always update the error counts to avoid overhead later. */
        stats->rx_missed_errors += er8(MPCNT);
        stats->rx_frame_errors  += er8(ALICNT);
        stats->rx_crc_errors    += er8(CRCCNT);

        if (status & TxUnderrun) { /* Tx FIFO underflow. */
            stats->tx_fifo_errors++;
            /* Raise the Tx threshold before restarting the transmitter. */
            ew32(TxThresh, ep->tx_threshold += 128);
            /* Restart the transmit process. */
            ew32(COMMAND, RestartTx);
        }
        if (status & PCIBusErr170) {
            netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
                   status);
            epic_pause(dev);
            epic_restart(dev);
        }
        /* Clear all error sources. */
        ew32(INTSTAT, status & 0x7f18);
    }

out:
    if (debug > 3) {
        netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
               status);
    }

    return IRQ_RETVAL(handled);
}
1140 
/* Receive up to @budget packets from the Rx ring (NAPI context) and
   refill empty ring slots afterwards.  Returns the amount of work done,
   counting both delivered packets and refilled buffers. */
static int epic_rx(struct net_device *dev, int budget)
{
    struct epic_private *ep = netdev_priv(dev);
    int entry = ep->cur_rx % RX_RING_SIZE;
    /* Never look at more entries than currently hold buffers. */
    int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
    int work_done = 0;

    if (debug > 4)
        netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
               ep->rx_ring[entry].rxstatus);

    if (rx_work_limit > budget)
        rx_work_limit = budget;

    /* If we own the next entry, it's a new packet. Send it up. */
    while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
        int status = ep->rx_ring[entry].rxstatus;

        if (debug > 4)
            netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
                   status);
        if (--rx_work_limit < 0)
            break;
        if (status & 0x2006) {
            /* Error bits set: 0x2000 = frame spanned buffers,
               0x0006 = receive errors. */
            if (debug > 2)
                netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
                       status);
            if (status & 0x2000) {
                netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
                        status);
                dev->stats.rx_length_errors++;
            } else if (status & 0x0006)
                /* Rx Frame errors are counted in hardware. */
                dev->stats.rx_errors++;
        } else {
            /* Malloc up new buffer, compatible with net-2e. */
            /* Omit the four octet CRC from the length. */
            short pkt_len = (status >> 16) - 4;
            struct sk_buff *skb;

            if (pkt_len > PKT_BUF_SZ - 4) {
                netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
                       status, pkt_len);
                pkt_len = 1514;    /* clamp to a maximal frame */
            }
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak &&
                (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                /* Small frame: copy into a fresh skb and leave the
                   ring buffer (and its DMA mapping) in place. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                dma_sync_single_for_cpu(&ep->pci_dev->dev,
                            ep->rx_ring[entry].bufaddr,
                            ep->rx_buf_sz,
                            DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
                skb_put(skb, pkt_len);
                dma_sync_single_for_device(&ep->pci_dev->dev,
                               ep->rx_ring[entry].bufaddr,
                               ep->rx_buf_sz,
                               DMA_FROM_DEVICE);
            } else {
                /* Large frame: hand the ring buffer itself up the
                   stack; the refill loop below replaces it. */
                dma_unmap_single(&ep->pci_dev->dev,
                         ep->rx_ring[entry].bufaddr,
                         ep->rx_buf_sz,
                         DMA_FROM_DEVICE);
                skb_put(skb = ep->rx_skbuff[entry], pkt_len);
                ep->rx_skbuff[entry] = NULL;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_receive_skb(skb);
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += pkt_len;
        }
        work_done++;
        entry = (++ep->cur_rx) % RX_RING_SIZE;
    }

    /* Refill the Rx ring buffers. */
    for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
        entry = ep->dirty_rx % RX_RING_SIZE;
        if (ep->rx_skbuff[entry] == NULL) {
            struct sk_buff *skb;
            skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
            if (skb == NULL)
                break;    /* out of memory: retry on a later pass */
            skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
            ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
                                    skb->data,
                                    ep->rx_buf_sz,
                                    DMA_FROM_DEVICE);
            work_done++;
        }
        /* AV: shouldn't we add a barrier here? */
        ep->rx_ring[entry].rxstatus = DescOwn;
    }
    return work_done;
}
1238 
1239 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1240 {
1241     void __iomem *ioaddr = ep->ioaddr;
1242     int status;
1243 
1244     status = er32(INTSTAT);
1245 
1246     if (status == EpicRemoved)
1247         return;
1248     if (status & RxOverflow)    /* Missed a Rx frame. */
1249         dev->stats.rx_errors++;
1250     if (status & (RxOverflow | RxFull))
1251         ew16(COMMAND, RxQueued);
1252 }
1253 
/* NAPI poll callback: reap finished Tx descriptors, receive up to
   @budget packets, service Rx error conditions, and once under budget
   re-enable the chip's NAPI interrupt sources. */
static int epic_poll(struct napi_struct *napi, int budget)
{
    struct epic_private *ep = container_of(napi, struct epic_private, napi);
    struct net_device *dev = ep->mii.dev;
    void __iomem *ioaddr = ep->ioaddr;
    int work_done;

    epic_tx(dev, ep);

    work_done = epic_rx(dev, budget);

    epic_rx_err(dev, ep);

    if (work_done < budget && napi_complete_done(napi, work_done)) {
        unsigned long flags;

        spin_lock_irqsave(&ep->napi_lock, flags);

        /* Ack any pending NAPI events before unmasking them again. */
        ew32(INTSTAT, EpicNapiEvent);
        epic_napi_irq_on(dev, ep);
        spin_unlock_irqrestore(&ep->napi_lock, flags);
    }

    return work_done;
}
1279 
/* net_device close (ifdown): stop the queue and NAPI, silence and free
   the IRQ, halt the chip, then release every Rx/Tx buffer and its DMA
   mapping before dropping the chip into low-power mode. */
static int epic_close(struct net_device *dev)
{
    struct epic_private *ep = netdev_priv(dev);
    struct pci_dev *pdev = ep->pci_dev;
    void __iomem *ioaddr = ep->ioaddr;
    struct sk_buff *skb;
    int i;

    netif_stop_queue(dev);
    napi_disable(&ep->napi);

    if (debug > 1)
        netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
               er32(INTSTAT));

    /* Stop the media-monitor timer before tearing anything down. */
    del_timer_sync(&ep->timer);

    epic_disable_int(dev, ep);

    free_irq(pdev->irq, dev);

    epic_pause(dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        skb = ep->rx_skbuff[i];
        ep->rx_skbuff[i] = NULL;
        ep->rx_ring[i].rxstatus = 0;        /* Not owned by Epic chip. */
        ep->rx_ring[i].buflength = 0;
        if (skb) {
            dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr,
                     ep->rx_buf_sz, DMA_FROM_DEVICE);
            dev_kfree_skb(skb);
        }
        ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
    }
    /* Tx slots only hold a DMA mapping while an skb is pending. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        skb = ep->tx_skbuff[i];
        ep->tx_skbuff[i] = NULL;
        if (!skb)
            continue;
        dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len,
                 DMA_TO_DEVICE);
        dev_kfree_skb(skb);
    }

    /* Green! Leave the chip in low-power mode. */
    ew32(GENCTL, 0x0008);

    return 0;
}
1331 
1332 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1333 {
1334     struct epic_private *ep = netdev_priv(dev);
1335     void __iomem *ioaddr = ep->ioaddr;
1336 
1337     if (netif_running(dev)) {
1338         struct net_device_stats *stats = &dev->stats;
1339 
1340         stats->rx_missed_errors += er8(MPCNT);
1341         stats->rx_frame_errors  += er8(ALICNT);
1342         stats->rx_crc_errors    += er8(CRCCNT);
1343     }
1344 
1345     return &dev->stats;
1346 }
1347 
1348 /* Set or clear the multicast filter for this adaptor.
1349    Note that we only use exclusion around actually queueing the
1350    new frame, not around filling ep->setup_frame.  This is non-deterministic
1351    when re-entered but still correct. */
1352 
1353 static void set_rx_mode(struct net_device *dev)
1354 {
1355     struct epic_private *ep = netdev_priv(dev);
1356     void __iomem *ioaddr = ep->ioaddr;
1357     unsigned char mc_filter[8];      /* Multicast hash filter */
1358     int i;
1359 
1360     if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
1361         ew32(RxCtrl, 0x002c);
1362         /* Unconditionally log net taps. */
1363         memset(mc_filter, 0xff, sizeof(mc_filter));
1364     } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1365         /* There is apparently a chip bug, so the multicast filter
1366            is never enabled. */
1367         /* Too many to filter perfectly -- accept all multicasts. */
1368         memset(mc_filter, 0xff, sizeof(mc_filter));
1369         ew32(RxCtrl, 0x000c);
1370     } else if (netdev_mc_empty(dev)) {
1371         ew32(RxCtrl, 0x0004);
1372         return;
1373     } else {                    /* Never executed, for now. */
1374         struct netdev_hw_addr *ha;
1375 
1376         memset(mc_filter, 0, sizeof(mc_filter));
1377         netdev_for_each_mc_addr(ha, dev) {
1378             unsigned int bit_nr =
1379                 ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1380             mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1381         }
1382     }
1383     /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1384     if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1385         for (i = 0; i < 4; i++)
1386             ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1387         memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1388     }
1389 }
1390 
1391 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1392 {
1393     struct epic_private *np = netdev_priv(dev);
1394 
1395     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1396     strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1397     strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1398 }
1399 
1400 static int netdev_get_link_ksettings(struct net_device *dev,
1401                      struct ethtool_link_ksettings *cmd)
1402 {
1403     struct epic_private *np = netdev_priv(dev);
1404 
1405     spin_lock_irq(&np->lock);
1406     mii_ethtool_get_link_ksettings(&np->mii, cmd);
1407     spin_unlock_irq(&np->lock);
1408 
1409     return 0;
1410 }
1411 
1412 static int netdev_set_link_ksettings(struct net_device *dev,
1413                      const struct ethtool_link_ksettings *cmd)
1414 {
1415     struct epic_private *np = netdev_priv(dev);
1416     int rc;
1417 
1418     spin_lock_irq(&np->lock);
1419     rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
1420     spin_unlock_irq(&np->lock);
1421 
1422     return rc;
1423 }
1424 
1425 static int netdev_nway_reset(struct net_device *dev)
1426 {
1427     struct epic_private *np = netdev_priv(dev);
1428     return mii_nway_restart(&np->mii);
1429 }
1430 
1431 static u32 netdev_get_link(struct net_device *dev)
1432 {
1433     struct epic_private *np = netdev_priv(dev);
1434     return mii_link_ok(&np->mii);
1435 }
1436 
/* ethtool: expose the module-wide debug level as the message level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
    return debug;
}
1441 
/* ethtool: set the module-wide debug level from the message level. */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
    debug = value;
}
1446 
/* ethtool ops prologue: power the chip up if the interface is down so
   register accesses work.  Nesting-counted and paired with
   ethtool_complete(). */
static int ethtool_begin(struct net_device *dev)
{
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;

    /* Refuse rather than overflow the nesting counter. */
    if (ep->ethtool_ops_nesting == U32_MAX)
        return -EBUSY;
    /* power-up, if interface is down */
    if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) {
        ew32(GENCTL, 0x0200);
        ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
    }
    return 0;
}
1461 
/* ethtool ops epilogue: undo the power-up done in ethtool_begin() once
   the outermost nested ethtool operation has finished. */
static void ethtool_complete(struct net_device *dev)
{
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;

    /* power-down, if interface is down */
    if (!--ep->ethtool_ops_nesting && !netif_running(dev)) {
        ew32(GENCTL, 0x0008);
        ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
    }
}
1473 
/* ethtool operations.  begin/complete bracket every op so the chip is
   temporarily powered up even while the interface is down. */
static const struct ethtool_ops netdev_ethtool_ops = {
    .get_drvinfo        = netdev_get_drvinfo,
    .nway_reset     = netdev_nway_reset,
    .get_link       = netdev_get_link,
    .get_msglevel       = netdev_get_msglevel,
    .set_msglevel       = netdev_set_msglevel,
    .begin          = ethtool_begin,
    .complete       = ethtool_complete,
    .get_link_ksettings = netdev_get_link_ksettings,
    .set_link_ksettings = netdev_set_link_ksettings,
};
1485 
/* Handle the SIOC[GS]MIIxxx ioctls through the generic MII layer.  If
   the interface is down the chip is powered up for the duration of the
   call (same register sequence as ethtool_begin/complete). */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct epic_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->ioaddr;
    struct mii_ioctl_data *data = if_mii(rq);
    int rc;

    /* power-up, if interface is down */
    if (! netif_running(dev)) {
        ew32(GENCTL, 0x0200);
        ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
    }

    /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
    spin_lock_irq(&np->lock);
    rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
    spin_unlock_irq(&np->lock);

    /* power-down, if interface is down */
    if (! netif_running(dev)) {
        ew32(GENCTL, 0x0008);
        ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
    }
    return rc;
}
1511 
1512 
/* PCI remove callback: unregister the netdev first so no new activity
   starts, then free the DMA rings, unmap the registers and release the
   PCI device. */
static void epic_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct epic_private *ep = netdev_priv(dev);

    unregister_netdev(dev);
    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
              ep->tx_ring_dma);
    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
              ep->rx_ring_dma);
    pci_iounmap(pdev, ep->ioaddr);
    free_netdev(dev);
    pci_release_regions(pdev);
    pci_disable_device(pdev);
    /* pci_power_off(pdev, -1); */
}
1529 
/* PM suspend callback: pause the chip and leave it in low-power mode.
   Nothing to do when the interface is already down. */
static int __maybe_unused epic_suspend(struct device *dev_d)
{
    struct net_device *dev = dev_get_drvdata(dev_d);
    struct epic_private *ep = netdev_priv(dev);
    void __iomem *ioaddr = ep->ioaddr;

    if (!netif_running(dev))
        return 0;
    epic_pause(dev);
    /* Put the chip into low-power mode. */
    ew32(GENCTL, 0x0008);
    /* pci_power_off(pdev, -1); */
    return 0;
}
1544 
1545 
1546 static int __maybe_unused epic_resume(struct device *dev_d)
1547 {
1548     struct net_device *dev = dev_get_drvdata(dev_d);
1549 
1550     if (!netif_running(dev))
1551         return 0;
1552     epic_restart(dev);
1553     /* pci_power_on(pdev); */
1554     return 0;
1555 }
1556 
1557 static SIMPLE_DEV_PM_OPS(epic_pm_ops, epic_suspend, epic_resume);
1558 
/* PCI driver glue: probe/remove plus power-management callbacks. */
static struct pci_driver epic_driver = {
    .name       = DRV_NAME,
    .id_table   = epic_pci_tbl,
    .probe      = epic_init_one,
    .remove     = epic_remove_one,
    .driver.pm  = &epic_pm_ops,
};
1566 
1567 
/* Module init: print the version banner (module builds only) and
   register the PCI driver; device setup happens in the probe callback. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
    pr_info("%s%s\n", version, version2);
#endif

    return pci_register_driver(&epic_driver);
}
1577 
1578 
/* Module exit: unregister the PCI driver, detaching all devices. */
static void __exit epic_cleanup (void)
{
    pci_unregister_driver (&epic_driver);
}
1583 
1584 
/* Module entry and exit points. */
module_init(epic_init);
module_exit(epic_cleanup);