0001 /* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
0002 /*
0003     Written/copyright 1993-1998 by Donald Becker.
0004 
0005     Copyright 1993 United States Government as represented by the
0006     Director, National Security Agency.
0007     This software may be used and distributed according to the terms
0008     of the GNU General Public License, incorporated herein by reference.
0009 
0010     This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
0011     with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
0012 
0013     The author may be reached as becker@scyld.com, or C/O
0014     Scyld Computing Corporation
0015     410 Severn Ave., Suite 210
0016     Annapolis MD 21403
0017 
0018     Andrey V. Savochkin:
0019     - alignment problem with 1.3.* kernel and some minor changes.
0020     Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
0021     - added support for Linux/Alpha, but removed most of it, because
0022         it worked only for the PCI chip.
0023       - added hook for the 32bit lance driver
0024       - added PCnetPCI II (79C970A) to chip table
0025     Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
0026     - hopefully fix above so Linux/Alpha can use ISA cards too.
0027     8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
0028     v1.12 10/27/97 Module support -djb
0029     v1.14  2/3/98 Module support modified, made PCI support optional -djb
0030     v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
0031                   before unregister_netdev() which caused NULL pointer
0032                   reference later in the chain (in rtnetlink_fill_ifinfo())
0033                   -- Mika Kuoppala <miku@iki.fi>
0034 
0035     Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
0036     the 2.1 version of the old driver - Alan Cox
0037 
0038     Get rid of check_region, check kmalloc return in lance_probe1
0039     Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
0040 
0041     Reworked detection, added support for Racal InterLan EtherBlaster cards
0042     Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
0043 */
0044 
0045 static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
0046 
0047 #include <linux/module.h>
0048 #include <linux/kernel.h>
0049 #include <linux/string.h>
0050 #include <linux/delay.h>
0051 #include <linux/errno.h>
0052 #include <linux/ioport.h>
0053 #include <linux/slab.h>
0054 #include <linux/interrupt.h>
0055 #include <linux/pci.h>
0056 #include <linux/init.h>
0057 #include <linux/netdevice.h>
0058 #include <linux/etherdevice.h>
0059 #include <linux/skbuff.h>
0060 #include <linux/mm.h>
0061 #include <linux/bitops.h>
0062 
0063 #include <asm/io.h>
0064 #include <asm/dma.h>
0065 
0066 static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
0067 static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
0068 static int __init do_lance_probe(struct net_device *dev);
0069 
0070 
0071 static struct card {
0072     char id_offset14;
0073     char id_offset15;
0074 } cards[] = {
0075     {   //"normal"
0076         .id_offset14 = 0x57,
0077         .id_offset15 = 0x57,
0078     },
0079     {   //NI6510EB
0080         .id_offset14 = 0x52,
0081         .id_offset15 = 0x44,
0082     },
0083     {   //Racal InterLan EtherBlaster
0084         .id_offset14 = 0x52,
0085         .id_offset15 = 0x49,
0086     },
0087 };
0088 #define NUM_CARDS 3
0089 
0090 #ifdef LANCE_DEBUG
0091 static int lance_debug = LANCE_DEBUG;
0092 #else
0093 static int lance_debug = 1;
0094 #endif
0095 
0096 /*
0097                 Theory of Operation
0098 
0099 I. Board Compatibility
0100 
0101 This device driver is designed for the AMD 79C960, the "PCnet-ISA
0102 single-chip ethernet controller for ISA".  This chip is used in a wide
0103 variety of boards from vendors such as Allied Telesis, HP, Kingston,
0104 and Boca.  This driver is also intended to work with older AMD 7990
0105 designs, such as the NE1500 and NE2100, and the newer 79C961.  For convenience,
0106 I use the name LANCE to refer to all of the AMD chips, even though it properly
0107 refers only to the original 7990.
0108 
0109 II. Board-specific settings
0110 
0111 The driver is designed to work with the boards that use the faster
0112 bus-master mode, rather than in shared memory mode.  (Only older designs
0113 have on-board buffer memory needed to support the slower shared memory mode.)
0114 
0115 Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
0116 channel.  This driver probes the likely base addresses:
0117 {0x300, 0x320, 0x340, 0x360}.
0118 After the board is found it generates a DMA-timeout interrupt and uses
0119 autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
0120 of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
0121 probed for by enabling each free DMA channel in turn and checking if
0122 initialization succeeds.
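
For example (an illustration with assumed values, not taken from the text
above): booting with "ether=5,0x300,0x05,eth0" leaves 0x05 in PARAM1
(dev->mem_start), and lance_probe1() then simply takes

    if (dev->mem_start & 0x07)
        dev->dma = dev->mem_start & 0x07;

instead of cycling through the free DMA channels; loading the driver as a
module with "dma=5" sets dev->dma directly and has the same effect.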
0123 
0124 The HP-J2405A board is an exception: with this board it is easy to read the
0125 EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
0126 _know_ the base address -- that field is for writing the EEPROM.)
0127 
0128 III. Driver operation
0129 
0130 IIIA. Ring buffers
0131 The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
0132 the base and length of the data buffer, along with status bits.  The length
0133 of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
0134 the buffer length (rather than being directly the buffer length) for
0135 implementation ease.  The current values are 4 (Tx) and 4 (Rx), which leads to
0136 ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
0137 needlessly uses extra space and reduces the chance that an upper layer will
0138 be able to reorder queued Tx packets based on priority.  Decreasing the number
0139 of entries makes it more difficult to achieve back-to-back packet transmission
0140 and increases the chance that the Rx ring will overflow.  (Consider the worst case
0141 of receiving back-to-back minimum-sized packets.)
0142 
0143 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
0144 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
0145 avoid the administrative overhead. For the Rx side this avoids dynamically
0146 allocating full-sized buffers "just in case", at the expense of a
0147 memory-to-memory data copy for each packet received.  For most systems this
0148 is a good tradeoff: the Rx buffer will always be in low memory, the copy
0149 is inexpensive, and it primes the cache for later packet processing.  For Tx
0150 the buffers are only used when needed as low-memory bounce buffers.
0151 
0152 IIIB. 16M memory limitations.
0153 For the ISA bus master mode all structures used directly by the LANCE,
0154 the initialization block, Rx and Tx rings, and data buffers, must be
0155 accessible from the ISA bus, i.e. in the lower 16M of real memory.
0156 This is a problem for current Linux kernels on >16M machines. The network
0157 devices are initialized after memory initialization, and the kernel doles out
0158 memory from the top of memory downward.  The current solution is to have a
0159 special network initialization routine that's called before memory
0160 initialization; this will eventually be generalized for all network devices.
0161 As mentioned before, low-memory "bounce-buffers" are used when needed.
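
Concretely, lance_start_xmit() further down applies a test of this form
(a paraphrase of the code below, shown here only to illustrate the point):

    if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000)
        skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);

so only packets whose data would end above the 16M ISA limit pay the copy.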
0162 
0163 IIIC. Synchronization
0164 The driver runs as two independent, single-threaded flows of control.  One
0165 is the send-packet routine, which enforces single-threaded use by the
0166 dev->tbusy flag.  The other thread is the interrupt handler, which is single
0167 threaded by the hardware and other software.
0168 
0169 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
0170 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
0171 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
0172 the 'lp->tx_full' flag.
0173 
0174 The interrupt handler has exclusive control over the Rx ring and records stats
0175 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
0176 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
0177 stats.)  After reaping the stats, it marks the queue entry as empty by setting
0178 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
0179 tx_full and tbusy flags.
0180 
0181 */
0182 
0183 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
0184    Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
0185    That translates to 4 and 4 (16 == 2^^4).
0186    This is a compile-time option for efficiency.
0187    */
0188 #ifndef LANCE_LOG_TX_BUFFERS
0189 #define LANCE_LOG_TX_BUFFERS 4
0190 #define LANCE_LOG_RX_BUFFERS 4
0191 #endif
0192 
0193 #define TX_RING_SIZE            (1 << (LANCE_LOG_TX_BUFFERS))
0194 #define TX_RING_MOD_MASK        (TX_RING_SIZE - 1)
0195 #define TX_RING_LEN_BITS        ((LANCE_LOG_TX_BUFFERS) << 29)
0196 
0197 #define RX_RING_SIZE            (1 << (LANCE_LOG_RX_BUFFERS))
0198 #define RX_RING_MOD_MASK        (RX_RING_SIZE - 1)
0199 #define RX_RING_LEN_BITS        ((LANCE_LOG_RX_BUFFERS) << 29)
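
/*
 * Worked example (illustrative, following from the macros above): with
 * LANCE_LOG_RX_BUFFERS == 4, RX_RING_SIZE is 1 << 4 == 16, RX_RING_MOD_MASK
 * is 0x0f, and RX_RING_LEN_BITS is 4 << 29 == 0x80000000.  The three high
 * bits of the init-block ring pointer hold log2 of the ring length, which
 * is how the LANCE learns the ring size (see struct lance_init_block below).
 */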
0200 
0201 #define PKT_BUF_SZ      1544
0202 
0203 /* Offsets from base I/O address. */
0204 #define LANCE_DATA 0x10
0205 #define LANCE_ADDR 0x12
0206 #define LANCE_RESET 0x14
0207 #define LANCE_BUS_IF 0x16
0208 #define LANCE_TOTAL_SIZE 0x18
0209 
0210 #define TX_TIMEOUT  (HZ/5)
0211 
0212 /* The LANCE Rx and Tx ring descriptors. */
0213 struct lance_rx_head {
0214     s32 base;
0215     s16 buf_length;         /* This length is 2s complement (negative)! */
0216     s16 msg_length;         /* This length is "normal". */
0217 };
0218 
0219 struct lance_tx_head {
0220     s32 base;
0221     s16 length;             /* Length is 2s complement (negative)! */
0222     s16 misc;
0223 };
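
/*
 * Note (worked example, not from the original comments): the chip reads the
 * buffer lengths as 16-bit two's complement values, so for PKT_BUF_SZ == 1544
 * the driver stores -1544, i.e. 0xf9f8, in buf_length/length (see
 * lance_init_ring() and lance_start_xmit()).
 */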
0224 
0225 /* The LANCE initialization block, described in databook. */
0226 struct lance_init_block {
0227     u16 mode;       /* Pre-set mode (reg. 15) */
0228     u8  phys_addr[6]; /* Physical ethernet address */
0229     u32 filter[2];          /* Multicast filter (unused). */
0230     /* Receive and transmit ring base, along with extra bits. */
0231     u32  rx_ring;           /* Tx and Rx ring base pointers */
0232     u32  tx_ring;
0233 };
0234 
0235 struct lance_private {
0236     /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
0237     struct lance_rx_head rx_ring[RX_RING_SIZE];
0238     struct lance_tx_head tx_ring[TX_RING_SIZE];
0239     struct lance_init_block init_block;
0240     const char *name;
0241     /* The saved address of a sent-in-place packet/buffer, for skfree(). */
0242     struct sk_buff* tx_skbuff[TX_RING_SIZE];
0243     /* The addresses of receive-in-place skbuffs. */
0244     struct sk_buff* rx_skbuff[RX_RING_SIZE];
0245     unsigned long rx_buffs;     /* Address of Rx and Tx buffers. */
0246     /* Tx low-memory "bounce buffer" address. */
0247     char (*tx_bounce_buffs)[PKT_BUF_SZ];
0248     int cur_rx, cur_tx;         /* The next free ring entry */
0249     int dirty_rx, dirty_tx;     /* The ring entries to be free()ed. */
0250     int dma;
0251     unsigned char chip_version; /* See lance_chip_type. */
0252     spinlock_t devlock;
0253 };
0254 
0255 #define LANCE_MUST_PAD          0x00000001
0256 #define LANCE_ENABLE_AUTOSELECT 0x00000002
0257 #define LANCE_MUST_REINIT_RING  0x00000004
0258 #define LANCE_MUST_UNRESET      0x00000008
0259 #define LANCE_HAS_MISSED_FRAME  0x00000010
0260 
0261 /* A mapping from the chip ID number to the part number and features.
0262    These are from the datasheets -- in real life the '970 version
0263    reportedly has the same ID as the '965. */
0264 static struct lance_chip_type {
0265     int id_number;
0266     const char *name;
0267     int flags;
0268 } chip_table[] = {
0269     {0x0000, "LANCE 7990",              /* Ancient lance chip.  */
0270         LANCE_MUST_PAD + LANCE_MUST_UNRESET},
0271     {0x0003, "PCnet/ISA 79C960",        /* 79C960 PCnet/ISA.  */
0272         LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
0273             LANCE_HAS_MISSED_FRAME},
0274     {0x2260, "PCnet/ISA+ 79C961",       /* 79C961 PCnet/ISA+, Plug-n-Play.  */
0275         LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
0276             LANCE_HAS_MISSED_FRAME},
0277     {0x2420, "PCnet/PCI 79C970",        /* 79C970 or 79C974 PCnet-SCSI, PCI. */
0278         LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
0279             LANCE_HAS_MISSED_FRAME},
0280     /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
0281         it the PCnet32. */
0282     {0x2430, "PCnet32",                 /* 79C965 PCnet for VL bus. */
0283         LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
0284             LANCE_HAS_MISSED_FRAME},
0285     {0x2621, "PCnet/PCI-II 79C970A",        /* 79C970A PCnet/PCI II. */
0286         LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
0287             LANCE_HAS_MISSED_FRAME},
0288     {0x0,    "PCnet (unknown)",
0289         LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
0290             LANCE_HAS_MISSED_FRAME},
0291 };
0292 
0293 enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
0294 
0295 
0296 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
0297    Assume yes until we know the memory size. */
0298 static unsigned char lance_need_isa_bounce_buffers = 1;
0299 
0300 static int lance_open(struct net_device *dev);
0301 static void lance_init_ring(struct net_device *dev, gfp_t mode);
0302 static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
0303                     struct net_device *dev);
0304 static int lance_rx(struct net_device *dev);
0305 static irqreturn_t lance_interrupt(int irq, void *dev_id);
0306 static int lance_close(struct net_device *dev);
0307 static struct net_device_stats *lance_get_stats(struct net_device *dev);
0308 static void set_multicast_list(struct net_device *dev);
0309 static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
0310 
0311 
0312 
0313 #ifdef MODULE
0314 #define MAX_CARDS       8   /* Max number of interfaces (cards) per module */
0315 
0316 static struct net_device *dev_lance[MAX_CARDS];
0317 static int io[MAX_CARDS];
0318 static int dma[MAX_CARDS];
0319 static int irq[MAX_CARDS];
0320 
0321 module_param_hw_array(io, int, ioport, NULL, 0);
0322 module_param_hw_array(dma, int, dma, NULL, 0);
0323 module_param_hw_array(irq, int, irq, NULL, 0);
0324 module_param(lance_debug, int, 0);
0325 MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
0326 MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
0327 MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
0328 MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
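
/*
 * Example invocation (parameter values here are illustrative only):
 *
 *   modprobe lance io=0x300,0x320 irq=5,0 dma=5,0
 *
 * probes two cards; an irq or dma entry of 0 is auto-detected in
 * lance_probe1(), while io= is mandatory, as lance_init_module() warns.
 */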
0329 
0330 static int __init lance_init_module(void)
0331 {
0332     struct net_device *dev;
0333     int this_dev, found = 0;
0334 
0335     for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
0336         if (io[this_dev] == 0)  {
0337             if (this_dev != 0) /* only complain once */
0338                 break;
0339             printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
0340             return -EPERM;
0341         }
0342         dev = alloc_etherdev(0);
0343         if (!dev)
0344             break;
0345         dev->irq = irq[this_dev];
0346         dev->base_addr = io[this_dev];
0347         dev->dma = dma[this_dev];
0348         if (do_lance_probe(dev) == 0) {
0349             dev_lance[found++] = dev;
0350             continue;
0351         }
0352         free_netdev(dev);
0353         break;
0354     }
0355     if (found != 0)
0356         return 0;
0357     return -ENXIO;
0358 }
0359 module_init(lance_init_module);
0360 
0361 static void cleanup_card(struct net_device *dev)
0362 {
0363     struct lance_private *lp = dev->ml_priv;
0364     if (dev->dma != 4)
0365         free_dma(dev->dma);
0366     release_region(dev->base_addr, LANCE_TOTAL_SIZE);
0367     kfree(lp->tx_bounce_buffs);
0368     kfree((void*)lp->rx_buffs);
0369     kfree(lp);
0370 }
0371 
0372 static void __exit lance_cleanup_module(void)
0373 {
0374     int this_dev;
0375 
0376     for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
0377         struct net_device *dev = dev_lance[this_dev];
0378         if (dev) {
0379             unregister_netdev(dev);
0380             cleanup_card(dev);
0381             free_netdev(dev);
0382         }
0383     }
0384 }
0385 module_exit(lance_cleanup_module);
0386 #endif /* MODULE */
0387 MODULE_LICENSE("GPL");
0388 
0389 
0390 /* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other
0391    board probes, now that kmalloc() can allocate ISA DMA-able regions.
0392    This also allows the LANCE driver to be used as a module.
0393    */
0394 static int __init do_lance_probe(struct net_device *dev)
0395 {
0396     unsigned int *port;
0397     int result;
0398 
0399     if (high_memory <= phys_to_virt(16*1024*1024))
0400         lance_need_isa_bounce_buffers = 0;
0401 
0402     for (port = lance_portlist; *port; port++) {
0403         int ioaddr = *port;
0404         struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
0405                             "lance-probe");
0406 
0407         if (r) {
0408             /* Detect the card with minimal I/O reads */
0409             char offset14 = inb(ioaddr + 14);
0410             int card;
0411             for (card = 0; card < NUM_CARDS; ++card)
0412                 if (cards[card].id_offset14 == offset14)
0413                     break;
0414             if (card < NUM_CARDS) {/*yes, the first byte matches*/
0415                 char offset15 = inb(ioaddr + 15);
0416                 for (card = 0; card < NUM_CARDS; ++card)
0417                     if ((cards[card].id_offset14 == offset14) &&
0418                         (cards[card].id_offset15 == offset15))
0419                         break;
0420             }
0421             if (card < NUM_CARDS) { /*Signature OK*/
0422                 result = lance_probe1(dev, ioaddr, 0, 0);
0423                 if (!result) {
0424                     struct lance_private *lp = dev->ml_priv;
0425                     int ver = lp->chip_version;
0426 
0427                     r->name = chip_table[ver].name;
0428                     return 0;
0429                 }
0430             }
0431             release_region(ioaddr, LANCE_TOTAL_SIZE);
0432         }
0433     }
0434     return -ENODEV;
0435 }
0436 
0437 #ifndef MODULE
0438 struct net_device * __init lance_probe(int unit)
0439 {
0440     struct net_device *dev = alloc_etherdev(0);
0441     int err;
0442 
0443     if (!dev)
0444         return ERR_PTR(-ENODEV);
0445 
0446     sprintf(dev->name, "eth%d", unit);
0447     netdev_boot_setup_check(dev);
0448 
0449     err = do_lance_probe(dev);
0450     if (err)
0451         goto out;
0452     return dev;
0453 out:
0454     free_netdev(dev);
0455     return ERR_PTR(err);
0456 }
0457 #endif
0458 
0459 static const struct net_device_ops lance_netdev_ops = {
0460     .ndo_open       = lance_open,
0461     .ndo_start_xmit     = lance_start_xmit,
0462     .ndo_stop       = lance_close,
0463     .ndo_get_stats      = lance_get_stats,
0464     .ndo_set_rx_mode    = set_multicast_list,
0465     .ndo_tx_timeout     = lance_tx_timeout,
0466     .ndo_set_mac_address    = eth_mac_addr,
0467     .ndo_validate_addr  = eth_validate_addr,
0468 };
0469 
0470 static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
0471 {
0472     struct lance_private *lp;
0473     unsigned long dma_channels; /* Mark spuriously-busy DMA channels */
0474     int i, reset_val, lance_version;
0475     const char *chipname;
0476     /* Flags for specific chips or boards. */
0477     unsigned char hpJ2405A = 0; /* HP ISA adaptor */
0478     int hp_builtin = 0;     /* HP on-board ethernet. */
0479     static int did_version;     /* Already printed version info. */
0480     unsigned long flags;
0481     int err = -ENOMEM;
0482     void __iomem *bios;
0483     u8 addr[ETH_ALEN];
0484 
0485     /* First we look for special cases.
0486        Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
0487        There are two HP versions, check the BIOS for the configuration port.
0488        This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
0489        */
0490     bios = ioremap(0xf00f0, 0x14);
0491     if (!bios)
0492         return -ENOMEM;
0493     if (readw(bios + 0x12) == 0x5048)  {
0494         static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
0495         int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
0496         /* We can have boards other than the built-in!  Verify this is on-board. */
0497         if ((inb(hp_port) & 0xc0) == 0x80 &&
0498             ioaddr_table[inb(hp_port) & 3] == ioaddr)
0499             hp_builtin = hp_port;
0500     }
0501     iounmap(bios);
0502     /* We also recognize the HP Vectra on-board here, but check below. */
0503     hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
0504             inb(ioaddr+2) == 0x09);
0505 
0506     /* Reset the LANCE.  */
0507     reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
0508 
0509     /* The Un-Reset is only needed for the real NE2100, and will
0510        confuse the HP board. */
0511     if (!hpJ2405A)
0512         outw(reset_val, ioaddr+LANCE_RESET);
0513 
0514     outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
0515     if (inw(ioaddr+LANCE_DATA) != 0x0004)
0516         return -ENODEV;
0517 
0518     /* Get the version of the chip. */
0519     outw(88, ioaddr+LANCE_ADDR);
0520     if (inw(ioaddr+LANCE_ADDR) != 88) {
0521         lance_version = 0;
0522     } else {            /* Good, it's a newer chip. */
0523         int chip_version = inw(ioaddr+LANCE_DATA);
0524         outw(89, ioaddr+LANCE_ADDR);
0525         chip_version |= inw(ioaddr+LANCE_DATA) << 16;
0526         if (lance_debug > 2)
0527             printk("  LANCE chip version is %#x.\n", chip_version);
0528         if ((chip_version & 0xfff) != 0x003)
0529             return -ENODEV;
0530         chip_version = (chip_version >> 12) & 0xffff;
0531         for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
0532             if (chip_table[lance_version].id_number == chip_version)
0533                 break;
0534         }
0535     }
0536 
0537     /* We can't allocate private data from alloc_etherdev() because it must
0538        be in an ISA DMA-able region. */
0539     chipname = chip_table[lance_version].name;
0540     printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
0541 
0542     /* There is a 16 byte station address PROM at the base address.
0543        The first six bytes are the station address. */
0544     for (i = 0; i < 6; i++)
0545         addr[i] = inb(ioaddr + i);
0546     eth_hw_addr_set(dev, addr);
0547     printk("%pM", dev->dev_addr);
0548 
0549     dev->base_addr = ioaddr;
0550     /* Make certain the data structures used by the LANCE are aligned and DMAble. */
0551 
0552     lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
0553     if (!lp)
0554         return -ENOMEM;
0555     if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
0556     dev->ml_priv = lp;
0557     lp->name = chipname;
0558     lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
0559                             GFP_DMA | GFP_KERNEL);
0560     if (!lp->rx_buffs)
0561         goto out_lp;
0562     if (lance_need_isa_bounce_buffers) {
0563         lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
0564                             GFP_DMA | GFP_KERNEL);
0565         if (!lp->tx_bounce_buffs)
0566             goto out_rx;
0567     } else
0568         lp->tx_bounce_buffs = NULL;
0569 
0570     lp->chip_version = lance_version;
0571     spin_lock_init(&lp->devlock);
0572 
0573     lp->init_block.mode = 0x0003;       /* Disable Rx and Tx. */
0574     for (i = 0; i < 6; i++)
0575         lp->init_block.phys_addr[i] = dev->dev_addr[i];
0576     lp->init_block.filter[0] = 0x00000000;
0577     lp->init_block.filter[1] = 0x00000000;
0578     lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
0579     lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
0580 
0581     outw(0x0001, ioaddr+LANCE_ADDR);
0582     inw(ioaddr+LANCE_ADDR);
0583     outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
0584     outw(0x0002, ioaddr+LANCE_ADDR);
0585     inw(ioaddr+LANCE_ADDR);
0586     outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
0587     outw(0x0000, ioaddr+LANCE_ADDR);
0588     inw(ioaddr+LANCE_ADDR);
0589 
0590     if (irq) {                  /* Set iff PCI card. */
0591         dev->dma = 4;           /* Native bus-master, no DMA channel needed. */
0592         dev->irq = irq;
0593     } else if (hp_builtin) {
0594         static const char dma_tbl[4] = {3, 5, 6, 0};
0595         static const char irq_tbl[4] = {3, 4, 5, 9};
0596         unsigned char port_val = inb(hp_builtin);
0597         dev->dma = dma_tbl[(port_val >> 4) & 3];
0598         dev->irq = irq_tbl[(port_val >> 2) & 3];
0599         printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
0600     } else if (hpJ2405A) {
0601         static const char dma_tbl[4] = {3, 5, 6, 7};
0602         static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
0603         short reset_val = inw(ioaddr+LANCE_RESET);
0604         dev->dma = dma_tbl[(reset_val >> 2) & 3];
0605         dev->irq = irq_tbl[(reset_val >> 4) & 7];
0606         printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
0607     } else if (lance_version == PCNET_ISAP) {       /* The plug-n-play version. */
0608         short bus_info;
0609         outw(8, ioaddr+LANCE_ADDR);
0610         bus_info = inw(ioaddr+LANCE_BUS_IF);
0611         dev->dma = bus_info & 0x07;
0612         dev->irq = (bus_info >> 4) & 0x0F;
0613     } else {
0614         /* The DMA channel may be passed in PARAM1. */
0615         if (dev->mem_start & 0x07)
0616             dev->dma = dev->mem_start & 0x07;
0617     }
0618 
0619     if (dev->dma == 0) {
0620         /* Read the DMA channel status register, so that we can avoid
0621            stuck DMA channels in the DMA detection below. */
0622         dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
0623             (inb(DMA2_STAT_REG) & 0xf0);
0624     }
0625     err = -ENODEV;
0626     if (dev->irq >= 2)
0627         printk(" assigned IRQ %d", dev->irq);
0628     else if (lance_version != 0)  { /* 7990 boards need DMA detection first. */
0629         unsigned long irq_mask;
0630 
0631         /* To auto-IRQ we enable the initialization-done and DMA error
0632            interrupts. For ISA boards we get a DMA error, but VLB and PCI
0633            boards will work. */
0634         irq_mask = probe_irq_on();
0635 
0636         /* Trigger an initialization just for the interrupt. */
0637         outw(0x0041, ioaddr+LANCE_DATA);
0638 
0639         mdelay(20);
0640         dev->irq = probe_irq_off(irq_mask);
0641         if (dev->irq)
0642             printk(", probed IRQ %d", dev->irq);
0643         else {
0644             printk(", failed to detect IRQ line.\n");
0645             goto out_tx;
0646         }
0647 
0648         /* Check for the initialization done bit, 0x0100, which means
0649            that we don't need a DMA channel. */
0650         if (inw(ioaddr+LANCE_DATA) & 0x0100)
0651             dev->dma = 4;
0652     }
0653 
0654     if (dev->dma == 4) {
0655         printk(", no DMA needed.\n");
0656     } else if (dev->dma) {
0657         if (request_dma(dev->dma, chipname)) {
0658             printk("DMA %d allocation failed.\n", dev->dma);
0659             goto out_tx;
0660         } else
0661             printk(", assigned DMA %d.\n", dev->dma);
0662     } else {            /* OK, we have to auto-DMA. */
0663         for (i = 0; i < 4; i++) {
0664             static const char dmas[] = { 5, 6, 7, 3 };
0665             int dma = dmas[i];
0666             int boguscnt;
0667 
0668             /* Don't enable a permanently busy DMA channel, or the machine
0669                will hang. */
0670             if (test_bit(dma, &dma_channels))
0671                 continue;
0672             outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
0673             if (request_dma(dma, chipname))
0674                 continue;
0675 
0676             flags=claim_dma_lock();
0677             set_dma_mode(dma, DMA_MODE_CASCADE);
0678             enable_dma(dma);
0679             release_dma_lock(flags);
0680 
0681             /* Trigger an initialization. */
0682             outw(0x0001, ioaddr+LANCE_DATA);
0683             for (boguscnt = 100; boguscnt > 0; --boguscnt)
0684                 if (inw(ioaddr+LANCE_DATA) & 0x0900)
0685                     break;
0686             if (inw(ioaddr+LANCE_DATA) & 0x0100) {
0687                 dev->dma = dma;
0688                 printk(", DMA %d.\n", dev->dma);
0689                 break;
0690             } else {
0691                 flags=claim_dma_lock();
0692                 disable_dma(dma);
0693                 release_dma_lock(flags);
0694                 free_dma(dma);
0695             }
0696         }
0697         if (i == 4) {           /* Failure: bail. */
0698             printk("DMA detection failed.\n");
0699             goto out_tx;
0700         }
0701     }
0702 
0703     if (lance_version == 0 && dev->irq == 0) {
0704         /* We may auto-IRQ now that we have a DMA channel. */
0705         /* Trigger an initialization just for the interrupt. */
0706         unsigned long irq_mask;
0707 
0708         irq_mask = probe_irq_on();
0709         outw(0x0041, ioaddr+LANCE_DATA);
0710 
0711         mdelay(40);
0712         dev->irq = probe_irq_off(irq_mask);
0713         if (dev->irq == 0) {
0714             printk("  Failed to detect the 7990 IRQ line.\n");
0715             goto out_dma;
0716         }
0717         printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
0718     }
0719 
0720     if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
0721         /* Turn on auto-select of media (10baseT or BNC) so that the user
0722            can watch the LEDs even if the board isn't opened. */
0723         outw(0x0002, ioaddr+LANCE_ADDR);
0724         /* Don't touch 10base2 power bit. */
0725         outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
0726     }
0727 
0728     if (lance_debug > 0  &&  did_version++ == 0)
0729         printk(version);
0730 
0731     /* The LANCE-specific entries in the device structure. */
0732     dev->netdev_ops = &lance_netdev_ops;
0733     dev->watchdog_timeo = TX_TIMEOUT;
0734 
0735     err = register_netdev(dev);
0736     if (err)
0737         goto out_dma;
0738     return 0;
0739 out_dma:
0740     if (dev->dma != 4)
0741         free_dma(dev->dma);
0742 out_tx:
0743     kfree(lp->tx_bounce_buffs);
0744 out_rx:
0745     kfree((void*)lp->rx_buffs);
0746 out_lp:
0747     kfree(lp);
0748     return err;
0749 }
0750 
0751 
0752 static int
0753 lance_open(struct net_device *dev)
0754 {
0755     struct lance_private *lp = dev->ml_priv;
0756     int ioaddr = dev->base_addr;
0757     int i;
0758 
0759     if (dev->irq == 0 ||
0760         request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
0761         return -EAGAIN;
0762     }
0763 
0764     /* We used to allocate DMA here, but that was silly.
0765        DMA lines can't be shared!  We now permanently allocate them. */
0766 
0767     /* Reset the LANCE */
0768     inw(ioaddr+LANCE_RESET);
0769 
0770     /* The DMA controller is used as a no-operation slave, "cascade mode". */
0771     if (dev->dma != 4) {
0772         unsigned long flags=claim_dma_lock();
0773         enable_dma(dev->dma);
0774         set_dma_mode(dev->dma, DMA_MODE_CASCADE);
0775         release_dma_lock(flags);
0776     }
0777 
0778     /* Un-Reset the LANCE, needed only for the NE2100. */
0779     if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
0780         outw(0, ioaddr+LANCE_RESET);
0781 
0782     if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
0783         /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
0784         outw(0x0002, ioaddr+LANCE_ADDR);
0785         /* Only touch autoselect bit. */
0786         outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
0787     }
0788 
0789     if (lance_debug > 1)
0790         printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
0791                dev->name, dev->irq, dev->dma,
0792                    (u32) isa_virt_to_bus(lp->tx_ring),
0793                    (u32) isa_virt_to_bus(lp->rx_ring),
0794                (u32) isa_virt_to_bus(&lp->init_block));
0795 
0796     lance_init_ring(dev, GFP_KERNEL);
0797     /* Re-initialize the LANCE, and start it when done. */
0798     outw(0x0001, ioaddr+LANCE_ADDR);
0799     outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
0800     outw(0x0002, ioaddr+LANCE_ADDR);
0801     outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
0802 
0803     outw(0x0004, ioaddr+LANCE_ADDR);
0804     outw(0x0915, ioaddr+LANCE_DATA);
0805 
0806     outw(0x0000, ioaddr+LANCE_ADDR);
0807     outw(0x0001, ioaddr+LANCE_DATA);
0808 
0809     netif_start_queue (dev);
0810 
0811     i = 0;
0812     while (i++ < 100)
0813         if (inw(ioaddr+LANCE_DATA) & 0x0100)
0814             break;
0815     /*
0816      * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
0817      * reports that doing so triggers a bug in the '974.
0818      */
0819     outw(0x0042, ioaddr+LANCE_DATA);
0820 
0821     if (lance_debug > 2)
0822         printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
0823                dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
0824 
0825     return 0;                   /* Always succeed */
0826 }
0827 
0828 /* The LANCE has been halted for one reason or another (busmaster memory
0829    arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
0830    etc.).  Modern LANCE variants always reload their ring-buffer
0831    configuration when restarted, so we must reinitialize our ring
0832    context before restarting.  As part of this reinitialization,
0833    find all packets still on the Tx ring and pretend that they had been
0834    sent (in effect, drop the packets on the floor) - the higher-level
0835    protocols will time out and retransmit.  It'd be better to shuffle
0836    these skbs to a temp list and then actually re-Tx them after
0837    restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
0838 */
0839 
0840 static void
0841 lance_purge_ring(struct net_device *dev)
0842 {
0843     struct lance_private *lp = dev->ml_priv;
0844     int i;
0845 
0846     /* Free all the skbuffs in the Rx and Tx queues. */
0847     for (i = 0; i < RX_RING_SIZE; i++) {
0848         struct sk_buff *skb = lp->rx_skbuff[i];
0849         lp->rx_skbuff[i] = NULL;
0850         lp->rx_ring[i].base = 0;        /* Not owned by LANCE chip. */
0851         if (skb)
0852             dev_kfree_skb_any(skb);
0853     }
0854     for (i = 0; i < TX_RING_SIZE; i++) {
0855         if (lp->tx_skbuff[i]) {
0856             dev_kfree_skb_any(lp->tx_skbuff[i]);
0857             lp->tx_skbuff[i] = NULL;
0858         }
0859     }
0860 }
0861 
0862 
0863 /* Initialize the LANCE Rx and Tx rings. */
0864 static void
0865 lance_init_ring(struct net_device *dev, gfp_t gfp)
0866 {
0867     struct lance_private *lp = dev->ml_priv;
0868     int i;
0869 
0870     lp->cur_rx = lp->cur_tx = 0;
0871     lp->dirty_rx = lp->dirty_tx = 0;
0872 
0873     for (i = 0; i < RX_RING_SIZE; i++) {
0874         struct sk_buff *skb;
0875         void *rx_buff;
0876 
0877         skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
0878         lp->rx_skbuff[i] = skb;
0879         if (skb)
0880             rx_buff = skb->data;
0881         else
0882             rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
0883         if (rx_buff == NULL)
0884             lp->rx_ring[i].base = 0;
0885         else
0886             lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
0887         lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
0888     }
0889     /* The Tx buffer address is filled in as needed, but we do need to clear
0890        the upper ownership bit. */
0891     for (i = 0; i < TX_RING_SIZE; i++) {
0892         lp->tx_skbuff[i] = NULL;
0893         lp->tx_ring[i].base = 0;
0894     }
0895 
0896     lp->init_block.mode = 0x0000;
0897     for (i = 0; i < 6; i++)
0898         lp->init_block.phys_addr[i] = dev->dev_addr[i];
0899     lp->init_block.filter[0] = 0x00000000;
0900     lp->init_block.filter[1] = 0x00000000;
0901     lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
0902     lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
0903 }
0904 
0905 static void
0906 lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
0907 {
0908     struct lance_private *lp = dev->ml_priv;
0909 
0910     if (must_reinit ||
0911         (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
0912         lance_purge_ring(dev);
0913         lance_init_ring(dev, GFP_ATOMIC);
0914     }
0915     outw(0x0000,    dev->base_addr + LANCE_ADDR);
0916     outw(csr0_bits, dev->base_addr + LANCE_DATA);
0917 }
0918 
0919 
0920 static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
0921 {
0922     struct lance_private *lp = (struct lance_private *) dev->ml_priv;
0923     int ioaddr = dev->base_addr;
0924 
0925     outw (0, ioaddr + LANCE_ADDR);
0926     printk ("%s: transmit timed out, status %4.4x, resetting.\n",
0927         dev->name, inw (ioaddr + LANCE_DATA));
0928     outw (0x0004, ioaddr + LANCE_DATA);
0929     dev->stats.tx_errors++;
0930 #ifndef final_version
0931     if (lance_debug > 3) {
0932         int i;
0933         printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
0934           lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
0935             lp->cur_rx);
0936         for (i = 0; i < RX_RING_SIZE; i++)
0937             printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
0938              lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
0939                 lp->rx_ring[i].msg_length);
0940         for (i = 0; i < TX_RING_SIZE; i++)
0941             printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
0942                  lp->tx_ring[i].base, -lp->tx_ring[i].length,
0943                 lp->tx_ring[i].misc);
0944         printk ("\n");
0945     }
0946 #endif
0947     lance_restart (dev, 0x0043, 1);
0948 
0949     netif_trans_update(dev); /* prevent tx timeout */
0950     netif_wake_queue (dev);
0951 }
0952 
0953 
0954 static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
0955                     struct net_device *dev)
0956 {
0957     struct lance_private *lp = dev->ml_priv;
0958     int ioaddr = dev->base_addr;
0959     int entry;
0960     unsigned long flags;
0961 
0962     spin_lock_irqsave(&lp->devlock, flags);
0963 
0964     if (lance_debug > 3) {
0965         outw(0x0000, ioaddr+LANCE_ADDR);
0966         printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
0967                inw(ioaddr+LANCE_DATA));
0968         outw(0x0000, ioaddr+LANCE_DATA);
0969     }
0970 
0971     /* Fill in a Tx ring entry */
0972 
0973     /* Mask to ring buffer boundary. */
0974     entry = lp->cur_tx & TX_RING_MOD_MASK;
0975 
0976     /* Caution: the write order is important here, set the base address
0977        with the "ownership" bits last. */
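    /* Note: the 0x83000000 used below sets OWN, STP and ENP in the
       descriptor's high byte (owned by the LANCE, start of packet, end of
       packet), per the AMD databook. */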
0978 
0979     /* The old LANCE chips don't automatically pad buffers to the minimum size. */
0980     if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
0981         if (skb->len < ETH_ZLEN) {
0982             if (skb_padto(skb, ETH_ZLEN))
0983                 goto out;
0984             lp->tx_ring[entry].length = -ETH_ZLEN;
0985         }
0986         else
0987             lp->tx_ring[entry].length = -skb->len;
0988     } else
0989         lp->tx_ring[entry].length = -skb->len;
0990 
0991     lp->tx_ring[entry].misc = 0x0000;
0992 
0993     dev->stats.tx_bytes += skb->len;
0994 
0995     /* If any part of this buffer is >16M we must copy it to a low-memory
0996        buffer. */
0997     if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
0998         if (lance_debug > 5)
0999             printk("%s: bouncing a high-memory packet (%#x).\n",
1000                    dev->name, (u32)isa_virt_to_bus(skb->data));
1001         skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
1002         lp->tx_ring[entry].base =
1003             ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
1004         dev_kfree_skb(skb);
1005     } else {
1006         lp->tx_skbuff[entry] = skb;
1007         lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
1008     }
1009     lp->cur_tx++;
1010 
1011     /* Trigger an immediate send poll. */
1012     outw(0x0000, ioaddr+LANCE_ADDR);
1013     outw(0x0048, ioaddr+LANCE_DATA);
1014 
1015     if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1016         netif_stop_queue(dev);
1017 
1018 out:
1019     spin_unlock_irqrestore(&lp->devlock, flags);
1020     return NETDEV_TX_OK;
1021 }
1022 
1023 /* The LANCE interrupt handler. */
1024 static irqreturn_t lance_interrupt(int irq, void *dev_id)
1025 {
1026     struct net_device *dev = dev_id;
1027     struct lance_private *lp;
1028     int csr0, ioaddr, boguscnt=10;
1029     int must_restart;
1030 
1031     ioaddr = dev->base_addr;
1032     lp = dev->ml_priv;
1033 
1034     spin_lock (&lp->devlock);
1035 
1036     outw(0x00, dev->base_addr + LANCE_ADDR);
1037     while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
1038            --boguscnt >= 0) {
1039         /* Acknowledge all of the current interrupt sources ASAP. */
1040         outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
1041 
1042         must_restart = 0;
1043 
1044         if (lance_debug > 5)
1045             printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
1046                    dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
1047 
1048         if (csr0 & 0x0400)          /* Rx interrupt */
1049             lance_rx(dev);
1050 
1051         if (csr0 & 0x0200) {        /* Tx-done interrupt */
1052             int dirty_tx = lp->dirty_tx;
1053 
1054             while (dirty_tx < lp->cur_tx) {
1055                 int entry = dirty_tx & TX_RING_MOD_MASK;
1056                 int status = lp->tx_ring[entry].base;
1057 
1058                 if (status < 0)
1059                     break;          /* It still hasn't been Txed */
1060 
1061                 lp->tx_ring[entry].base = 0;
1062 
1063                 if (status & 0x40000000) {
1064                     /* There was a major error, log it. */
1065                     int err_status = lp->tx_ring[entry].misc;
1066                     dev->stats.tx_errors++;
1067                     if (err_status & 0x0400)
1068                         dev->stats.tx_aborted_errors++;
1069                     if (err_status & 0x0800)
1070                         dev->stats.tx_carrier_errors++;
1071                     if (err_status & 0x1000)
1072                         dev->stats.tx_window_errors++;
1073                     if (err_status & 0x4000) {
1074                         /* Ackk!  On FIFO errors the Tx unit is turned off! */
1075                         dev->stats.tx_fifo_errors++;
1076                         /* Remove this verbosity later! */
1077                         printk("%s: Tx FIFO error! Status %4.4x.\n",
1078                                dev->name, csr0);
1079                         /* Restart the chip. */
1080                         must_restart = 1;
1081                     }
1082                 } else {
1083                     if (status & 0x18000000)
1084                         dev->stats.collisions++;
1085                     dev->stats.tx_packets++;
1086                 }
1087 
1088                 /* We must free the original skb if it's not a data-only copy
1089                    in the bounce buffer. */
1090                 if (lp->tx_skbuff[entry]) {
1091                     dev_consume_skb_irq(lp->tx_skbuff[entry]);
1092                     lp->tx_skbuff[entry] = NULL;
1093                 }
1094                 dirty_tx++;
1095             }
1096 
1097 #ifndef final_version
1098             if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1099                 printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
1100                        dirty_tx, lp->cur_tx,
1101                        netif_queue_stopped(dev) ? "yes" : "no");
1102                 dirty_tx += TX_RING_SIZE;
1103             }
1104 #endif
1105 
1106             /* if the ring is no longer full, accept more packets */
1107             if (netif_queue_stopped(dev) &&
1108                 dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
1109                 netif_wake_queue (dev);
1110 
1111             lp->dirty_tx = dirty_tx;
1112         }
1113 
1114         /* Log misc errors. */
1115         if (csr0 & 0x4000)
1116             dev->stats.tx_errors++; /* Tx babble. */
1117         if (csr0 & 0x1000)
1118             dev->stats.rx_errors++; /* Missed a Rx frame. */
1119         if (csr0 & 0x0800) {
1120             printk("%s: Bus master arbitration failure, status %4.4x.\n",
1121                    dev->name, csr0);
1122             /* Restart the chip. */
1123             must_restart = 1;
1124         }
1125 
1126         if (must_restart) {
1127             /* stop the chip to clear the error condition, then restart */
1128             outw(0x0000, dev->base_addr + LANCE_ADDR);
1129             outw(0x0004, dev->base_addr + LANCE_DATA);
1130             lance_restart(dev, 0x0002, 0);
1131         }
1132     }
1133 
1134     /* Clear any other interrupt, and set interrupt enable. */
1135     outw(0x0000, dev->base_addr + LANCE_ADDR);
1136     outw(0x7940, dev->base_addr + LANCE_DATA);
1137 
1138     if (lance_debug > 4)
1139         printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1140                dev->name, inw(ioaddr + LANCE_ADDR),
1141                inw(dev->base_addr + LANCE_DATA));
1142 
1143     spin_unlock (&lp->devlock);
1144     return IRQ_HANDLED;
1145 }
1146 
1147 static int
1148 lance_rx(struct net_device *dev)
1149 {
1150     struct lance_private *lp = dev->ml_priv;
1151     int entry = lp->cur_rx & RX_RING_MOD_MASK;
1152     int i;
1153 
1154     /* If we own the next entry, it's a new packet. Send it up. */
1155     while (lp->rx_ring[entry].base >= 0) {
1156         int status = lp->rx_ring[entry].base >> 24;
1157 
1158         if (status != 0x03) {           /* There was an error. */
1159             /* There is a tricky error noted by John Murphy,
1160                <murf@perftech.com> to Russ Nelson: Even with full-sized
1161                buffers it's possible for a jabber packet to use two
1162                buffers, with only the last correctly noting the error. */
1163             if (status & 0x01)  /* Only count a general error at the */
1164                 dev->stats.rx_errors++; /* end of a packet.*/
1165             if (status & 0x20)
1166                 dev->stats.rx_frame_errors++;
1167             if (status & 0x10)
1168                 dev->stats.rx_over_errors++;
1169             if (status & 0x08)
1170                 dev->stats.rx_crc_errors++;
1171             if (status & 0x04)
1172                 dev->stats.rx_fifo_errors++;
1173             lp->rx_ring[entry].base &= 0x03ffffff;
1174         }
1175         else
1176         {
1177             /* Malloc up new buffer, compatible with net3. */
1178             short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1179             struct sk_buff *skb;
1180 
1181             if(pkt_len<60)
1182             {
1183                 printk("%s: Runt packet!\n",dev->name);
1184                 dev->stats.rx_errors++;
1185             }
1186             else
1187             {
1188                 skb = dev_alloc_skb(pkt_len+2);
1189                 if (skb == NULL)
1190                 {
1191                     printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1192                     for (i=0; i < RX_RING_SIZE; i++)
1193                         if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1194                             break;
1195 
1196                     if (i > RX_RING_SIZE -2)
1197                     {
1198                         dev->stats.rx_dropped++;
1199                         lp->rx_ring[entry].base |= 0x80000000;
1200                         lp->cur_rx++;
1201                     }
1202                     break;
1203                 }
1204                 skb_reserve(skb,2); /* 16 byte align */
1205                 skb_put(skb,pkt_len);   /* Make room */
1206                 skb_copy_to_linear_data(skb,
1207                     (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1208                     pkt_len);
1209                 skb->protocol=eth_type_trans(skb,dev);
1210                 netif_rx(skb);
1211                 dev->stats.rx_packets++;
1212                 dev->stats.rx_bytes += pkt_len;
1213             }
1214         }
1215         /* The docs say that the buffer length isn't touched, but Andrew Boyd
1216            of QNX reports that some revs of the 79C965 clear it. */
1217         lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1218         lp->rx_ring[entry].base |= 0x80000000;
1219         entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1220     }
1221 
1222     /* We should check that at least two ring entries are free.  If not,
1223        we should free one and mark stats->rx_dropped++. */
1224 
1225     return 0;
1226 }
1227 
1228 static int
1229 lance_close(struct net_device *dev)
1230 {
1231     int ioaddr = dev->base_addr;
1232     struct lance_private *lp = dev->ml_priv;
1233 
1234     netif_stop_queue (dev);
1235 
1236     if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1237         outw(112, ioaddr+LANCE_ADDR);
1238         dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1239     }
1240     outw(0, ioaddr+LANCE_ADDR);
1241 
1242     if (lance_debug > 1)
1243         printk("%s: Shutting down ethercard, status was %2.2x.\n",
1244                dev->name, inw(ioaddr+LANCE_DATA));
1245 
1246     /* We stop the LANCE here -- it occasionally polls
1247        memory if we don't. */
1248     outw(0x0004, ioaddr+LANCE_DATA);
1249 
1250     if (dev->dma != 4)
1251     {
1252         unsigned long flags=claim_dma_lock();
1253         disable_dma(dev->dma);
1254         release_dma_lock(flags);
1255     }
1256     free_irq(dev->irq, dev);
1257 
1258     lance_purge_ring(dev);
1259 
1260     return 0;
1261 }
1262 
1263 static struct net_device_stats *lance_get_stats(struct net_device *dev)
1264 {
1265     struct lance_private *lp = dev->ml_priv;
1266 
1267     if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1268         short ioaddr = dev->base_addr;
1269         short saved_addr;
1270         unsigned long flags;
1271 
1272         spin_lock_irqsave(&lp->devlock, flags);
1273         saved_addr = inw(ioaddr+LANCE_ADDR);
1274         outw(112, ioaddr+LANCE_ADDR);
1275         dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1276         outw(saved_addr, ioaddr+LANCE_ADDR);
1277         spin_unlock_irqrestore(&lp->devlock, flags);
1278     }
1279 
1280     return &dev->stats;
1281 }
1282 
1283 /* Set or clear the multicast filter for this adaptor.
1284  */
1285 
1286 static void set_multicast_list(struct net_device *dev)
1287 {
1288     short ioaddr = dev->base_addr;
1289 
1290     outw(0, ioaddr+LANCE_ADDR);
1291     outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.  */
1292 
1293     if (dev->flags&IFF_PROMISC) {
1294         outw(15, ioaddr+LANCE_ADDR);
1295         outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1296     } else {
1297         short multicast_table[4];
1298         int i;
1299         int num_addrs=netdev_mc_count(dev);
1300         if(dev->flags&IFF_ALLMULTI)
1301             num_addrs=1;
1302         /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1303         memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1304         for (i = 0; i < 4; i++) {
1305             outw(8 + i, ioaddr+LANCE_ADDR);
1306             outw(multicast_table[i], ioaddr+LANCE_DATA);
1307         }
1308         outw(15, ioaddr+LANCE_ADDR);
1309         outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1310     }
1311 
1312     lance_restart(dev, 0x0142, 0); /*  Resume normal operation */
1313 
1314 }
1315