Back to home page

OSCL-LXR

 
 

    


0001 /*
0002     Written 1998-2000 by Donald Becker.
0003 
0004     This software may be used and distributed according to the terms of
0005     the GNU General Public License (GPL), incorporated herein by reference.
0006     Drivers based on or derived from this code fall under the GPL and must
0007     retain the authorship, copyright and license notice.  This file is not
0008     a complete program and may only be used when the entire operating
0009     system is licensed under the GPL.
0010 
0011     The author may be reached as becker@scyld.com, or C/O
0012     Scyld Computing Corporation
0013     410 Severn Ave., Suite 210
0014     Annapolis MD 21403
0015 
0016     Support information and updates available at
0017     http://www.scyld.com/network/pci-skeleton.html
0018 
0019     Linux kernel updates:
0020 
0021     Version 2.51, Nov 17, 2001 (jgarzik):
0022     - Add ethtool support
0023     - Replace some MII-related magic numbers with constants
0024 
0025 */
0026 
#define DRV_NAME    "fealnx"

static int debug;       /* 1-> print debug message */
static int max_interrupt_work = 20;     /* events handled per interrupt before bailing out */

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature.          */
static int rx_copybreak;

/* Used to pass the media type, etc.                            */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability.                                            */
/* The media type is usually passed in 'options[]'.             */
#define MAX_UNITS 8     /* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time.                 */
/* Keep the ring sizes a power of two for compile efficiency.           */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask.        */
/* Making the Tx ring too large decreases the effectiveness of channel  */
/* bonding and packet priority.                                         */
/* There are no ill effects from too-large receive rings.               */
/* NOTE(review): the current sizes (6/12, reduced from 16/32 on 88-12-9)
   are NOT powers of two, so the '%' optimisation above does not apply
   -- confirm whether that is intentional. */
#define TX_RING_SIZE    6
#define RX_RING_SIZE    12
/* Fully parenthesised so the macros expand safely inside larger
   expressions; the original unparenthesised A*sizeof(B) form would
   mis-associate after e.g. 'x / TX_TOTAL_SIZE'. */
#define TX_TOTAL_SIZE   (TX_RING_SIZE * sizeof(struct fealnx_desc))
#define RX_TOTAL_SIZE   (RX_RING_SIZE * sizeof(struct fealnx_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */
0066 
0067 
0068 /* Include files, designed to support most kernel versions 2.0.0 and later. */
0069 #include <linux/module.h>
0070 #include <linux/kernel.h>
0071 #include <linux/string.h>
0072 #include <linux/timer.h>
0073 #include <linux/errno.h>
0074 #include <linux/ioport.h>
0075 #include <linux/interrupt.h>
0076 #include <linux/pci.h>
0077 #include <linux/netdevice.h>
0078 #include <linux/etherdevice.h>
0079 #include <linux/skbuff.h>
0080 #include <linux/init.h>
0081 #include <linux/mii.h>
0082 #include <linux/ethtool.h>
0083 #include <linux/crc32.h>
0084 #include <linux/delay.h>
0085 #include <linux/bitops.h>
0086 
0087 #include <asm/processor.h>  /* Processor type for cache alignment. */
0088 #include <asm/io.h>
0089 #include <linux/uaccess.h>
0090 #include <asm/byteorder.h>
0091 
0092 /* This driver was written to use PCI memory space, however some x86 systems
0093    work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

/* Absolute jiffies deadline 'x' ticks from now. */
#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
/* Module parameters; see the MODULE_PARM_DESC strings below for meaning. */
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
0118 
/* Smallest acceptable PCI resource length for the register window. */
enum {
    MIN_REGION_SIZE     = 136,
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
/* NOTE(review): despite the "flags" name, these enumerators are compared
   with '==' elsewhere in this file (and HAS_MII_XCVR is 0), so they are
   plain values, not or-able bits -- confirm before combining them. */
enum chip_capability_flags {
    HAS_MII_XCVR,
    HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
/* PHY model identifiers stored in netdev_private.PHYType. */
enum phy_type_flags {
    MysonPHY = 1,
    AhdocPHY = 2,
    SeeqPHY = 3,
    MarvellPHY = 4,
    Myson981 = 5,
    LevelOnePHY = 6,
    OtherPHY = 10,
};

/* Per-chip-id descriptive name and transceiver capability. */
struct chip_info {
    char *chip_name;
    int flags;
};

/* Indexed by the PCI table's driver_data (chip_id in fealnx_init_one). */
static const struct chip_info skel_netdrv_tbl[] = {
    { "100/10M Ethernet PCI Adapter",   HAS_MII_XCVR },
    { "100/10M Ethernet PCI Adapter",   HAS_CHIP_XCVR },
    { "1000/100/10M Ethernet PCI Adapter",  HAS_MII_XCVR },
};
0151 
/* Offsets to the Command and Status Registers. */
/* All offsets are relative to the mapped register base (np->mem). */
enum fealnx_offsets {
    PAR0 = 0x0,     /* physical address 0-3 */
    PAR1 = 0x04,        /* physical address 4-5 */
    MAR0 = 0x08,        /* multicast address 0-3 */
    MAR1 = 0x0C,        /* multicast address 4-7 */
    FAR0 = 0x10,        /* flow-control address 0-3 */
    FAR1 = 0x14,        /* flow-control address 4-5 */
    TCRRCR = 0x18,      /* receive & transmit configuration */
    BCR = 0x1C,     /* bus command */
    TXPDR = 0x20,       /* transmit polling demand */
    RXPDR = 0x24,       /* receive polling demand */
    RXCWP = 0x28,       /* receive current word pointer */
    TXLBA = 0x2C,       /* transmit list base address */
    RXLBA = 0x30,       /* receive list base address */
    ISR = 0x34,     /* interrupt status */
    IMR = 0x38,     /* interrupt mask */
    FTH = 0x3C,     /* flow control high/low threshold */
    MANAGEMENT = 0x40,  /* bootrom/eeprom and mii management */
    TALLY = 0x44,       /* tally counters for crc and mpa */
    TSR = 0x48,     /* tally counter for transmit status */
    BMCRSR = 0x4c,      /* basic mode control and status */
    PHYIDENTIFIER = 0x50,   /* phy identifier */
    ANARANLPAR = 0x54,  /* auto-negotiation advertisement and link
                   partner ability */
    ANEROCR = 0x58,     /* auto-negotiation expansion and pci conf. */
    BPREMRPSR = 0x5c,   /* bypass & receive error mask and phy status */
};
0180 
/* Bits in the interrupt status/enable registers. */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
    RFCON = 0x00020000, /* receive flow control xon packet */
    RFCOFF = 0x00010000,    /* receive flow control xoff packet */
    LSCStatus = 0x00008000, /* link status change */
    ANCStatus = 0x00004000, /* autonegotiation completed */
    FBE = 0x00002000,   /* fatal bus error */
    FBEMask = 0x00001800,   /* mask bit12-11 */
    ParityErr = 0x00000000, /* parity error: the 00 encoding inside the
                   FBEMask field (bits 12-11), hence the zero
                   value -- not a standalone bit */
    TargetErr = 0x00001000, /* target abort */
    MasterErr = 0x00000800, /* master error */
    TUNF = 0x00000400,  /* transmit underflow */
    ROVF = 0x00000200,  /* receive overflow */
    ETI = 0x00000100,   /* transmit early int */
    ERI = 0x00000080,   /* receive early int */
    CNTOVF = 0x00000040,    /* counter overflow */
    RBU = 0x00000020,   /* receive buffer unavailable */
    TBU = 0x00000010,   /* transmit buffer unavailable */
    TI = 0x00000008,    /* transmit interrupt */
    RI = 0x00000004,    /* receive interrupt */
    RxErr = 0x00000002, /* receive error */
};
0204 
/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
    CR_W_ENH    = 0x02000000,   /* enhanced mode (name?) */
    CR_W_FD     = 0x00100000,   /* full duplex */
    CR_W_PS10   = 0x00080000,   /* 10 mbit */
    CR_W_TXEN   = 0x00040000,   /* tx enable (name?) */
    CR_W_PS1000 = 0x00010000,   /* 1000 mbit */
     /* CR_W_RXBURSTMASK= 0x00000e00, Im unsure about this */
    CR_W_RXMODEMASK = 0x000000e0,
    CR_W_PROM   = 0x00000080,   /* promiscuous mode */
    CR_W_AB     = 0x00000040,   /* accept broadcast */
    CR_W_AM     = 0x00000020,   /* accept multicast */
    CR_W_ARP    = 0x00000008,   /* receive runt pkt */
    CR_W_ALP    = 0x00000004,   /* receive long pkt */
    CR_W_SEP    = 0x00000002,   /* receive error pkt */
    CR_W_RXEN   = 0x00000001,   /* rx enable (unicast?) (name?) */

    /* Read-back status bits of the same register. */
    CR_R_TXSTOP = 0x04000000,   /* tx stopped (name?) */
    CR_R_FD     = 0x00100000,   /* full duplex detected */
    CR_R_PS10   = 0x00080000,   /* 10 mbit detected */
    CR_R_RXSTOP = 0x00008000,   /* rx stopped (name?) */
};
0229 
/* The Tulip Rx and Tx buffer descriptors. */
/* NOTE(review): status/control/buffer/next_desc appear to be the
   hardware-visible part (the ring DMA addresses are programmed into
   RXLBA/TXLBA), with the remaining fields driver-side bookkeeping --
   confirm the exact layout against the MTD-8xx datasheet. */
struct fealnx_desc {
    s32 status;     /* ownership/completion bits; see *_desc_status_bits */
    s32 control;    /* per-descriptor control; see *_desc_control_bits */
    u32 buffer;     /* DMA address of the data buffer */
    u32 next_desc;  /* DMA address of the next descriptor */
    struct fealnx_desc *next_desc_logical;  /* CPU-pointer mirror of next_desc */
    struct sk_buff *skbuff;     /* skb backing 'buffer', if any */
    u32 reserved1;
    u32 reserved2;
};
0241 
/* Bits in network_desc.status */
/* Rx descriptor status word bits. */
enum rx_desc_status_bits {
    RXOWN = 0x80000000, /* own bit */
    FLNGMASK = 0x0fff0000,  /* frame length */
    FLNGShift = 16,     /* shift to extract the frame length field */
    MARSTATUS = 0x00004000, /* multicast address received */
    BARSTATUS = 0x00002000, /* broadcast address received */
    PHYSTATUS = 0x00001000, /* physical address received */
    RXFSD = 0x00000800, /* first descriptor */
    RXLSD = 0x00000400, /* last descriptor */
    ErrorSummary = 0x80,    /* error summary */
    RUNTPKT = 0x40,     /* runt packet received */
    LONGPKT = 0x20,     /* long packet received */
    FAE = 0x10,     /* frame align error */
    CRC = 0x08,     /* crc error */
    RXER = 0x04,        /* receive error */
};
0259 
/* Rx descriptor control word bits. */
enum rx_desc_control_bits {
    RXIC = 0x00800000,  /* interrupt control */
    RBSShift = 0,       /* receive buffer size field starts at bit 0
                   (no mask constant is defined for it here) */
};
0264 
/* Tx descriptor status word bits. */
enum tx_desc_status_bits {
    TXOWN = 0x80000000, /* own bit */
    JABTO = 0x00004000, /* jabber timeout */
    CSL = 0x00002000,   /* carrier sense lost */
    LC = 0x00001000,    /* late collision */
    EC = 0x00000800,    /* excessive collision */
    UDF = 0x00000400,   /* fifo underflow */
    DFR = 0x00000200,   /* deferred */
    HF = 0x00000100,    /* heartbeat fail */
    NCRMask = 0x000000ff,   /* collision retry count */
    NCRShift = 0,       /* shift to extract the retry count field */
};
0277 
/* Tx descriptor control word bits. */
enum tx_desc_control_bits {
    TXIC = 0x80000000,  /* interrupt control */
    ETIControl = 0x40000000,    /* early transmit interrupt */
    TXLD = 0x20000000,  /* last descriptor */
    TXFD = 0x10000000,  /* first descriptor */
    CRCEnable = 0x08000000, /* crc control */
    PADEnable = 0x04000000, /* padding control */
    RetryTxLC = 0x02000000, /* retry late collision */
    PKTSMask = 0x3ff800,    /* packet size bit21-11 */
    PKTSShift = 11,     /* shift to place/extract the packet size field */
    TBSMask = 0x000007ff,   /* transmit buffer bit 10-0 */
    TBSShift = 0,       /* shift for the transmit buffer size field */
};
0291 
/* BootROM/EEPROM/MII Management Register */
/* Bit masks for the MANAGEMENT register; MDC/MDO/MDI implement a
   software bit-banged MII management bus (see m80x_send_cmd_to_phy). */
#define MASK_MIIR_MII_READ       0x00000000
#define MASK_MIIR_MII_WRITE      0x00000008
#define MASK_MIIR_MII_MDO        0x00000004
#define MASK_MIIR_MII_MDI        0x00000002
#define MASK_MIIR_MII_MDC        0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ             0x6000  /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE            0x5002  /* ST:01+OP:01+PHYAD+REGAD+TA:10 */

/* ------------------------------------------------------------------------- */
/*      Constants for Myson PHY                                              */
/* ------------------------------------------------------------------------- */
#define MysonPHYID      0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0     0x0302
#define StatusRegister  18
#define SPEED100        0x0400  // bit10
#define FULLMODE        0x0800  // bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/*      Constants for Seeq 80225 PHY                                         */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0      0x0016

#define MIIRegister18   18
#define SPD_DET_100     0x80
#define DPLX_DET_FULL   0x40

/* ------------------------------------------------------------------------- */
/*      Constants for Ahdoc 101 PHY                                          */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0     0x0022

#define DiagnosticReg   18
#define DPLX_FULL       0x0800
#define Speed_100       0x0400

/* 89/6/13 add, */
/* -------------------------------------------------------------------------- */
/*      Constants for Marvell and LevelOne PHYs                               */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0           0x0141
#define LevelOnePHYID0      0x0013

#define MII1000BaseTControlReg  9
#define MII1000BaseTStatusReg   10
#define SpecificReg     17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex  0x0200
#define PHYAbletoPerform1000HalfDuplex  0x0100
#define PHY1000AbilityMask              0x300

// for phy specific status register, marvell phy.
#define SpeedMask       0x0c000
#define Speed_1000M     0x08000
#define Speed_100M      0x4000
#define Speed_10M       0
#define Full_Duplex     0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M    0x08000
#define LXT1000_1000M   0x0c000
#define LXT1000_Full    0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2   0x00040000

/* for PHY */
#define LinkIsUp        0x0004
0366 
0367 
/* Per-device private state, stored in the net_device private area. */
struct netdev_private {
    /* Descriptor rings first for alignment. */
    struct fealnx_desc *rx_ring;    /* RX_RING_SIZE coherent descriptors */
    struct fealnx_desc *tx_ring;    /* TX_RING_SIZE coherent descriptors */

    dma_addr_t rx_ring_dma;     /* bus address of rx_ring */
    dma_addr_t tx_ring_dma;     /* bus address of tx_ring */

    spinlock_t lock;

    /* Media monitoring timer. */
    struct timer_list timer;

    /* Reset timer */
    struct timer_list reset_timer;
    int reset_timer_armed;      /* non-zero while reset_timer is pending */
    unsigned long crvalue_sv;   /* saved crvalue across a reset */
    unsigned long imrvalue_sv;  /* saved imrvalue across a reset */

    /* Frequently used values: keep some adjacent for cache effect. */
    int flags;          /* chip capability from skel_netdrv_tbl[] */
    struct pci_dev *pci_dev;
    unsigned long crvalue;      /* shadow of the TCRRCR register */
    unsigned long bcrvalue;     /* shadow of the BCR register */
    unsigned long imrvalue;     /* shadow of the IMR register */
    struct fealnx_desc *cur_rx;
    struct fealnx_desc *lack_rxbuf;
    int really_rx_count;
    struct fealnx_desc *cur_tx;
    struct fealnx_desc *cur_tx_copy;
    int really_tx_count;
    int free_tx_count;
    unsigned int rx_buf_sz; /* Based on MTU+slack. */

    /* These values keep track of the transceiver/media in use. */
    unsigned int linkok;
    unsigned int line_speed;
    unsigned int duplexmode;
    unsigned int default_port:4;    /* Last dev->if_port value. */
    unsigned int PHYType;       /* one of enum phy_type_flags */

    /* MII transceiver section. */
    int mii_cnt;        /* number of MII PHYs found */
    unsigned char phys[2];  /* MII device addresses. */
    struct mii_if_info mii;
    void __iomem *mem;  /* mapped register base (BAR 0 or 1) */
};
0415 
0416 
/* Forward declarations of the driver's entry points and helpers. */
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void reset_timer(struct timer_list *t);
static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);
0437 
0438 static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
0439 {
0440     int delay = 0x1000;
0441     iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
0442     while (--delay) {
0443         if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
0444             break;
0445     }
0446 }
0447 
0448 
0449 static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
0450 {
0451     int delay = 0x1000;
0452     iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
0453     while (--delay) {
0454         if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
0455                         == (CR_R_RXSTOP+CR_R_TXSTOP) )
0456             break;
0457     }
0458 }
0459 
/* net_device callbacks wired up in fealnx_init_one(). */
static const struct net_device_ops netdev_ops = {
    .ndo_open       = netdev_open,
    .ndo_stop       = netdev_close,
    .ndo_start_xmit     = start_tx,
    .ndo_get_stats      = get_stats,
    .ndo_set_rx_mode    = set_rx_mode,
    .ndo_eth_ioctl      = mii_ioctl,
    .ndo_tx_timeout     = fealnx_tx_timeout,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
};
0471 
/* Probe one MTD-8xx PCI device: enable and map it, read the station
   address, reset the chip, allocate the coherent Rx/Tx descriptor
   rings, detect the attached transceiver, apply the per-card
   options[]/full_duplex[] overrides, and register the net_device.
   Returns 0 on success or a negative errno (resources are unwound in
   reverse order via the goto chain at the bottom). */
static int fealnx_init_one(struct pci_dev *pdev,
               const struct pci_device_id *ent)
{
    struct netdev_private *np;
    int i, option, err, irq;
    static int card_idx = -1;   /* counts boards across probe calls */
    char boardname[12];
    void __iomem *ioaddr;
    unsigned long len;
    unsigned int chip_id = ent->driver_data;    /* index into skel_netdrv_tbl */
    struct net_device *dev;
    void *ring_space;
    dma_addr_t ring_dma;
    u8 addr[ETH_ALEN];
#ifdef USE_IO_OPS
    int bar = 0;    /* I/O-space BAR */
#else
    int bar = 1;    /* memory-space BAR */
#endif

    card_idx++;
    sprintf(boardname, "fealnx%d", card_idx);

    option = card_idx < MAX_UNITS ? options[card_idx] : 0;

    i = pci_enable_device(pdev);
    if (i) return i;
    pci_set_master(pdev);

    /* Sanity-check the register window before mapping it. */
    len = pci_resource_len(pdev, bar);
    if (len < MIN_REGION_SIZE) {
        dev_err(&pdev->dev,
               "region size %ld too small, aborting\n", len);
        return -ENODEV;
    }

    i = pci_request_regions(pdev, boardname);
    if (i)
        return i;

    irq = pdev->irq;

    ioaddr = pci_iomap(pdev, bar, len);
    if (!ioaddr) {
        err = -ENOMEM;
        goto err_out_res;
    }

    dev = alloc_etherdev(sizeof(struct netdev_private));
    if (!dev) {
        err = -ENOMEM;
        goto err_out_unmap;
    }
    SET_NETDEV_DEV(dev, &pdev->dev);

    /* read ethernet id */
    for (i = 0; i < 6; ++i)
        addr[i] = ioread8(ioaddr + PAR0 + i);
    eth_hw_addr_set(dev, addr);

    /* Reset the chip to erase previous misconfiguration. */
    iowrite32(0x00000001, ioaddr + BCR);

    /* Make certain the descriptor lists are aligned. */
    np = netdev_priv(dev);
    np->mem = ioaddr;
    spin_lock_init(&np->lock);
    np->pci_dev = pdev;
    np->flags = skel_netdrv_tbl[chip_id].flags;
    pci_set_drvdata(pdev, dev);
    np->mii.dev = dev;
    np->mii.mdio_read = mdio_read;
    np->mii.mdio_write = mdio_write;
    np->mii.phy_id_mask = 0x1f;
    np->mii.reg_num_mask = 0x1f;

    /* Coherent DMA memory for the Rx ring, then the Tx ring. */
    ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
                    GFP_KERNEL);
    if (!ring_space) {
        err = -ENOMEM;
        goto err_out_free_dev;
    }
    np->rx_ring = ring_space;
    np->rx_ring_dma = ring_dma;

    ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
                    GFP_KERNEL);
    if (!ring_space) {
        err = -ENOMEM;
        goto err_out_free_rx;
    }
    np->tx_ring = ring_space;
    np->tx_ring_dma = ring_dma;

    /* find the connected MII xcvrs */
    /* NOTE(review): the PHY-type read below uses np->phys[0] rather than
       'phy', so the type is always taken from the first PHY found even
       when several respond -- confirm whether that is intentional. */
    if (np->flags == HAS_MII_XCVR) {
        int phy, phy_idx = 0;

        for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
                   phy++) {
            int mii_status = mdio_read(dev, phy, 1);

            if (mii_status != 0xffff && mii_status != 0x0000) {
                np->phys[phy_idx++] = phy;
                dev_info(&pdev->dev,
                       "MII PHY found at address %d, status "
                       "0x%4.4x.\n", phy, mii_status);
                /* get phy type */
                {
                    unsigned int data;

                    data = mdio_read(dev, np->phys[0], 2);
                    if (data == SeeqPHYID0)
                        np->PHYType = SeeqPHY;
                    else if (data == AhdocPHYID0)
                        np->PHYType = AhdocPHY;
                    else if (data == MarvellPHYID0)
                        np->PHYType = MarvellPHY;
                    else if (data == MysonPHYID0)
                        np->PHYType = Myson981;
                    else if (data == LevelOnePHYID0)
                        np->PHYType = LevelOnePHY;
                    else
                        np->PHYType = OtherPHY;
                }
            }
        }

        np->mii_cnt = phy_idx;
        if (phy_idx == 0)
            dev_warn(&pdev->dev,
                "MII PHY not found -- this device may "
                   "not operate correctly.\n");
    } else {
        np->phys[0] = 32;   /* on-chip transceiver, no MII address */
/* 89/6/23 add, (begin) */
        /* get phy type */
        if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
            np->PHYType = MysonPHY;
        else
            np->PHYType = OtherPHY;
    }
    np->mii.phy_id = np->phys[0];

    /* Legacy override: a non-zero mem_start replaces the module option. */
    if (dev->mem_start)
        option = dev->mem_start;

    /* The lower four bits are the media type. */
    if (option > 0) {
        if (option & 0x200)
            np->mii.full_duplex = 1;
        np->default_port = option & 15;
    }

    if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
        np->mii.full_duplex = full_duplex[card_idx];

    if (np->mii.full_duplex) {
        dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
/* 89/6/13 add, (begin) */
//      if (np->PHYType==MarvellPHY)
        if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
            unsigned int data;

            data = mdio_read(dev, np->phys[0], 9);
            data = (data & 0xfcff) | 0x0200;
            mdio_write(dev, np->phys[0], 9, data);
        }
/* 89/6/13 add, (end) */
        if (np->flags == HAS_MII_XCVR)
            mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
        else
            iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
        np->mii.force_media = 1;
    }

    dev->netdev_ops = &netdev_ops;
    dev->ethtool_ops = &netdev_ethtool_ops;
    dev->watchdog_timeo = TX_TIMEOUT;

    err = register_netdev(dev);
    if (err)
        goto err_out_free_tx;

    printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
           dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
           dev->dev_addr, irq);

    return 0;

/* Error unwind: release resources in reverse order of acquisition. */
err_out_free_tx:
    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
              np->tx_ring_dma);
err_out_free_rx:
    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
              np->rx_ring_dma);
err_out_free_dev:
    free_netdev(dev);
err_out_unmap:
    pci_iounmap(pdev, ioaddr);
err_out_res:
    pci_release_regions(pdev);
    return err;
}
0676 
0677 
0678 static void fealnx_remove_one(struct pci_dev *pdev)
0679 {
0680     struct net_device *dev = pci_get_drvdata(pdev);
0681 
0682     if (dev) {
0683         struct netdev_private *np = netdev_priv(dev);
0684 
0685         dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
0686                   np->tx_ring_dma);
0687         dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
0688                   np->rx_ring_dma);
0689         unregister_netdev(dev);
0690         pci_iounmap(pdev, np->mem);
0691         free_netdev(dev);
0692         pci_release_regions(pdev);
0693     } else
0694         printk(KERN_ERR "fealnx: remove for unknown device\n");
0695 }
0696 
0697 
/* Bit-bang the MII management preamble and command frame
   (ST+OP+PHYAD+REGAD+TA) out through the MANAGEMENT register.
   @miiport: mapped address of the MANAGEMENT register
   @opcode:  OP_READ or OP_WRITE
   @phyad:   PHY address (0-31)
   @regad:   register address (0-31)
   Returns the last value written to the register so the caller can
   continue clocking data bits from that bus state. */
static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
    ulong miir;
    int i;
    unsigned int mask, data;

    /* enable MII output */
    miir = (ulong) ioread32(miiport);
    miir &= 0xfffffff0;

    miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

    /* send 32 1's preamble */
    for (i = 0; i < 32; i++) {
        /* low MDC; MDO is already high (miir) */
        miir &= ~MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

        /* high MDC */
        miir |= MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);
    }

    /* calculate ST+OP+PHYAD+REGAD+TA */
    data = opcode | (phyad << 7) | (regad << 2);

    /* sent out */
    mask = 0x8000;
    while (mask) {
        /* low MDC, prepare MDO */
        miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
        if (mask & data)
            miir |= MASK_MIIR_MII_MDO;

        iowrite32(miir, miiport);
        /* high MDC */
        miir |= MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);
        udelay(30);

        /* next */
        mask >>= 1;
        /* For a read, release the output driver before the turnaround
           bits so the PHY can drive MDIO. */
        if (mask == 0x2 && opcode == OP_READ)
            miir &= ~MASK_MIIR_MII_WRITE;
    }
    return miir;
}
0745 
0746 
/* Read a 16-bit MII register over the bit-banged management bus.
   @dev:   net device (supplies the mapped register base)
   @phyad: PHY address (0-31)
   @regad: register address (0-31)
   Returns the 16-bit register value, MSB first off the wire. */
static int mdio_read(struct net_device *dev, int phyad, int regad)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *miiport = np->mem + MANAGEMENT;
    ulong miir;
    unsigned int mask, data;

    /* Clock out the read command; the PHY drives MDIO afterwards. */
    miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

    /* read data */
    mask = 0x8000;
    data = 0;
    while (mask) {
        /* low MDC */
        miir &= ~MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

        /* read MDI */
        miir = ioread32(miiport);
        if (miir & MASK_MIIR_MII_MDI)
            data |= mask;

        /* high MDC, and wait */
        miir |= MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);
        udelay(30);

        /* next */
        mask >>= 1;
    }

    /* low MDC */
    miir &= ~MASK_MIIR_MII_MDC;
    iowrite32(miir, miiport);

    return data & 0xffff;
}
0784 
0785 
/* Write a 16-bit MII register over the bit-banged management bus.
   @dev:   net device (supplies the mapped register base)
   @phyad: PHY address (0-31)
   @regad: register address (0-31)
   @data:  16-bit value to write, clocked out MSB first. */
static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *miiport = np->mem + MANAGEMENT;
    ulong miir;
    unsigned int mask;

    /* Clock out the write command; we keep driving MDIO for the data. */
    miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

    /* write data */
    mask = 0x8000;
    while (mask) {
        /* low MDC, prepare MDO */
        miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
        if (mask & data)
            miir |= MASK_MIIR_MII_MDO;
        iowrite32(miir, miiport);

        /* high MDC */
        miir |= MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

        /* next */
        mask >>= 1;
    }

    /* low MDC */
    miir &= ~MASK_MIIR_MII_MDC;
    iowrite32(miir, miiport);
}
0816 
0817 
0818 static int netdev_open(struct net_device *dev)
0819 {
0820     struct netdev_private *np = netdev_priv(dev);
0821     void __iomem *ioaddr = np->mem;
0822     const int irq = np->pci_dev->irq;
0823     int rc, i;
0824 
0825     iowrite32(0x00000001, ioaddr + BCR);    /* Reset */
0826 
0827     rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
0828     if (rc)
0829         return -EAGAIN;
0830 
0831     for (i = 0; i < 3; i++)
0832         iowrite16(((const unsigned short *)dev->dev_addr)[i],
0833                 ioaddr + PAR0 + i*2);
0834 
0835     init_ring(dev);
0836 
0837     iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
0838     iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
0839 
0840     /* Initialize other registers. */
0841     /* Configure the PCI bus bursts and FIFO thresholds.
0842        486: Set 8 longword burst.
0843        586: no burst limit.
0844        Burst length 5:3
0845        0 0 0   1
0846        0 0 1   4
0847        0 1 0   8
0848        0 1 1   16
0849        1 0 0   32
0850        1 0 1   64
0851        1 1 0   128
0852        1 1 1   256
0853        Wait the specified 50 PCI cycles after a reset by initializing
0854        Tx and Rx queues and the address filter list.
0855        FIXME (Ueimor): optimistic for alpha + posted writes ? */
0856 
0857     np->bcrvalue = 0x10;    /* little-endian, 8 burst length */
0858 #ifdef __BIG_ENDIAN
0859     np->bcrvalue |= 0x04;   /* big-endian */
0860 #endif
0861 
0862 #if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
0863     if (boot_cpu_data.x86 <= 4)
0864         np->crvalue = 0xa00;
0865     else
0866 #endif
0867         np->crvalue = 0xe00;    /* rx 128 burst length */
0868 
0869 
0870 // 89/12/29 add,
0871 // 90/1/16 modify,
0872 //   np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
0873     np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
0874     if (np->pci_dev->device == 0x891) {
0875         np->bcrvalue |= 0x200;  /* set PROG bit */
0876         np->crvalue |= CR_W_ENH;    /* set enhanced bit */
0877         np->imrvalue |= ETI;
0878     }
0879     iowrite32(np->bcrvalue, ioaddr + BCR);
0880 
0881     if (dev->if_port == 0)
0882         dev->if_port = np->default_port;
0883 
0884     iowrite32(0, ioaddr + RXPDR);
0885 // 89/9/1 modify,
0886 //   np->crvalue = 0x00e40001;    /* tx store and forward, tx/rx enable */
0887     np->crvalue |= 0x00e40001;  /* tx store and forward, tx/rx enable */
0888     np->mii.full_duplex = np->mii.force_media;
0889     getlinkstatus(dev);
0890     if (np->linkok)
0891         getlinktype(dev);
0892     __set_rx_mode(dev);
0893 
0894     netif_start_queue(dev);
0895 
0896     /* Clear and Enable interrupts by setting the interrupt mask. */
0897     iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
0898     iowrite32(np->imrvalue, ioaddr + IMR);
0899 
0900     if (debug)
0901         printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
0902 
0903     /* Set the timer to check for link beat. */
0904     timer_setup(&np->timer, netdev_timer, 0);
0905     np->timer.expires = RUN_AT(3 * HZ);
0906 
0907     /* timer handler */
0908     add_timer(&np->timer);
0909 
0910     timer_setup(&np->reset_timer, reset_timer, 0);
0911     np->reset_timer_armed = 0;
0912     return rc;
0913 }
0914 
0915 
0916 static void getlinkstatus(struct net_device *dev)
0917 /* function: Routine will read MII Status Register to get link status.       */
0918 /* input   : dev... pointer to the adapter block.                            */
0919 /* output  : none.                                                           */
0920 {
0921     struct netdev_private *np = netdev_priv(dev);
0922     unsigned int i, DelayTime = 0x1000;
0923 
0924     np->linkok = 0;
0925 
0926     if (np->PHYType == MysonPHY) {
0927         for (i = 0; i < DelayTime; ++i) {
0928             if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
0929                 np->linkok = 1;
0930                 return;
0931             }
0932             udelay(100);
0933         }
0934     } else {
0935         for (i = 0; i < DelayTime; ++i) {
0936             if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
0937                 np->linkok = 1;
0938                 return;
0939             }
0940             udelay(100);
0941         }
0942     }
0943 }
0944 
0945 
/* Determine the negotiated speed and duplex after link-up and fold the
 * result into np->crvalue (CR_W_PS10/CR_W_PS1000/CR_W_FD bits).
 * Sets np->duplexmode (1 = half, 2 = full) and np->line_speed
 * (1 = 10M, 2 = 100M, 3 = 1000M).  Each supported PHY exposes this
 * information in a different vendor-specific register, hence the ladder.
 */
static void getlinktype(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);

    if (np->PHYType == MysonPHY) {  /* 3-in-1 case */
        /* built-in PHY: speed/duplex are mirrored in TCRRCR */
        if (ioread32(np->mem + TCRRCR) & CR_R_FD)
            np->duplexmode = 2; /* full duplex */
        else
            np->duplexmode = 1; /* half duplex */
        if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
            np->line_speed = 1; /* 10M */
        else
            np->line_speed = 2; /* 100M */
    } else {
        if (np->PHYType == SeeqPHY) {   /* this PHY is SEEQ 80225 */
            unsigned int data;

            data = mdio_read(dev, np->phys[0], MIIRegister18);
            if (data & SPD_DET_100)
                np->line_speed = 2; /* 100M */
            else
                np->line_speed = 1; /* 10M */
            if (data & DPLX_DET_FULL)
                np->duplexmode = 2; /* full duplex mode */
            else
                np->duplexmode = 1; /* half duplex mode */
        } else if (np->PHYType == AhdocPHY) {
            unsigned int data;

            data = mdio_read(dev, np->phys[0], DiagnosticReg);
            if (data & Speed_100)
                np->line_speed = 2; /* 100M */
            else
                np->line_speed = 1; /* 10M */
            if (data & DPLX_FULL)
                np->duplexmode = 2; /* full duplex mode */
            else
                np->duplexmode = 1; /* half duplex mode */
        }
/* 89/6/13 add, (begin) */
        else if (np->PHYType == MarvellPHY) {
            unsigned int data;

            data = mdio_read(dev, np->phys[0], SpecificReg);
            if (data & Full_Duplex)
                np->duplexmode = 2; /* full duplex mode */
            else
                np->duplexmode = 1; /* half duplex mode */
            data &= SpeedMask;
            if (data == Speed_1000M)
                np->line_speed = 3; /* 1000M */
            else if (data == Speed_100M)
                np->line_speed = 2; /* 100M */
            else
                np->line_speed = 1; /* 10M */
        }
/* 89/6/13 add, (end) */
/* 89/7/27 add, (begin) */
        else if (np->PHYType == Myson981) {
            unsigned int data;

            data = mdio_read(dev, np->phys[0], StatusRegister);

            if (data & SPEED100)
                np->line_speed = 2;
            else
                np->line_speed = 1;

            if (data & FULLMODE)
                np->duplexmode = 2;
            else
                np->duplexmode = 1;
        }
/* 89/7/27 add, (end) */
/* 89/12/29 add */
        else if (np->PHYType == LevelOnePHY) {
            unsigned int data;

            data = mdio_read(dev, np->phys[0], SpecificReg);
            if (data & LXT1000_Full)
                np->duplexmode = 2; /* full duplex mode */
            else
                np->duplexmode = 1; /* half duplex mode */
            data &= SpeedMask;
            if (data == LXT1000_1000M)
                np->line_speed = 3; /* 1000M */
            else if (data == LXT1000_100M)
                np->line_speed = 2; /* 100M */
            else
                np->line_speed = 1; /* 10M */
        }
        /* translate the decoded speed/duplex into control-register bits;
           NOTE(review): if PHYType matched none of the above, line_speed/
           duplexmode keep their previous values — presumably intentional */
        np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
        if (np->line_speed == 1)
            np->crvalue |= CR_W_PS10;
        else if (np->line_speed == 3)
            np->crvalue |= CR_W_PS1000;
        if (np->duplexmode == 2)
            np->crvalue |= CR_W_FD;
    }
}
1046 
1047 
1048 /* Take lock before calling this */
1049 static void allocate_rx_buffers(struct net_device *dev)
1050 {
1051     struct netdev_private *np = netdev_priv(dev);
1052 
1053     /*  allocate skb for rx buffers */
1054     while (np->really_rx_count != RX_RING_SIZE) {
1055         struct sk_buff *skb;
1056 
1057         skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1058         if (skb == NULL)
1059             break;  /* Better luck next round. */
1060 
1061         while (np->lack_rxbuf->skbuff)
1062             np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
1063 
1064         np->lack_rxbuf->skbuff = skb;
1065         np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
1066                             skb->data,
1067                             np->rx_buf_sz,
1068                             DMA_FROM_DEVICE);
1069         np->lack_rxbuf->status = RXOWN;
1070         ++np->really_rx_count;
1071     }
1072 }
1073 
1074 
1075 static void netdev_timer(struct timer_list *t)
1076 {
1077     struct netdev_private *np = from_timer(np, t, timer);
1078     struct net_device *dev = np->mii.dev;
1079     void __iomem *ioaddr = np->mem;
1080     int old_crvalue = np->crvalue;
1081     unsigned int old_linkok = np->linkok;
1082     unsigned long flags;
1083 
1084     if (debug)
1085         printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
1086                "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
1087                ioread32(ioaddr + TCRRCR));
1088 
1089     spin_lock_irqsave(&np->lock, flags);
1090 
1091     if (np->flags == HAS_MII_XCVR) {
1092         getlinkstatus(dev);
1093         if ((old_linkok == 0) && (np->linkok == 1)) {   /* we need to detect the media type again */
1094             getlinktype(dev);
1095             if (np->crvalue != old_crvalue) {
1096                 stop_nic_rxtx(ioaddr, np->crvalue);
1097                 iowrite32(np->crvalue, ioaddr + TCRRCR);
1098             }
1099         }
1100     }
1101 
1102     allocate_rx_buffers(dev);
1103 
1104     spin_unlock_irqrestore(&np->lock, flags);
1105 
1106     np->timer.expires = RUN_AT(10 * HZ);
1107     add_timer(&np->timer);
1108 }
1109 
1110 
1111 /* Take lock before calling */
1112 /* Reset chip and disable rx, tx and interrupts */
1113 static void reset_and_disable_rxtx(struct net_device *dev)
1114 {
1115     struct netdev_private *np = netdev_priv(dev);
1116     void __iomem *ioaddr = np->mem;
1117     int delay=51;
1118 
1119     /* Reset the chip's Tx and Rx processes. */
1120     stop_nic_rxtx(ioaddr, 0);
1121 
1122     /* Disable interrupts by clearing the interrupt mask. */
1123     iowrite32(0, ioaddr + IMR);
1124 
1125     /* Reset the chip to erase previous misconfiguration. */
1126     iowrite32(0x00000001, ioaddr + BCR);
1127 
1128     /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
1129        We surely wait too long (address+data phase). Who cares? */
1130     while (--delay) {
1131         ioread32(ioaddr + BCR);
1132         rmb();
1133     }
1134 }
1135 
1136 
/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;

    reset_rx_descriptors(dev);

    /* Point the NIC at the *current* descriptors, which are not
       necessarily the first entries of the rings. */
    iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
        ioaddr + TXLBA);
    iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
        ioaddr + RXLBA);

    /* restore bus configuration (burst length, endianness, PROG bit) */
    iowrite32(np->bcrvalue, ioaddr + BCR);

    iowrite32(0, ioaddr + RXPDR);   /* rx poll demand: start receiving */
    __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */

    /* Clear and Enable interrupts by setting the interrupt mask. */
    iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
    iowrite32(np->imrvalue, ioaddr + IMR);

    iowrite32(0, ioaddr + TXPDR);   /* tx poll demand: resume transmit */
}
1162 
1163 
/* Deferred recovery, armed by intr_handler when the chip generated too
 * much work at interrupt time.  Restores the control and interrupt-mask
 * values saved at arm time, resets and re-enables the rx/tx machinery
 * and restarts the transmit queue. */
static void reset_timer(struct timer_list *t)
{
    struct netdev_private *np = from_timer(np, t, reset_timer);
    struct net_device *dev = np->mii.dev;
    unsigned long flags;

    printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

    spin_lock_irqsave(&np->lock, flags);
    /* undo the disabling intr_handler did when it armed us */
    np->crvalue = np->crvalue_sv;
    np->imrvalue = np->imrvalue_sv;

    reset_and_disable_rxtx(dev);
    /* works for me without this:
    reset_tx_descriptors(dev); */
    enable_rxtx(dev);
    netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */

    /* allow intr_handler to arm us again if needed */
    np->reset_timer_armed = 0;

    spin_unlock_irqrestore(&np->lock, flags);
}
1186 
1187 
1188 static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
1189 {
1190     struct netdev_private *np = netdev_priv(dev);
1191     void __iomem *ioaddr = np->mem;
1192     unsigned long flags;
1193     int i;
1194 
1195     printk(KERN_WARNING
1196            "%s: Transmit timed out, status %8.8x, resetting...\n",
1197            dev->name, ioread32(ioaddr + ISR));
1198 
1199     {
1200         printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
1201         for (i = 0; i < RX_RING_SIZE; i++)
1202             printk(KERN_CONT " %8.8x",
1203                    (unsigned int) np->rx_ring[i].status);
1204         printk(KERN_CONT "\n");
1205         printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
1206         for (i = 0; i < TX_RING_SIZE; i++)
1207             printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
1208         printk(KERN_CONT "\n");
1209     }
1210 
1211     spin_lock_irqsave(&np->lock, flags);
1212 
1213     reset_and_disable_rxtx(dev);
1214     reset_tx_descriptors(dev);
1215     enable_rxtx(dev);
1216 
1217     spin_unlock_irqrestore(&np->lock, flags);
1218 
1219     netif_trans_update(dev); /* prevent tx timeout */
1220     dev->stats.tx_errors++;
1221     netif_wake_queue(dev); /* or .._start_.. ?? */
1222 }
1223 
1224 
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    int i;

    /* initialize rx variables */
    np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
    np->cur_rx = &np->rx_ring[0];
    np->lack_rxbuf = np->rx_ring;
    np->really_rx_count = 0;

    /* initial rx descriptors: chain each entry to its successor, both
       by DMA address (for the NIC) and by virtual pointer (for us) */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].status = 0;
        np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
        np->rx_ring[i].next_desc = np->rx_ring_dma +
            (i + 1)*sizeof(struct fealnx_desc);
        np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
        np->rx_ring[i].skbuff = NULL;
    }

    /* for the last rx descriptor: close the ring back to entry 0
       (i == RX_RING_SIZE after the loop, so i - 1 is the last entry) */
    np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
    np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

    /* allocate skb for rx buffers; on allocation failure leave
       lack_rxbuf pointing at the first empty slot so
       allocate_rx_buffers() can finish the job later */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);

        if (skb == NULL) {
            np->lack_rxbuf = &np->rx_ring[i];
            break;
        }

        ++np->really_rx_count;
        np->rx_ring[i].skbuff = skb;
        np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
                               skb->data,
                               np->rx_buf_sz,
                               DMA_FROM_DEVICE);
        np->rx_ring[i].status = RXOWN;  /* NIC owns it now */
        np->rx_ring[i].control |= RXIC; /* interrupt on completion */
    }

    /* initialize tx variables */
    np->cur_tx = &np->tx_ring[0];
    np->cur_tx_copy = &np->tx_ring[0];
    np->really_tx_count = 0;
    np->free_tx_count = TX_RING_SIZE;

    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_ring[i].status = 0;
        /* do we need np->tx_ring[i].control = XXX; ?? */
        np->tx_ring[i].next_desc = np->tx_ring_dma +
            (i + 1)*sizeof(struct fealnx_desc);
        np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
        np->tx_ring[i].skbuff = NULL;
    }

    /* for the last tx descriptor: close the ring back to entry 0 */
    np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
    np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}
1289 
1290 
1291 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1292 {
1293     struct netdev_private *np = netdev_priv(dev);
1294     unsigned long flags;
1295 
1296     spin_lock_irqsave(&np->lock, flags);
1297 
1298     np->cur_tx_copy->skbuff = skb;
1299 
1300 #define one_buffer
1301 #define BPT 1022
1302 #if defined(one_buffer)
1303     np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
1304                          skb->len, DMA_TO_DEVICE);
1305     np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
1306     np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
1307     np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
1308 // 89/12/29 add,
1309     if (np->pci_dev->device == 0x891)
1310         np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1311     np->cur_tx_copy->status = TXOWN;
1312     np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
1313     --np->free_tx_count;
1314 #elif defined(two_buffer)
1315     if (skb->len > BPT) {
1316         struct fealnx_desc *next;
1317 
1318         /* for the first descriptor */
1319         np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
1320                              skb->data, BPT,
1321                              DMA_TO_DEVICE);
1322         np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
1323         np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
1324         np->cur_tx_copy->control |= (BPT << TBSShift);  /* buffer size */
1325 
1326         /* for the last descriptor */
1327         next = np->cur_tx_copy->next_desc_logical;
1328         next->skbuff = skb;
1329         next->control = TXIC | TXLD | CRCEnable | PADEnable;
1330         next->control |= (skb->len << PKTSShift);   /* pkt size */
1331         next->control |= ((skb->len - BPT) << TBSShift);    /* buf size */
1332 // 89/12/29 add,
1333         if (np->pci_dev->device == 0x891)
1334             np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1335         next->buffer = dma_map_single(&ep->pci_dev->dev,
1336                           skb->data + BPT, skb->len - BPT,
1337                           DMA_TO_DEVICE);
1338 
1339         next->status = TXOWN;
1340         np->cur_tx_copy->status = TXOWN;
1341 
1342         np->cur_tx_copy = next->next_desc_logical;
1343         np->free_tx_count -= 2;
1344     } else {
1345         np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
1346                              skb->data, skb->len,
1347                              DMA_TO_DEVICE);
1348         np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
1349         np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
1350         np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
1351 // 89/12/29 add,
1352         if (np->pci_dev->device == 0x891)
1353             np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1354         np->cur_tx_copy->status = TXOWN;
1355         np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
1356         --np->free_tx_count;
1357     }
1358 #endif
1359 
1360     if (np->free_tx_count < 2)
1361         netif_stop_queue(dev);
1362     ++np->really_tx_count;
1363     iowrite32(0, np->mem + TXPDR);
1364 
1365     spin_unlock_irqrestore(&np->lock, flags);
1366     return NETDEV_TX_OK;
1367 }
1368 
1369 
1370 /* Take lock before calling */
1371 /* Chip probably hosed tx ring. Clean up. */
1372 static void reset_tx_descriptors(struct net_device *dev)
1373 {
1374     struct netdev_private *np = netdev_priv(dev);
1375     struct fealnx_desc *cur;
1376     int i;
1377 
1378     /* initialize tx variables */
1379     np->cur_tx = &np->tx_ring[0];
1380     np->cur_tx_copy = &np->tx_ring[0];
1381     np->really_tx_count = 0;
1382     np->free_tx_count = TX_RING_SIZE;
1383 
1384     for (i = 0; i < TX_RING_SIZE; i++) {
1385         cur = &np->tx_ring[i];
1386         if (cur->skbuff) {
1387             dma_unmap_single(&np->pci_dev->dev, cur->buffer,
1388                      cur->skbuff->len, DMA_TO_DEVICE);
1389             dev_kfree_skb_any(cur->skbuff);
1390             cur->skbuff = NULL;
1391         }
1392         cur->status = 0;
1393         cur->control = 0;   /* needed? */
1394         /* probably not needed. We do it for purely paranoid reasons */
1395         cur->next_desc = np->tx_ring_dma +
1396             (i + 1)*sizeof(struct fealnx_desc);
1397         cur->next_desc_logical = &np->tx_ring[i + 1];
1398     }
1399     /* for the last tx descriptor */
1400     np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
1401     np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
1402 }
1403 
1404 
1405 /* Take lock and stop rx before calling this */
1406 static void reset_rx_descriptors(struct net_device *dev)
1407 {
1408     struct netdev_private *np = netdev_priv(dev);
1409     struct fealnx_desc *cur = np->cur_rx;
1410     int i;
1411 
1412     allocate_rx_buffers(dev);
1413 
1414     for (i = 0; i < RX_RING_SIZE; i++) {
1415         if (cur->skbuff)
1416             cur->status = RXOWN;
1417         cur = cur->next_desc_logical;
1418     }
1419 
1420     iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
1421         np->mem + RXLBA);
1422 }
1423 
1424 
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
    struct net_device *dev = (struct net_device *) dev_instance;
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;
    long boguscnt = max_interrupt_work; /* work budget per invocation */
    unsigned int num_tx = 0;
    int handled = 0;

    spin_lock(&np->lock);

    /* mask all interrupts while we service the chip; re-enabled below */
    iowrite32(0, ioaddr + IMR);

    do {
        u32 intr_status = ioread32(ioaddr + ISR);

        /* Acknowledge all of the current interrupt sources ASAP. */
        iowrite32(intr_status, ioaddr + ISR);

        if (debug)
            printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
                   intr_status);

        /* no source we care about is pending: done */
        if (!(intr_status & np->imrvalue))
            break;

        handled = 1;

// 90/1/16 delete,
//
//      if (intr_status & FBE)
//      {   /* fatal error */
//          stop_nic_tx(ioaddr, 0);
//          stop_nic_rx(ioaddr, 0);
//          break;
//      };

        /* tx FIFO underflow: poke the tx poll demand to restart it */
        if (intr_status & TUNF)
            iowrite32(0, ioaddr + TXPDR);

        /* tally counter overflow: fold counters into stats before
           they wrap (low 15 bits = missed, bits 16-30 = crc) */
        if (intr_status & CNTOVF) {
            /* missed pkts */
            dev->stats.rx_missed_errors +=
                ioread32(ioaddr + TALLY) & 0x7fff;

            /* crc error */
            dev->stats.rx_crc_errors +=
                (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
        }

        if (intr_status & (RI | RBU)) {
            if (intr_status & RI)
                netdev_rx(dev);
            else {
                /* rx buffer unavailable: restock the ring and
                   restart the receiver */
                stop_nic_rx(ioaddr, np->crvalue);
                reset_rx_descriptors(dev);
                iowrite32(np->crvalue, ioaddr + TCRRCR);
            }
        }

        /* Reap completed tx descriptors until we hit one the NIC
           still owns. */
        while (np->really_tx_count) {
            long tx_status = np->cur_tx->status;
            long tx_control = np->cur_tx->control;

            if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */
                struct fealnx_desc *next;

                /* completion status lives in the last descriptor */
                next = np->cur_tx->next_desc_logical;
                tx_status = next->status;
                tx_control = next->control;
            }

            if (tx_status & TXOWN)
                break;

            if (!(np->crvalue & CR_W_ENH)) {
                /* normal mode: per-descriptor error reporting */
                if (tx_status & (CSL | LC | EC | UDF | HF)) {
                    dev->stats.tx_errors++;
                    if (tx_status & EC)
                        dev->stats.tx_aborted_errors++;
                    if (tx_status & CSL)
                        dev->stats.tx_carrier_errors++;
                    if (tx_status & LC)
                        dev->stats.tx_window_errors++;
                    if (tx_status & UDF)
                        dev->stats.tx_fifo_errors++;
                    if ((tx_status & HF) && np->mii.full_duplex == 0)
                        dev->stats.tx_heartbeat_errors++;

                } else {
                    dev->stats.tx_bytes +=
                        ((tx_control & PKTSMask) >> PKTSShift);

                    dev->stats.collisions +=
                        ((tx_status & NCRMask) >> NCRShift);
                    dev->stats.tx_packets++;
                }
            } else {
                /* enhanced mode: errors come from TSR (read below) */
                dev->stats.tx_bytes +=
                    ((tx_control & PKTSMask) >> PKTSShift);
                dev->stats.tx_packets++;
            }

            /* Free the original skb. */
            dma_unmap_single(&np->pci_dev->dev,
                     np->cur_tx->buffer,
                     np->cur_tx->skbuff->len,
                     DMA_TO_DEVICE);
            dev_consume_skb_irq(np->cur_tx->skbuff);
            np->cur_tx->skbuff = NULL;
            --np->really_tx_count;
            /* advance past one descriptor, or two for a split packet */
            if (np->cur_tx->control & TXLD) {
                np->cur_tx = np->cur_tx->next_desc_logical;
                ++np->free_tx_count;
            } else {
                np->cur_tx = np->cur_tx->next_desc_logical;
                np->cur_tx = np->cur_tx->next_desc_logical;
                np->free_tx_count += 2;
            }
            num_tx++;
        }       /* end of for loop */

        /* room again for at least one (possibly split) packet */
        if (num_tx && np->free_tx_count >= 2)
            netif_wake_queue(dev);

        /* read transmit status for enhanced mode only */
        if (np->crvalue & CR_W_ENH) {
            long data;

            data = ioread32(ioaddr + TSR);
            dev->stats.tx_errors += (data & 0xff000000) >> 24;
            dev->stats.tx_aborted_errors +=
                (data & 0xff000000) >> 24;
            dev->stats.tx_window_errors +=
                (data & 0x00ff0000) >> 16;
            dev->stats.collisions += (data & 0x0000ffff);
        }

        /* budget exhausted: the chip is storming.  Quiesce it and
           arm reset_timer to recover half a second from now. */
        if (--boguscnt < 0) {
            printk(KERN_WARNING "%s: Too much work at interrupt, "
                   "status=0x%4.4x.\n", dev->name, intr_status);
            if (!np->reset_timer_armed) {
                np->reset_timer_armed = 1;
                np->reset_timer.expires = RUN_AT(HZ/2);
                add_timer(&np->reset_timer);
                stop_nic_rxtx(ioaddr, 0);
                netif_stop_queue(dev);
                /* or netif_tx_disable(dev); ?? */
                /* Prevent other paths from enabling tx,rx,intrs */
                np->crvalue_sv = np->crvalue;
                np->imrvalue_sv = np->imrvalue;
                np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
                np->imrvalue = 0;
            }

            break;
        }
    } while (1);

    /* read the tally counters */
    /* missed pkts */
    dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

    /* crc error */
    dev->stats.rx_crc_errors +=
        (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

    if (debug)
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, ioread32(ioaddr + ISR));

    /* unmask (imrvalue is 0 here if we armed the reset timer) */
    iowrite32(np->imrvalue, ioaddr + IMR);

    spin_unlock(&np->lock);

    return IRQ_RETVAL(handled);
}
1604 
1605 
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
        s32 rx_status = np->cur_rx->status;

        /* ring has no buffers at all: nothing to process */
        if (np->really_rx_count == 0)
            break;

        if (debug)
            printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);

        /* a good single-buffer packet has both RXFSD (first) and
           RXLSD (last) set and no error summary */
        if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
            (rx_status & ErrorSummary)) {
            if (rx_status & ErrorSummary) { /* there was a fatal error */
                if (debug)
                    printk(KERN_DEBUG
                           "%s: Receive error, Rx status %8.8x.\n",
                           dev->name, rx_status);

                dev->stats.rx_errors++; /* end of a packet. */
                if (rx_status & (LONGPKT | RUNTPKT))
                    dev->stats.rx_length_errors++;
                if (rx_status & RXER)
                    dev->stats.rx_frame_errors++;
                if (rx_status & CRC)
                    dev->stats.rx_crc_errors++;
            } else {
                int need_to_reset = 0;
                int desno = 0;

                if (rx_status & RXFSD) {    /* this pkt is too long, over one rx buffer */
                    struct fealnx_desc *cur;

                    /* check this packet is received completely? */
                    /* walk forward looking for the closing RXLSD
                       descriptor that the NIC has released */
                    cur = np->cur_rx;
                    while (desno <= np->really_rx_count) {
                        ++desno;
                        if ((!(cur->status & RXOWN)) &&
                            (cur->status & RXLSD))
                            break;
                        /* goto next rx descriptor */
                        cur = cur->next_desc_logical;
                    }
                    if (desno > np->really_rx_count)
                        need_to_reset = 1;
                } else  /* RXLSD did not find, something error */
                    need_to_reset = 1;

                if (need_to_reset == 0) {
                    int i;

                    /* over-long packet: count it and recycle every
                       descriptor it occupied */
                    dev->stats.rx_length_errors++;

                    /* free all rx descriptors related this long pkt */
                    for (i = 0; i < desno; ++i) {
                        if (!np->cur_rx->skbuff) {
                            printk(KERN_DEBUG
                                "%s: I'm scared\n", dev->name);
                            break;
                        }
                        np->cur_rx->status = RXOWN;
                        np->cur_rx = np->cur_rx->next_desc_logical;
                    }
                    continue;
                } else {        /* rx error, need to reset this chip */
                    stop_nic_rx(ioaddr, np->crvalue);
                    reset_rx_descriptors(dev);
                    iowrite32(np->crvalue, ioaddr + TCRRCR);
                }
                break;  /* exit the while loop */
            }
        } else {    /* this received pkt is ok */

            struct sk_buff *skb;
            /* Omit the four octet CRC from the length. */
            short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
            if (debug)
                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
                       " status %x.\n", pkt_len, rx_status);
#endif

            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak &&
                (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                /* small packet: copy into a fresh skb and leave the
                   original rx buffer mapped in the ring */
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                dma_sync_single_for_cpu(&np->pci_dev->dev,
                            np->cur_rx->buffer,
                            np->rx_buf_sz,
                            DMA_FROM_DEVICE);
                /* Call copy + cksum if available. */

#if ! defined(__alpha__)
                skb_copy_to_linear_data(skb,
                    np->cur_rx->skbuff->data, pkt_len);
                skb_put(skb, pkt_len);
#else
                skb_put_data(skb, np->cur_rx->skbuff->data,
                         pkt_len);
#endif
                dma_sync_single_for_device(&np->pci_dev->dev,
                               np->cur_rx->buffer,
                               np->rx_buf_sz,
                               DMA_FROM_DEVICE);
            } else {
                /* large packet: hand the ring skb up directly and
                   leave this descriptor buffer-less for refill */
                dma_unmap_single(&np->pci_dev->dev,
                         np->cur_rx->buffer,
                         np->rx_buf_sz,
                         DMA_FROM_DEVICE);
                skb_put(skb = np->cur_rx->skbuff, pkt_len);
                np->cur_rx->skbuff = NULL;
                --np->really_rx_count;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += pkt_len;
        }

        np->cur_rx = np->cur_rx->next_desc_logical;
    }           /* end of while loop */

    /*  allocate skb for rx buffers */
    allocate_rx_buffers(dev);

    return 0;
}
1741 
1742 
1743 static struct net_device_stats *get_stats(struct net_device *dev)
1744 {
1745     struct netdev_private *np = netdev_priv(dev);
1746     void __iomem *ioaddr = np->mem;
1747 
1748     /* The chip only need report frame silently dropped. */
1749     if (netif_running(dev)) {
1750         dev->stats.rx_missed_errors +=
1751             ioread32(ioaddr + TALLY) & 0x7fff;
1752         dev->stats.rx_crc_errors +=
1753             (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
1754     }
1755 
1756     return &dev->stats;
1757 }
1758 
1759 
1760 /* for dev->set_multicast_list */
1761 static void set_rx_mode(struct net_device *dev)
1762 {
1763     spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
1764     unsigned long flags;
1765     spin_lock_irqsave(lp, flags);
1766     __set_rx_mode(dev);
1767     spin_unlock_irqrestore(lp, flags);
1768 }
1769 
1770 
1771 /* Take lock before calling */
1772 static void __set_rx_mode(struct net_device *dev)
1773 {
1774     struct netdev_private *np = netdev_priv(dev);
1775     void __iomem *ioaddr = np->mem;
1776     u32 mc_filter[2];   /* Multicast hash filter */
1777     u32 rx_mode;
1778 
1779     if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1780         memset(mc_filter, 0xff, sizeof(mc_filter));
1781         rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
1782     } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1783            (dev->flags & IFF_ALLMULTI)) {
1784         /* Too many to match, or accept all multicasts. */
1785         memset(mc_filter, 0xff, sizeof(mc_filter));
1786         rx_mode = CR_W_AB | CR_W_AM;
1787     } else {
1788         struct netdev_hw_addr *ha;
1789 
1790         memset(mc_filter, 0, sizeof(mc_filter));
1791         netdev_for_each_mc_addr(ha, dev) {
1792             unsigned int bit;
1793             bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1794             mc_filter[bit >> 5] |= (1 << bit);
1795         }
1796         rx_mode = CR_W_AB | CR_W_AM;
1797     }
1798 
1799     stop_nic_rxtx(ioaddr, np->crvalue);
1800 
1801     iowrite32(mc_filter[0], ioaddr + MAR0);
1802     iowrite32(mc_filter[1], ioaddr + MAR1);
1803     np->crvalue &= ~CR_W_RXMODEMASK;
1804     np->crvalue |= rx_mode;
1805     iowrite32(np->crvalue, ioaddr + TCRRCR);
1806 }
1807 
1808 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1809 {
1810     struct netdev_private *np = netdev_priv(dev);
1811 
1812     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1813     strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1814 }
1815 
1816 static int netdev_get_link_ksettings(struct net_device *dev,
1817                      struct ethtool_link_ksettings *cmd)
1818 {
1819     struct netdev_private *np = netdev_priv(dev);
1820 
1821     spin_lock_irq(&np->lock);
1822     mii_ethtool_get_link_ksettings(&np->mii, cmd);
1823     spin_unlock_irq(&np->lock);
1824 
1825     return 0;
1826 }
1827 
1828 static int netdev_set_link_ksettings(struct net_device *dev,
1829                      const struct ethtool_link_ksettings *cmd)
1830 {
1831     struct netdev_private *np = netdev_priv(dev);
1832     int rc;
1833 
1834     spin_lock_irq(&np->lock);
1835     rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
1836     spin_unlock_irq(&np->lock);
1837 
1838     return rc;
1839 }
1840 
1841 static int netdev_nway_reset(struct net_device *dev)
1842 {
1843     struct netdev_private *np = netdev_priv(dev);
1844     return mii_nway_restart(&np->mii);
1845 }
1846 
1847 static u32 netdev_get_link(struct net_device *dev)
1848 {
1849     struct netdev_private *np = netdev_priv(dev);
1850     return mii_link_ok(&np->mii);
1851 }
1852 
1853 static u32 netdev_get_msglevel(struct net_device *dev)
1854 {
1855     return debug;
1856 }
1857 
1858 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1859 {
1860     debug = value;
1861 }
1862 
1863 static const struct ethtool_ops netdev_ethtool_ops = {
1864     .get_drvinfo        = netdev_get_drvinfo,
1865     .nway_reset     = netdev_nway_reset,
1866     .get_link       = netdev_get_link,
1867     .get_msglevel       = netdev_get_msglevel,
1868     .set_msglevel       = netdev_set_msglevel,
1869     .get_link_ksettings = netdev_get_link_ksettings,
1870     .set_link_ksettings = netdev_set_link_ksettings,
1871 };
1872 
1873 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1874 {
1875     struct netdev_private *np = netdev_priv(dev);
1876     int rc;
1877 
1878     if (!netif_running(dev))
1879         return -EINVAL;
1880 
1881     spin_lock_irq(&np->lock);
1882     rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
1883     spin_unlock_irq(&np->lock);
1884 
1885     return rc;
1886 }
1887 
1888 
1889 static int netdev_close(struct net_device *dev)
1890 {
1891     struct netdev_private *np = netdev_priv(dev);
1892     void __iomem *ioaddr = np->mem;
1893     int i;
1894 
1895     netif_stop_queue(dev);
1896 
1897     /* Disable interrupts by clearing the interrupt mask. */
1898     iowrite32(0x0000, ioaddr + IMR);
1899 
1900     /* Stop the chip's Tx and Rx processes. */
1901     stop_nic_rxtx(ioaddr, 0);
1902 
1903     del_timer_sync(&np->timer);
1904     del_timer_sync(&np->reset_timer);
1905 
1906     free_irq(np->pci_dev->irq, dev);
1907 
1908     /* Free all the skbuffs in the Rx queue. */
1909     for (i = 0; i < RX_RING_SIZE; i++) {
1910         struct sk_buff *skb = np->rx_ring[i].skbuff;
1911 
1912         np->rx_ring[i].status = 0;
1913         if (skb) {
1914             dma_unmap_single(&np->pci_dev->dev,
1915                      np->rx_ring[i].buffer, np->rx_buf_sz,
1916                      DMA_FROM_DEVICE);
1917             dev_kfree_skb(skb);
1918             np->rx_ring[i].skbuff = NULL;
1919         }
1920     }
1921 
1922     for (i = 0; i < TX_RING_SIZE; i++) {
1923         struct sk_buff *skb = np->tx_ring[i].skbuff;
1924 
1925         if (skb) {
1926             dma_unmap_single(&np->pci_dev->dev,
1927                      np->tx_ring[i].buffer, skb->len,
1928                      DMA_TO_DEVICE);
1929             dev_kfree_skb(skb);
1930             np->tx_ring[i].skbuff = NULL;
1931         }
1932     }
1933 
1934     return 0;
1935 }
1936 
/* PCI IDs handled by this driver; vendor 0x1516 is Myson Century.
 * The final field (driver_data) selects the per-chip entry used at
 * probe time.  NOTE(review): the device IDs presumably correspond to
 * the MTD-800/803/891 variants — confirm against the chip info table
 * earlier in this file. */
static const struct pci_device_id fealnx_pci_tbl[] = {
    {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
    {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
    {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
    {} /* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
1944 
1945 
/* PCI driver glue: binds the probe/remove callbacks to the ID table
 * above. */
static struct pci_driver fealnx_driver = {
    .name       = "fealnx",
    .id_table   = fealnx_pci_tbl,
    .probe      = fealnx_init_one,
    .remove     = fealnx_remove_one,
};

/* Registers the driver at module load and unregisters it at unload. */
module_pci_driver(fealnx_driver);