/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
    Written 1999-2000 by Donald Becker.

    This software may be used and distributed according to the terms of
    the GNU General Public License (GPL), incorporated herein by reference.
    Drivers based on or derived from this code fall under the GPL and must
    retain the authorship, copyright and license notice.  This file is not
    a complete program and may only be used when the entire operating
    system is licensed under the GPL.

    The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation
    410 Severn Ave., Suite 210
    Annapolis MD 21403

    Support and updates available at
    http://www.scyld.com/network/sundance.html
    [link no longer provides useful info -jgarzik]
    Archives of the mailing list are still available at
    https://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME    "sundance"

/* The user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 1;           /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
         autosense   Autosensing active media.
         10mbps_hd   10Mbps half duplex.
         10mbps_fd   10Mbps full duplex.
         100mbps_hd  100Mbps half duplex.
         100mbps_fd  100Mbps full duplex.
         0           Autosensing active media.
         1           10Mbps half duplex.
         2           10Mbps full duplex.
         3           100Mbps half duplex.
         4           100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
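
/* For example (a hypothetical two-card setup), the options above can be
   given at module load time:

       modprobe sundance media=100mbps_fd,autosense flowctrl=1 debug=2

   which forces the first card to 100Mbps full duplex, leaves the second
   autosensing, enables flow-control advertisement and raises the message
   level. */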

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE    32
#define TX_QUEUE_LEN    (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE    64
#define RX_BUDGET       32
#define TX_TOTAL_SIZE   (TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE   (RX_RING_SIZE * sizeof(struct netdev_desc))
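
/* With TX_RING_SIZE = 32, for instance, the ring-index arithmetic used
   throughout this driver reduces to a mask:

       entry = np->cur_tx % TX_RING_SIZE;   (== np->cur_tx & (TX_RING_SIZE - 1))

   which is why the sizes above must stay powers of two. */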

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
                Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

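For example, with RX_RING_SIZE of 64 this driver links rx_ring[i].next_desc
to rx_ring[i+1] for i = 0..62 and points rx_ring[63].next_desc back at
rx_ring[0], so the chip walks the descriptors as an endless loop (see
init_ring() below for the construction).
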
IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.
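
In outline, the receive path's decision (see rx_poll() below) is:

    if (pkt_len < rx_copybreak)
        copy the frame into a freshly allocated small skbuff;
    else
        pass the original full-sized skbuff up the stack and refill
        that ring slot later.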

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
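
Concretely: the buffer is handed to the chip at skb->data + 2, the 14-byte
Ethernet header then occupies offsets 2..15, and the IP header starts at
offset 16, a longword-aligned address.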

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static const struct pci_device_id sundance_pci_tbl[] = {
    { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
    { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
    { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
    { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
    { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
    { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
    { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
    { }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
    netdev_io_size = 128
};

struct pci_id_info {
    const char *name;
};
static const struct pci_id_info pci_id_tbl[] = {
    {"D-Link DFE-550TX FAST Ethernet Adapter"},
    {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
    {"D-Link DFE-580TX 4 port Server Adapter"},
    {"D-Link DFE-530TXS FAST Ethernet Adapter"},
    {"D-Link DL10050-based FAST Ethernet Adapter"},
    {"Sundance Technology Alta"},
    {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
    { } /* terminate list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
    DMACtrl = 0x00,
    TxListPtr = 0x04,
    TxDMABurstThresh = 0x08,
    TxDMAUrgentThresh = 0x09,
    TxDMAPollPeriod = 0x0a,
    RxDMAStatus = 0x0c,
    RxListPtr = 0x10,
    DebugCtrl0 = 0x1a,
    DebugCtrl1 = 0x1c,
    RxDMABurstThresh = 0x14,
    RxDMAUrgentThresh = 0x15,
    RxDMAPollPeriod = 0x16,
    LEDCtrl = 0x1a,
    ASICCtrl = 0x30,
    EEData = 0x34,
    EECtrl = 0x36,
    FlashAddr = 0x40,
    FlashData = 0x44,
    WakeEvent = 0x45,
    TxStatus = 0x46,
    TxFrameId = 0x47,
    DownCounter = 0x18,
    IntrClear = 0x4a,
    IntrEnable = 0x4c,
    IntrStatus = 0x4e,
    MACCtrl0 = 0x50,
    MACCtrl1 = 0x52,
    StationAddr = 0x54,
    MaxFrameSize = 0x5A,
    RxMode = 0x5c,
    MIICtrl = 0x5e,
    MulticastFilter0 = 0x60,
    MulticastFilter1 = 0x64,
    RxOctetsLow = 0x68,
    RxOctetsHigh = 0x6a,
    TxOctetsLow = 0x6c,
    TxOctetsHigh = 0x6e,
    TxFramesOK = 0x70,
    RxFramesOK = 0x72,
    StatsCarrierError = 0x74,
    StatsLateColl = 0x75,
    StatsMultiColl = 0x76,
    StatsOneColl = 0x77,
    StatsTxDefer = 0x78,
    RxMissed = 0x79,
    StatsTxXSDefer = 0x7a,
    StatsTxAbort = 0x7b,
    StatsBcastTx = 0x7c,
    StatsBcastRx = 0x7d,
    StatsMcastTx = 0x7e,
    StatsMcastRx = 0x7f,
    /* Aliased and bogus values! */
    RxStatus = 0x0c,
};

#define ASIC_HI_WORD(x) ((x) + 2)

enum ASICCtrl_HiWord_bit {
    GlobalReset = 0x0001,
    RxReset = 0x0002,
    TxReset = 0x0004,
    DMAReset = 0x0008,
    FIFOReset = 0x0010,
    NetworkReset = 0x0020,
    HostReset = 0x0040,
    ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
    IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
    IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
    IntrDrvRqst=0x0040,
    StatsMax=0x0080, LinkChange=0x0100,
    IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
    AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
    AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
    EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
    EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
    StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
    TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
    RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* Bits in WakeEvent register. */
enum wake_event_bits {
    WakePktEnable = 0x01,
    MagicPktEnable = 0x02,
    LinkEventEnable = 0x04,
    WolEnable = 0x80,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
    __le32 next_desc;
    __le32 status;
    struct desc_frag { __le32 addr, length; } frag;
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
    DescOwn=0x8000,
    DescEndPacket=0x4000,
    DescEndRing=0x2000,
    LastFrag=0x80000000,
    DescIntrOnTx=0x8000,
    DescIntrOnDMADone=0x80000000,
    DisableAlign = 0x00000001,
};

#define PRIV_ALIGN  15  /* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT     4
struct netdev_private {
    /* Descriptor rings first for alignment. */
    struct netdev_desc *rx_ring;
    struct netdev_desc *tx_ring;
    struct sk_buff* rx_skbuff[RX_RING_SIZE];
    struct sk_buff* tx_skbuff[TX_RING_SIZE];
    dma_addr_t tx_ring_dma;
    dma_addr_t rx_ring_dma;
    struct timer_list timer;        /* Media monitoring timer. */
    struct net_device *ndev;        /* backpointer */
    /* ethtool extra stats */
    struct {
        u64 tx_multiple_collisions;
        u64 tx_single_collisions;
        u64 tx_late_collisions;
        u64 tx_deferred;
        u64 tx_deferred_excessive;
        u64 tx_aborted;
        u64 tx_bcasts;
        u64 rx_bcasts;
        u64 tx_mcasts;
        u64 rx_mcasts;
    } xstats;
    /* Frequently used values: keep some adjacent for cache effect. */
    spinlock_t lock;
    int msg_enable;
    int chip_id;
    unsigned int cur_rx, dirty_rx;      /* Producer/consumer ring indices */
    unsigned int rx_buf_sz;         /* Based on MTU+slack. */
    struct netdev_desc *last_tx;        /* Last Tx descriptor used. */
    unsigned int cur_tx, dirty_tx;
    /* These values keep track of the transceiver/media in use. */
    unsigned int flowctrl:1;
    unsigned int default_port:4;        /* Last dev->if_port value. */
    unsigned int an_enable:1;
    unsigned int speed;
    unsigned int wol_enabled:1;         /* Wake on LAN enabled */
    struct tasklet_struct rx_tasklet;
    struct tasklet_struct tx_tasklet;
    int budget;
    int cur_task;
    /* Multicast and receive mode. */
    spinlock_t mcastlock;           /* SMP lock multicast updates. */
    u16 mcast_filter[4];
    /* MII transceiver section. */
    struct mii_if_info mii_if;
    int mii_preamble_required;
    unsigned char phys[MII_CNT];        /* MII device addresses, only first one used. */
    struct pci_dev *pci_dev;
    void __iomem *base;
    spinlock_t statlock;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET    0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
            IntrDrvRqst | IntrTxDone | StatsMax | \
            LinkChange)

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  mdio_wait_link(struct net_device *dev, int wait);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(struct tasklet_struct *t);
static void tx_poll(struct tasklet_struct *t);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static int sundance_set_mac_addr(struct net_device *dev, void *data);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->base + ASICCtrl;
    int countdown;

    /* ST201 documentation states ASICCtrl is a 32bit register */
    iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
    /* ST201 documentation states reset can take up to 1 ms */
    countdown = 10 + 1;
    while (ioread32 (ioaddr) & (ResetBusy << 16)) {
        if (--countdown == 0) {
            printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
            break;
        }
        udelay(100);
    }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sundance_poll_controller(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);

    disable_irq(np->pci_dev->irq);
    intr_handler(np->pci_dev->irq, dev);
    enable_irq(np->pci_dev->irq);
}
#endif

static const struct net_device_ops netdev_ops = {
    .ndo_open       = netdev_open,
    .ndo_stop       = netdev_close,
    .ndo_start_xmit     = start_tx,
    .ndo_get_stats      = get_stats,
    .ndo_set_rx_mode    = set_rx_mode,
    .ndo_eth_ioctl      = netdev_ioctl,
    .ndo_tx_timeout     = tx_timeout,
    .ndo_change_mtu     = change_mtu,
    .ndo_set_mac_address    = sundance_set_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller    = sundance_poll_controller,
#endif
};

static int sundance_probe1(struct pci_dev *pdev,
               const struct pci_device_id *ent)
{
    struct net_device *dev;
    struct netdev_private *np;
    static int card_idx;
    int chip_idx = ent->driver_data;
    int irq;
    int i;
    void __iomem *ioaddr;
    u16 mii_ctl;
    void *ring_space;
    dma_addr_t ring_dma;
#ifdef USE_IO_OPS
    int bar = 0;
#else
    int bar = 1;
#endif
    int phy, phy_end, phy_idx = 0;
    __le16 addr[ETH_ALEN / 2];

    if (pci_enable_device(pdev))
        return -EIO;
    pci_set_master(pdev);

    irq = pdev->irq;

    dev = alloc_etherdev(sizeof(*np));
    if (!dev)
        return -ENOMEM;
    SET_NETDEV_DEV(dev, &pdev->dev);

    if (pci_request_regions(pdev, DRV_NAME))
        goto err_out_netdev;

    ioaddr = pci_iomap(pdev, bar, netdev_io_size);
    if (!ioaddr)
        goto err_out_res;

    for (i = 0; i < 3; i++)
        addr[i] =
            cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
    eth_hw_addr_set(dev, (u8 *)addr);

    np = netdev_priv(dev);
    np->ndev = dev;
    np->base = ioaddr;
    np->pci_dev = pdev;
    np->chip_id = chip_idx;
    np->msg_enable = (1 << debug) - 1;
    spin_lock_init(&np->lock);
    spin_lock_init(&np->statlock);
    tasklet_setup(&np->rx_tasklet, rx_poll);
    tasklet_setup(&np->tx_tasklet, tx_poll);

    ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
            &ring_dma, GFP_KERNEL);
    if (!ring_space)
        goto err_out_cleardev;
    np->tx_ring = (struct netdev_desc *)ring_space;
    np->tx_ring_dma = ring_dma;

    ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
            &ring_dma, GFP_KERNEL);
    if (!ring_space)
        goto err_out_unmap_tx;
    np->rx_ring = (struct netdev_desc *)ring_space;
    np->rx_ring_dma = ring_dma;

    np->mii_if.dev = dev;
    np->mii_if.mdio_read = mdio_read;
    np->mii_if.mdio_write = mdio_write;
    np->mii_if.phy_id_mask = 0x1f;
    np->mii_if.reg_num_mask = 0x1f;

    /* The chip-specific entries in the device structure. */
    dev->netdev_ops = &netdev_ops;
    dev->ethtool_ops = &ethtool_ops;
    dev->watchdog_timeo = TX_TIMEOUT;

    /* MTU range: 68 - 8191 */
    dev->min_mtu = ETH_MIN_MTU;
    dev->max_mtu = 8191;

    pci_set_drvdata(pdev, dev);

    i = register_netdev(dev);
    if (i)
        goto err_out_unmap_rx;

    printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
           dev->name, pci_id_tbl[chip_idx].name, ioaddr,
           dev->dev_addr, irq);

    np->phys[0] = 1;        /* Default setting */
    np->mii_preamble_required++;

    /*
     * Some PHYs don't seem to deal well with address 0 being accessed
     * first.
     */
    if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
        phy = 0;
        phy_end = 31;
    } else {
        phy = 1;
        phy_end = 32;   /* wraps to zero, due to 'phy & 0x1f' */
    }
    for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
        int phyx = phy & 0x1f;
        int mii_status = mdio_read(dev, phyx, MII_BMSR);
        if (mii_status != 0xffff  &&  mii_status != 0x0000) {
            np->phys[phy_idx++] = phyx;
            np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
            if ((mii_status & 0x0040) == 0)
                np->mii_preamble_required++;
            printk(KERN_INFO "%s: MII PHY found at address %d, status "
                   "0x%4.4x advertising %4.4x.\n",
                   dev->name, phyx, mii_status, np->mii_if.advertising);
        }
    }
    np->mii_preamble_required--;

    if (phy_idx == 0) {
        printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
               dev->name, ioread32(ioaddr + ASICCtrl));
        goto err_out_unregister;
    }

    np->mii_if.phy_id = np->phys[0];

    /* Parse override configuration */
    np->an_enable = 1;
    if (card_idx < MAX_UNITS) {
        if (media[card_idx] != NULL) {
            np->an_enable = 0;
            if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
                strcmp (media[card_idx], "4") == 0) {
                np->speed = 100;
                np->mii_if.full_duplex = 1;
            } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
                   strcmp (media[card_idx], "3") == 0) {
                np->speed = 100;
                np->mii_if.full_duplex = 0;
            } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
                   strcmp (media[card_idx], "2") == 0) {
                np->speed = 10;
                np->mii_if.full_duplex = 1;
            } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
                   strcmp (media[card_idx], "1") == 0) {
                np->speed = 10;
                np->mii_if.full_duplex = 0;
            } else {
                np->an_enable = 1;
            }
        }
        if (flowctrl == 1)
            np->flowctrl = 1;
    }

    /* Fibre PHY? */
    if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
        /* Default 100Mbps Full */
        if (np->an_enable) {
            np->speed = 100;
            np->mii_if.full_duplex = 1;
            np->an_enable = 0;
        }
    }
    /* Reset PHY */
    mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
    mdelay (300);
    /* If flow control enabled, we need to advertise it. */
    if (np->flowctrl)
        mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
    mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
    /* Force media type */
    if (!np->an_enable) {
        mii_ctl = 0;
        mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
        mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
        mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
        printk (KERN_INFO "Override speed=%d, %s duplex\n",
            np->speed, np->mii_if.full_duplex ? "Full" : "Half");

    }

    /* Perhaps move the reset here? */
    /* Reset the chip to erase previous misconfiguration. */
    if (netif_msg_hw(np))
        printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
    sundance_reset(dev, 0x00ff << 16);
    if (netif_msg_hw(np))
        printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

    card_idx++;
    return 0;

err_out_unregister:
    unregister_netdev(dev);
err_out_unmap_rx:
    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
        np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
        np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
    pci_iounmap(pdev, ioaddr);
err_out_res:
    pci_release_regions(pdev);
err_out_netdev:
    free_netdev (dev);
    return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
    if (netif_running(dev))
        return -EBUSY;
    dev->mtu = new_mtu;
    return 0;
}

#define eeprom_delay(ee_addr)   ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int eeprom_read(void __iomem *ioaddr, int location)
{
    int boguscnt = 10000;       /* Typical 1900 ticks. */
    iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
    do {
        eeprom_delay(ioaddr + EECtrl);
        if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
            return ioread16(ioaddr + EEData);
        }
    } while (--boguscnt > 0);
    return 0;
}

/*  MII transceiver control section.
    Read and write the MII registers using software-generated serial
    MDIO protocol.  See the MII specifications or DP83840A data sheet
    for details.

    The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
    met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
    MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
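
/* For reference, an IEEE 802.3 clause-22 management frame is:
 *     <preamble of 1s> <ST=01> <OP> <PHY addr:5> <REG addr:5> <TA:2> <DATA:16>
 * In mdio_read() below, the 16 bits shifted out of
 * (0xf6 << 10) | (phy_id << 5) | location are two idle 1-bits, ST=01 and the
 * read opcode 10, then the two 5-bit addresses; the turnaround and data bits
 * are clocked in afterwards.  mdio_write() shifts all 32 bits of
 * (0x5002 << 16) | (phy_id << 23) | (location << 18) | value: ST=01, the
 * write opcode 01, the addresses, the 10 turnaround pattern, then the data. */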

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
    int bits = 32;

    /* Establish sync by sending at least 32 logic ones. */
    while (--bits >= 0) {
        iowrite8(MDIO_WRITE1, mdio_addr);
        mdio_delay();
        iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
        mdio_delay();
    }
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *mdio_addr = np->base + MIICtrl;
    int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
    int i, retval = 0;

    if (np->mii_preamble_required)
        mdio_sync(mdio_addr);

    /* Shift the read command bits out. */
    for (i = 15; i >= 0; i--) {
        int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

        iowrite8(dataval, mdio_addr);
        mdio_delay();
        iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
        mdio_delay();
    }
    /* Read the two transition, 16 data, and wire-idle bits. */
    for (i = 19; i > 0; i--) {
        iowrite8(MDIO_EnbIn, mdio_addr);
        mdio_delay();
        retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
        iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
        mdio_delay();
    }
    return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *mdio_addr = np->base + MIICtrl;
    int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
    int i;

    if (np->mii_preamble_required)
        mdio_sync(mdio_addr);

    /* Shift the command bits out. */
    for (i = 31; i >= 0; i--) {
        int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

        iowrite8(dataval, mdio_addr);
        mdio_delay();
        iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
        mdio_delay();
    }
    /* Clear out extra bits. */
    for (i = 2; i > 0; i--) {
        iowrite8(MDIO_EnbIn, mdio_addr);
        mdio_delay();
        iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
        mdio_delay();
    }
}

static int mdio_wait_link(struct net_device *dev, int wait)
{
    int bmsr;
    int phy_id;
    struct netdev_private *np;

    np = netdev_priv(dev);
    phy_id = np->phys[0];

    do {
        bmsr = mdio_read(dev, phy_id, MII_BMSR);
        if (bmsr & 0x0004)
            return 0;
        mdelay(1);
    } while (--wait > 0);
    return -1;
}

static int netdev_open(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->base;
    const int irq = np->pci_dev->irq;
    unsigned long flags;
    int i;

    sundance_reset(dev, 0x00ff << 16);

    i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
    if (i)
        return i;

    if (netif_msg_ifup(np))
        printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);

    init_ring(dev);

    iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
    /* The Tx list pointer is written as packets are queued. */

    /* Initialize other registers. */
    __set_mac_addr(dev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
    iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
    iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
    if (dev->mtu > 2047)
        iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

    /* Configure the PCI bus bursts and FIFO thresholds. */

    if (dev->if_port == 0)
        dev->if_port = np->default_port;

    spin_lock_init(&np->mcastlock);

    set_rx_mode(dev);
    iowrite16(0, ioaddr + IntrEnable);
    iowrite16(0, ioaddr + DownCounter);
    /* Set the chip to poll every N*320nsec. */
    iowrite8(100, ioaddr + RxDMAPollPeriod);
    iowrite8(127, ioaddr + TxDMAPollPeriod);
    /* Fix DFE-580TX packet drop issue */
    if (np->pci_dev->revision >= 0x14)
        iowrite8(0x01, ioaddr + DebugCtrl1);
    netif_start_queue(dev);

    spin_lock_irqsave(&np->lock, flags);
    reset_tx(dev);
    spin_unlock_irqrestore(&np->lock, flags);

    iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

    /* Disable WoL.  Note the OR with 0 leaves the WakeEvent enable bits
       unchanged; we only record that WoL is off. */
    iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
    np->wol_enabled = 0;

    if (netif_msg_ifup(np))
        printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
               "MAC Control %x, %4.4x %4.4x.\n",
               dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
               ioread32(ioaddr + MACCtrl0),
               ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

    /* Set the timer to check for link beat. */
    timer_setup(&np->timer, netdev_timer, 0);
    np->timer.expires = jiffies + 3*HZ;
    add_timer(&np->timer);

    /* Enable interrupts by setting the interrupt mask. */
    iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

    return 0;
}

static void check_duplex(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->base;
    int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
    int negotiated = mii_lpa & np->mii_if.advertising;
    int duplex;

    /* Force media */
    if (!np->an_enable || mii_lpa == 0xffff) {
        if (np->mii_if.full_duplex)
            iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
                ioaddr + MACCtrl0);
        return;
    }

    /* Autonegotiation */
    duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
    if (np->mii_if.full_duplex != duplex) {
        np->mii_if.full_duplex = duplex;
        if (netif_msg_link(np))
            printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
                   "negotiated capability %4.4x.\n", dev->name,
                   duplex ? "full" : "half", np->phys[0], negotiated);
        iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
    }
}

static void netdev_timer(struct timer_list *t)
{
    struct netdev_private *np = from_timer(np, t, timer);
    struct net_device *dev = np->mii_if.dev;
    void __iomem *ioaddr = np->base;
    int next_tick = 10*HZ;

    if (netif_msg_timer(np)) {
        printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
               "Tx %x Rx %x.\n",
               dev->name, ioread16(ioaddr + IntrEnable),
               ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
    }
    check_duplex(dev);
    np->timer.expires = jiffies + next_tick;
    add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->base;
    unsigned long flag;

    netif_stop_queue(dev);
    tasklet_disable_in_atomic(&np->tx_tasklet);
    iowrite16(0, ioaddr + IntrEnable);
    printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
           "TxFrameId %2.2x,"
           " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
           ioread8(ioaddr + TxFrameId));

    {
        int i;
        for (i=0; i<TX_RING_SIZE; i++) {
            printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
                (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
                le32_to_cpu(np->tx_ring[i].next_desc),
                le32_to_cpu(np->tx_ring[i].status),
                (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
                le32_to_cpu(np->tx_ring[i].frag.addr),
                le32_to_cpu(np->tx_ring[i].frag.length));
        }
        printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
            ioread32(np->base + TxListPtr),
            netif_queue_stopped(dev));
        printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
            np->cur_tx, np->cur_tx % TX_RING_SIZE,
            np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
        printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
        printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
    }
    spin_lock_irqsave(&np->lock, flag);

    /* Stop and restart the chip's Tx processes. */
    reset_tx(dev);
    spin_unlock_irqrestore(&np->lock, flag);

    dev->if_port = 0;

    netif_trans_update(dev); /* prevent tx timeout */
    dev->stats.tx_errors++;
    if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
        netif_wake_queue(dev);
    }
    iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
    tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    int i;

    np->cur_rx = np->cur_tx = 0;
    np->dirty_rx = np->dirty_tx = 0;
    np->cur_task = 0;

    np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

    /* Initialize all Rx descriptors. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
            ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
        np->rx_ring[i].status = 0;
        np->rx_ring[i].frag.length = 0;
        np->rx_skbuff[i] = NULL;
    }

    /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb =
            netdev_alloc_skb(dev, np->rx_buf_sz + 2);
        np->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb_reserve(skb, 2);    /* 16 byte align the IP header. */
        np->rx_ring[i].frag.addr = cpu_to_le32(
            dma_map_single(&np->pci_dev->dev, skb->data,
                np->rx_buf_sz, DMA_FROM_DEVICE));
        if (dma_mapping_error(&np->pci_dev->dev,
                    np->rx_ring[i].frag.addr)) {
            dev_kfree_skb(skb);
            np->rx_skbuff[i] = NULL;
            break;
        }
        np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
    }
    np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_skbuff[i] = NULL;
        np->tx_ring[i].status = 0;
    }
}

static void tx_poll(struct tasklet_struct *t)
{
    struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
    unsigned head = np->cur_task % TX_RING_SIZE;
    struct netdev_desc *txdesc =
        &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

    /* Chain the next pointer */
    for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
        int entry = np->cur_task % TX_RING_SIZE;
        txdesc = &np->tx_ring[entry];
        if (np->last_tx) {
            np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
                entry*sizeof(struct netdev_desc));
        }
        np->last_tx = txdesc;
    }
    /* Indicate the latest descriptor of tx ring */
    txdesc->status |= cpu_to_le32(DescIntrOnTx);

    if (ioread32 (np->base + TxListPtr) == 0)
        iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
            np->base + TxListPtr);
}

static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    struct netdev_desc *txdesc;
    unsigned entry;

    /* Calculate the next Tx descriptor entry. */
    entry = np->cur_tx % TX_RING_SIZE;
    np->tx_skbuff[entry] = skb;
    txdesc = &np->tx_ring[entry];

    txdesc->next_desc = 0;
    txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
    txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
                skb->data, skb->len, DMA_TO_DEVICE));
    if (dma_mapping_error(&np->pci_dev->dev,
                txdesc->frag.addr))
            goto drop_frame;
    txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);

    /* Increment cur_tx before tasklet_schedule() */
    np->cur_tx++;
    mb();
    /* Schedule a tx_poll() task */
    tasklet_schedule(&np->tx_tasklet);

    /* On some architectures: explicitly flush cache lines here. */
    if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
        !netif_queue_stopped(dev)) {
        /* do nothing */
    } else {
        netif_stop_queue (dev);
    }
    if (netif_msg_tx_queued(np)) {
        printk (KERN_DEBUG
            "%s: Transmit frame #%d queued in slot %d.\n",
            dev->name, np->cur_tx, entry);
    }
    return NETDEV_TX_OK;

drop_frame:
    dev_kfree_skb_any(skb);
    np->tx_skbuff[entry] = NULL;
    dev->stats.tx_dropped++;
    return NETDEV_TX_OK;
}

/* Reset the hardware Tx path and free all queued tx skbuffs */
static int
reset_tx (struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->base;
    struct sk_buff *skb;
    int i;

    /* Reset tx logic, TxListPtr will be cleaned */
    iowrite16 (TxDisable, ioaddr + MACCtrl1);
    sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

    /* free all tx skbuff */
    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_ring[i].next_desc = 0;

        skb = np->tx_skbuff[i];
        if (skb) {
            dma_unmap_single(&np->pci_dev->dev,
                le32_to_cpu(np->tx_ring[i].frag.addr),
                skb->len, DMA_TO_DEVICE);
            dev_kfree_skb_any(skb);
            np->tx_skbuff[i] = NULL;
            dev->stats.tx_dropped++;
        }
    }
    np->cur_tx = np->dirty_tx = 0;
    np->cur_task = 0;

    np->last_tx = NULL;
    iowrite8(127, ioaddr + TxDMAPollPeriod);

    iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
    return 0;
}

/* The interrupt handler cleans up after the Tx thread,
   and schedules an Rx tasklet to do the Rx work. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->base;
    int hw_frame_id;
    int tx_cnt;
    int tx_status;
    int handled = 0;
    int i;

    do {
        int intr_status = ioread16(ioaddr + IntrStatus);
        iowrite16(intr_status, ioaddr + IntrStatus);

        if (netif_msg_intr(np))
            printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
                   dev->name, intr_status);

        if (!(intr_status & DEFAULT_INTR))
            break;

        handled = 1;

        if (intr_status & (IntrRxDMADone)) {
            iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
                    ioaddr + IntrEnable);
            if (np->budget < 0)
                np->budget = RX_BUDGET;
            tasklet_schedule(&np->rx_tasklet);
        }
        if (intr_status & (IntrTxDone | IntrDrvRqst)) {
            tx_status = ioread16 (ioaddr + TxStatus);
            for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
                if (netif_msg_tx_done(np))
                    printk
                        ("%s: Transmit status is %2.2x.\n",
                        dev->name, tx_status);
                if (tx_status & 0x1e) {
                    if (netif_msg_tx_err(np))
                        printk("%s: Transmit error status %4.4x.\n",
                               dev->name, tx_status);
                    dev->stats.tx_errors++;
                    if (tx_status & 0x10)
                        dev->stats.tx_fifo_errors++;
                    if (tx_status & 0x08)
                        dev->stats.collisions++;
                    if (tx_status & 0x04)
                        dev->stats.tx_fifo_errors++;
                    if (tx_status & 0x02)
                        dev->stats.tx_window_errors++;

                    /*
                    ** This reset has been verified on
                    ** DFE-580TX boards ! phdm@macqel.be.
                    */
                    if (tx_status & 0x10) { /* TxUnderrun */
                        /* Restart Tx FIFO and transmitter */
                        sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
                        /* No need to reset the Tx pointer here */
                    }
                    /* Restart the Tx. Need to make sure tx enabled */
                    i = 10;
                    do {
                        iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
                        if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
                            break;
                        mdelay(1);
                    } while (--i);
                }
                /* Yup, this is a documentation bug.  It cost me *hours*. */
                iowrite16 (0, ioaddr + TxStatus);
                if (tx_cnt < 0) {
                    iowrite32(5000, ioaddr + DownCounter);
                    break;
                }
                tx_status = ioread16 (ioaddr + TxStatus);
            }
            hw_frame_id = (tx_status >> 8) & 0xff;
        } else  {
            hw_frame_id = ioread8(ioaddr + TxFrameId);
        }

        if (np->pci_dev->revision >= 0x14) {
            spin_lock(&np->lock);
            for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                struct sk_buff *skb;
                int sw_frame_id;
                sw_frame_id = (le32_to_cpu(
                    np->tx_ring[entry].status) >> 2) & 0xff;
                if (sw_frame_id == hw_frame_id &&
                    !(le32_to_cpu(np->tx_ring[entry].status)
                    & 0x00010000))
                        break;
                if (sw_frame_id == (hw_frame_id + 1) %
                    TX_RING_SIZE)
                        break;
                skb = np->tx_skbuff[entry];
                /* Free the original skb. */
                dma_unmap_single(&np->pci_dev->dev,
                    le32_to_cpu(np->tx_ring[entry].frag.addr),
                    skb->len, DMA_TO_DEVICE);
                dev_consume_skb_irq(np->tx_skbuff[entry]);
                np->tx_skbuff[entry] = NULL;
                np->tx_ring[entry].frag.addr = 0;
                np->tx_ring[entry].frag.length = 0;
            }
            spin_unlock(&np->lock);
        } else {
            spin_lock(&np->lock);
            for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                struct sk_buff *skb;
                if (!(le32_to_cpu(np->tx_ring[entry].status)
                            & 0x00010000))
                    break;
                skb = np->tx_skbuff[entry];
                /* Free the original skb. */
                dma_unmap_single(&np->pci_dev->dev,
                    le32_to_cpu(np->tx_ring[entry].frag.addr),
                    skb->len, DMA_TO_DEVICE);
                dev_consume_skb_irq(np->tx_skbuff[entry]);
                np->tx_skbuff[entry] = NULL;
                np->tx_ring[entry].frag.addr = 0;
                np->tx_ring[entry].frag.length = 0;
            }
            spin_unlock(&np->lock);
        }

        if (netif_queue_stopped(dev) &&
            np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
            /* The ring is no longer full, clear busy flag. */
            netif_wake_queue (dev);
        }
        /* Abnormal error summary/uncommon events handlers. */
        if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
            netdev_error(dev, intr_status);
    } while (0);
    if (netif_msg_intr(np))
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, ioread16(ioaddr + IntrStatus));
    return IRQ_RETVAL(handled);
}

static void rx_poll(struct tasklet_struct *t)
{
    struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
    struct net_device *dev = np->ndev;
    int entry = np->cur_rx % RX_RING_SIZE;
    int boguscnt = np->budget;
    void __iomem *ioaddr = np->base;
    int received = 0;

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while (1) {
        struct netdev_desc *desc = &(np->rx_ring[entry]);
        u32 frame_status = le32_to_cpu(desc->status);
        int pkt_len;

        if (--boguscnt < 0) {
            goto not_done;
        }
        if (!(frame_status & DescOwn))
            break;
        pkt_len = frame_status & 0x1fff;    /* Chip omits the CRC. */
        if (netif_msg_rx_status(np))
            printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
                   frame_status);
        if (frame_status & 0x001f4000) {
            /* There was an error. */
            if (netif_msg_rx_err(np))
                printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
                       frame_status);
            dev->stats.rx_errors++;
            if (frame_status & 0x00100000)
                dev->stats.rx_length_errors++;
            if (frame_status & 0x00010000)
                dev->stats.rx_fifo_errors++;
            if (frame_status & 0x00060000)
                dev->stats.rx_frame_errors++;
            if (frame_status & 0x00080000)
                dev->stats.rx_crc_errors++;
            if (frame_status & 0x00100000) {
                printk(KERN_WARNING "%s: Oversized Ethernet frame,"
                       " status %8.8x.\n",
                       dev->name, frame_status);
            }
        } else {
            struct sk_buff *skb;
#ifndef final_version
            if (netif_msg_rx_status(np))
                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
                       ", bogus_cnt %d.\n",
                       pkt_len, boguscnt);
#endif
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak &&
                (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                dma_sync_single_for_cpu(&np->pci_dev->dev,
                        le32_to_cpu(desc->frag.addr),
                        np->rx_buf_sz, DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
                dma_sync_single_for_device(&np->pci_dev->dev,
                        le32_to_cpu(desc->frag.addr),
                        np->rx_buf_sz, DMA_FROM_DEVICE);
                skb_put(skb, pkt_len);
            } else {
                dma_unmap_single(&np->pci_dev->dev,
                    le32_to_cpu(desc->frag.addr),
                    np->rx_buf_sz, DMA_FROM_DEVICE);
                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                np->rx_skbuff[entry] = NULL;
            }
            skb->protocol = eth_type_trans(skb, dev);
            /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
            netif_rx(skb);
        }
        entry = (entry + 1) % RX_RING_SIZE;
        received++;
    }
    np->cur_rx = entry;
    refill_rx (dev);
    np->budget -= received;
    iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
    return;

not_done:
    np->cur_rx = entry;
    refill_rx (dev);
    if (!received)
        received = 1;
    np->budget -= received;
    if (np->budget <= 0)
        np->budget = RX_BUDGET;
    tasklet_schedule(&np->rx_tasklet);
}

static void refill_rx (struct net_device *dev)
{
    struct netdev_private *np = netdev_priv(dev);
    int entry;
    int cnt = 0;

    /* Refill the Rx ring buffers. */
    for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
        np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
        struct sk_buff *skb;
        entry = np->dirty_rx % RX_RING_SIZE;
        if (np->rx_skbuff[entry] == NULL) {
            skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
            np->rx_skbuff[entry] = skb;
            if (skb == NULL)
                break;      /* Better luck next round. */
            skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
            np->rx_ring[entry].frag.addr = cpu_to_le32(
                dma_map_single(&np->pci_dev->dev, skb->data,
                    np->rx_buf_sz, DMA_FROM_DEVICE));
            if (dma_mapping_error(&np->pci_dev->dev,
                    np->rx_ring[entry].frag.addr)) {
                dev_kfree_skb_irq(skb);
                np->rx_skbuff[entry] = NULL;
                break;
            }
        }
        /* Perhaps we need not reset this field. */
        np->rx_ring[entry].frag.length =
            cpu_to_le32(np->rx_buf_sz | LastFrag);
        np->rx_ring[entry].status = 0;
        cnt++;
    }
}

static void netdev_error(struct net_device *dev, int intr_status)
{
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->base;
    u16 mii_ctl, mii_advertise, mii_lpa;
    int speed;

    if (intr_status & LinkChange) {
        if (mdio_wait_link(dev, 10) == 0) {
            printk(KERN_INFO "%s: Link up\n", dev->name);
            if (np->an_enable) {
                mii_advertise = mdio_read(dev, np->phys[0],
                               MII_ADVERTISE);
                mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
                mii_advertise &= mii_lpa;
                printk(KERN_INFO "%s: Link changed: ",
                    dev->name);
                if (mii_advertise & ADVERTISE_100FULL) {
                    np->speed = 100;
                    printk("100Mbps, full duplex\n");
                } else if (mii_advertise & ADVERTISE_100HALF) {
                    np->speed = 100;
                    printk("100Mbps, half duplex\n");
                } else if (mii_advertise & ADVERTISE_10FULL) {
                    np->speed = 10;
                    printk("10Mbps, full duplex\n");
                } else if (mii_advertise & ADVERTISE_10HALF) {
                    np->speed = 10;
                    printk("10Mbps, half duplex\n");
                } else
                    printk("\n");

            } else {
                mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
                speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
                np->speed = speed;
1483                 printk(KERN_INFO "%s: Link changed: %dMbps, ",
1484                     dev->name, speed);
1485                 printk("%s duplex.\n",
1486                     (mii_ctl & BMCR_FULLDPLX) ?
1487                         "full" : "half");
1488             }
1489             check_duplex(dev);
1490             if (np->flowctrl && np->mii_if.full_duplex) {
1491                 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1492                     ioaddr + MulticastFilter1+2);
1493                 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1494                     ioaddr + MACCtrl0);
1495             }
1496             netif_carrier_on(dev);
1497         } else {
1498             printk(KERN_INFO "%s: Link down\n", dev->name);
1499             netif_carrier_off(dev);
1500         }
1501     }
1502     if (intr_status & StatsMax) {
1503         get_stats(dev);
1504     }
1505     if (intr_status & IntrPCIErr) {
1506         printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1507                dev->name, intr_status);
1508         /* We must do a global reset of DMA to continue. */
1509     }
1510 }
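
/*
 * The link-change handling above follows the usual MII resolution rule:
 * with autonegotiation enabled, our advertised abilities (MII_ADVERTISE)
 * are ANDed with the partner's (MII_LPA) and the best common mode wins,
 * tested from 100/full down to 10/half.  With autonegotiation disabled,
 * BMCR_SPEED100 and BMCR_FULLDPLX in the PHY control register are taken
 * at face value instead.
 */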
1511 
1512 static struct net_device_stats *get_stats(struct net_device *dev)
1513 {
1514     struct netdev_private *np = netdev_priv(dev);
1515     void __iomem *ioaddr = np->base;
1516     unsigned long flags;
1517     u8 late_coll, single_coll, mult_coll;
1518 
1519     spin_lock_irqsave(&np->statlock, flags);
1520     /* The chip only needs to report frames it silently dropped. */
1521     dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1522     dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1523     dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1524     dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1525 
1526     mult_coll = ioread8(ioaddr + StatsMultiColl);
1527     np->xstats.tx_multiple_collisions += mult_coll;
1528     single_coll = ioread8(ioaddr + StatsOneColl);
1529     np->xstats.tx_single_collisions += single_coll;
1530     late_coll = ioread8(ioaddr + StatsLateColl);
1531     np->xstats.tx_late_collisions += late_coll;
1532     dev->stats.collisions += mult_coll
1533         + single_coll
1534         + late_coll;
1535 
1536     np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1537     np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1538     np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1539     np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1540     np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1541     np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1542     np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1543 
1544     dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1545     dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1546     dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1547     dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1548 
1549     spin_unlock_irqrestore(&np->statlock, flags);
1550 
1551     return &dev->stats;
1552 }
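
/*
 * The hardware statistics counters are only 8 or 16 bits wide, so they
 * are folded into the wider software counters here before they can
 * wrap; the StatsMax interrupt serviced in netdev_error() makes sure
 * this happens often enough.  statlock keeps concurrent callers from
 * interleaving the read-and-accumulate sequence.
 */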
1553 
1554 static void set_rx_mode(struct net_device *dev)
1555 {
1556     struct netdev_private *np = netdev_priv(dev);
1557     void __iomem *ioaddr = np->base;
1558     u16 mc_filter[4];           /* Multicast hash filter */
1559     u32 rx_mode;
1560     int i;
1561 
1562     if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
1563         memset(mc_filter, 0xff, sizeof(mc_filter));
1564         rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1565     } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1566            (dev->flags & IFF_ALLMULTI)) {
1567         /* Too many to match, or accept all multicasts. */
1568         memset(mc_filter, 0xff, sizeof(mc_filter));
1569         rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1570     } else if (!netdev_mc_empty(dev)) {
1571         struct netdev_hw_addr *ha;
1572         int bit;
1573         int index;
1574         int crc;
1575         memset (mc_filter, 0, sizeof (mc_filter));
1576         netdev_for_each_mc_addr(ha, dev) {
1577             crc = ether_crc_le(ETH_ALEN, ha->addr);
1578             for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1579                 if (crc & 0x80000000) index |= 1 << bit;
1580             mc_filter[index/16] |= (1 << (index % 16));
1581         }
1582         rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1583     } else {
1584         iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1585         return;
1586     }
1587     if (np->mii_if.full_duplex && np->flowctrl)
1588         mc_filter[3] |= 0x0200;
1589 
1590     for (i = 0; i < 4; i++)
1591         iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1592     iowrite8(rx_mode, ioaddr + RxMode);
1593 }
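
/*
 * Hash filter details: each multicast address is hashed with
 * ether_crc_le() and the top six CRC bits, taken bit-reversed (CRC bit
 * 31 becomes index bit 0), select one of the 64 filter bits spread over
 * four 16-bit words.  Worked example with an illustrative CRC value:
 * crc = 0xB6D12345 has top bits 101101, giving index 45, so bit
 * 45 % 16 = 13 of mc_filter[45 / 16] = mc_filter[2] is set.  The extra
 * 0x0200 set in mc_filter[3] for full-duplex flow control presumably
 * corresponds to the hash slot of the MAC-control PAUSE multicast
 * address (01:80:C2:00:00:01).
 */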
1594 
1595 static int __set_mac_addr(struct net_device *dev)
1596 {
1597     struct netdev_private *np = netdev_priv(dev);
1598     u16 addr16;
1599 
1600     addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1601     iowrite16(addr16, np->base + StationAddr);
1602     addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1603     iowrite16(addr16, np->base + StationAddr+2);
1604     addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1605     iowrite16(addr16, np->base + StationAddr+4);
1606     return 0;
1607 }
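
/*
 * The six-byte station address is programmed as three little-endian
 * 16-bit writes at StationAddr, StationAddr+2 and StationAddr+4;
 * dev_addr[0] (the first byte on the wire) ends up in the low byte of
 * the first word.
 */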
1608 
1609 /* Invoked with rtnl_lock held */
1610 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1611 {
1612     const struct sockaddr *addr = data;
1613 
1614     if (!is_valid_ether_addr(addr->sa_data))
1615         return -EADDRNOTAVAIL;
1616     eth_hw_addr_set(dev, addr->sa_data);
1617     __set_mac_addr(dev);
1618 
1619     return 0;
1620 }
1621 
1622 static const struct {
1623     const char name[ETH_GSTRING_LEN];
1624 } sundance_stats[] = {
1625     { "tx_multiple_collisions" },
1626     { "tx_single_collisions" },
1627     { "tx_late_collisions" },
1628     { "tx_deferred" },
1629     { "tx_deferred_excessive" },
1630     { "tx_aborted" },
1631     { "tx_bcasts" },
1632     { "rx_bcasts" },
1633     { "tx_mcasts" },
1634     { "rx_mcasts" },
1635 };
1636 
1637 static int check_if_running(struct net_device *dev)
1638 {
1639     if (!netif_running(dev))
1640         return -EINVAL;
1641     return 0;
1642 }
1643 
1644 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1645 {
1646     struct netdev_private *np = netdev_priv(dev);
1647     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1648     strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1649 }
1650 
1651 static int get_link_ksettings(struct net_device *dev,
1652                   struct ethtool_link_ksettings *cmd)
1653 {
1654     struct netdev_private *np = netdev_priv(dev);
1655     spin_lock_irq(&np->lock);
1656     mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1657     spin_unlock_irq(&np->lock);
1658     return 0;
1659 }
1660 
1661 static int set_link_ksettings(struct net_device *dev,
1662                   const struct ethtool_link_ksettings *cmd)
1663 {
1664     struct netdev_private *np = netdev_priv(dev);
1665     int res;
1666     spin_lock_irq(&np->lock);
1667     res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1668     spin_unlock_irq(&np->lock);
1669     return res;
1670 }
1671 
1672 static int nway_reset(struct net_device *dev)
1673 {
1674     struct netdev_private *np = netdev_priv(dev);
1675     return mii_nway_restart(&np->mii_if);
1676 }
1677 
1678 static u32 get_link(struct net_device *dev)
1679 {
1680     struct netdev_private *np = netdev_priv(dev);
1681     return mii_link_ok(&np->mii_if);
1682 }
1683 
1684 static u32 get_msglevel(struct net_device *dev)
1685 {
1686     struct netdev_private *np = netdev_priv(dev);
1687     return np->msg_enable;
1688 }
1689 
1690 static void set_msglevel(struct net_device *dev, u32 val)
1691 {
1692     struct netdev_private *np = netdev_priv(dev);
1693     np->msg_enable = val;
1694 }
1695 
1696 static void get_strings(struct net_device *dev, u32 stringset,
1697         u8 *data)
1698 {
1699     if (stringset == ETH_SS_STATS)
1700         memcpy(data, sundance_stats, sizeof(sundance_stats));
1701 }
1702 
1703 static int get_sset_count(struct net_device *dev, int sset)
1704 {
1705     switch (sset) {
1706     case ETH_SS_STATS:
1707         return ARRAY_SIZE(sundance_stats);
1708     default:
1709         return -EOPNOTSUPP;
1710     }
1711 }
1712 
1713 static void get_ethtool_stats(struct net_device *dev,
1714         struct ethtool_stats *stats, u64 *data)
1715 {
1716     struct netdev_private *np = netdev_priv(dev);
1717     int i = 0;
1718 
1719     get_stats(dev);
1720     data[i++] = np->xstats.tx_multiple_collisions;
1721     data[i++] = np->xstats.tx_single_collisions;
1722     data[i++] = np->xstats.tx_late_collisions;
1723     data[i++] = np->xstats.tx_deferred;
1724     data[i++] = np->xstats.tx_deferred_excessive;
1725     data[i++] = np->xstats.tx_aborted;
1726     data[i++] = np->xstats.tx_bcasts;
1727     data[i++] = np->xstats.rx_bcasts;
1728     data[i++] = np->xstats.tx_mcasts;
1729     data[i++] = np->xstats.rx_mcasts;
1730 }
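
/*
 * The values written to data[] must stay in exactly the same order as
 * the strings in sundance_stats[]: ethtool pairs data[i] with the i-th
 * string, so reordering one table without the other would mislabel
 * every counter.
 */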
1731 
1732 #ifdef CONFIG_PM
1733 
1734 static void sundance_get_wol(struct net_device *dev,
1735         struct ethtool_wolinfo *wol)
1736 {
1737     struct netdev_private *np = netdev_priv(dev);
1738     void __iomem *ioaddr = np->base;
1739     u8 wol_bits;
1740 
1741     wol->wolopts = 0;
1742 
1743     wol->supported = (WAKE_PHY | WAKE_MAGIC);
1744     if (!np->wol_enabled)
1745         return;
1746 
1747     wol_bits = ioread8(ioaddr + WakeEvent);
1748     if (wol_bits & MagicPktEnable)
1749         wol->wolopts |= WAKE_MAGIC;
1750     if (wol_bits & LinkEventEnable)
1751         wol->wolopts |= WAKE_PHY;
1752 }
1753 
1754 static int sundance_set_wol(struct net_device *dev,
1755     struct ethtool_wolinfo *wol)
1756 {
1757     struct netdev_private *np = netdev_priv(dev);
1758     void __iomem *ioaddr = np->base;
1759     u8 wol_bits;
1760 
1761     if (!device_can_wakeup(&np->pci_dev->dev))
1762         return -EOPNOTSUPP;
1763 
1764     np->wol_enabled = !!(wol->wolopts);
1765     wol_bits = ioread8(ioaddr + WakeEvent);
1766     wol_bits &= ~(WakePktEnable | MagicPktEnable |
1767             LinkEventEnable | WolEnable);
1768 
1769     if (np->wol_enabled) {
1770         if (wol->wolopts & WAKE_MAGIC)
1771             wol_bits |= (MagicPktEnable | WolEnable);
1772         if (wol->wolopts & WAKE_PHY)
1773             wol_bits |= (LinkEventEnable | WolEnable);
1774     }
1775     iowrite8(wol_bits, ioaddr + WakeEvent);
1776 
1777     device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1778 
1779     return 0;
1780 }
1781 #else
1782 #define sundance_get_wol NULL
1783 #define sundance_set_wol NULL
1784 #endif /* CONFIG_PM */
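
/*
 * Wake-on-LAN mapping: WAKE_MAGIC arms MagicPktEnable and WAKE_PHY arms
 * LinkEventEnable, with WolEnable acting as the master switch whenever
 * either source is selected.  device_set_wakeup_enable() keeps the PM
 * core informed so the suspend path can arm wakeup.  Without CONFIG_PM
 * the get/set hooks are left NULL and ethtool reports no WoL support.
 */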
1785 
1786 static const struct ethtool_ops ethtool_ops = {
1787     .begin = check_if_running,
1788     .get_drvinfo = get_drvinfo,
1789     .nway_reset = nway_reset,
1790     .get_link = get_link,
1791     .get_wol = sundance_get_wol,
1792     .set_wol = sundance_set_wol,
1793     .get_msglevel = get_msglevel,
1794     .set_msglevel = set_msglevel,
1795     .get_strings = get_strings,
1796     .get_sset_count = get_sset_count,
1797     .get_ethtool_stats = get_ethtool_stats,
1798     .get_link_ksettings = get_link_ksettings,
1799     .set_link_ksettings = set_link_ksettings,
1800 };
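
/*
 * .begin = check_if_running makes ethtool requests fail with -EINVAL
 * while the interface is down, so the individual handlers may assume
 * the device is open and its registers are live.
 */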
1801 
1802 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1803 {
1804     struct netdev_private *np = netdev_priv(dev);
1805     int rc;
1806 
1807     if (!netif_running(dev))
1808         return -EINVAL;
1809 
1810     spin_lock_irq(&np->lock);
1811     rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1812     spin_unlock_irq(&np->lock);
1813 
1814     return rc;
1815 }
1816 
1817 static int netdev_close(struct net_device *dev)
1818 {
1819     struct netdev_private *np = netdev_priv(dev);
1820     void __iomem *ioaddr = np->base;
1821     struct sk_buff *skb;
1822     int i;
1823 
1824     /* Wait for any running tasklets to finish, then disable them. */
1825     tasklet_kill(&np->rx_tasklet);
1826     tasklet_kill(&np->tx_tasklet);
1827     np->cur_tx = 0;
1828     np->dirty_tx = 0;
1829     np->cur_task = 0;
1830     np->last_tx = NULL;
1831 
1832     netif_stop_queue(dev);
1833 
1834     if (netif_msg_ifdown(np)) {
1835         printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1836                "Rx %4.4x Int %2.2x.\n",
1837                dev->name, ioread8(ioaddr + TxStatus),
1838                ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1839         printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1840                dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1841     }
1842 
1843     /* Disable interrupts by clearing the interrupt mask. */
1844     iowrite16(0x0000, ioaddr + IntrEnable);
1845 
1846     /* Disable Rx and Tx DMA so resources can be released safely. */
1847     iowrite32(0x500, ioaddr + DMACtrl);
1848 
1849     /* Stop the chip's Tx and Rx processes. */
1850     iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1851 
1852     for (i = 2000; i > 0; i--) {
1853         if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1854             break;
1855         mdelay(1);
1856     }
1857 
1858     iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1859             ioaddr + ASIC_HI_WORD(ASICCtrl));
1860 
1861     for (i = 2000; i > 0; i--) {
1862         if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1863             break;
1864         mdelay(1);
1865     }
1866 
1867 #ifdef __i386__
1868     if (netif_msg_hw(np)) {
1869         printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1870                (int)(np->tx_ring_dma));
1871         for (i = 0; i < TX_RING_SIZE; i++)
1872             printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1873                    i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
1874                    np->tx_ring[i].frag.length);
1875         printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1876                (int)(np->rx_ring_dma));
1877         for (i = 0; i < /*RX_RING_SIZE*/ 4; i++) {
1878             printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1879                    i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
1880                    np->rx_ring[i].frag.length);
1881         }
1882     }
1883 #endif /* __i386__ debugging only */
1884 
1885     free_irq(np->pci_dev->irq, dev);
1886 
1887     del_timer_sync(&np->timer);
1888 
1889     /* Free all the skbuffs in the Rx queue. */
1890     for (i = 0; i < RX_RING_SIZE; i++) {
1891         np->rx_ring[i].status = 0;
1892         skb = np->rx_skbuff[i];
1893         if (skb) {
1894             dma_unmap_single(&np->pci_dev->dev,
1895                 le32_to_cpu(np->rx_ring[i].frag.addr),
1896                 np->rx_buf_sz, DMA_FROM_DEVICE);
1897             dev_kfree_skb(skb);
1898             np->rx_skbuff[i] = NULL;
1899         }
1900         np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
1901     }
1902     for (i = 0; i < TX_RING_SIZE; i++) {
1903         np->tx_ring[i].next_desc = 0;
1904         skb = np->tx_skbuff[i];
1905         if (skb) {
1906             dma_unmap_single(&np->pci_dev->dev,
1907                 le32_to_cpu(np->tx_ring[i].frag.addr),
1908                 skb->len, DMA_TO_DEVICE);
1909             dev_kfree_skb(skb);
1910             np->tx_skbuff[i] = NULL;
1911         }
1912     }
1913 
1914     return 0;
1915 }
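
/*
 * Teardown order in netdev_close(): kill the tasklets and stop the
 * queue, mask interrupts, halt the Rx/Tx DMA engines (polling DMACtrl),
 * issue a global reset and wait for ResetBusy to clear, release the IRQ
 * and timer, and only then unmap and free the ring buffers.  The
 * 0xBADF00D0 poison left in each Rx fragment address helps catch any
 * stray DMA into a freed buffer.
 */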
1916 
1917 static void sundance_remove1(struct pci_dev *pdev)
1918 {
1919     struct net_device *dev = pci_get_drvdata(pdev);
1920 
1921     if (dev) {
1922         struct netdev_private *np = netdev_priv(dev);
1923         unregister_netdev(dev);
1924         dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1925             np->rx_ring, np->rx_ring_dma);
1926         dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1927             np->tx_ring, np->tx_ring_dma);
1928         pci_iounmap(pdev, np->base);
1929         pci_release_regions(pdev);
1930         free_netdev(dev);
1931     }
1932 }
1933 
1934 static int __maybe_unused sundance_suspend(struct device *dev_d)
1935 {
1936     struct net_device *dev = dev_get_drvdata(dev_d);
1937     struct netdev_private *np = netdev_priv(dev);
1938     void __iomem *ioaddr = np->base;
1939 
1940     if (!netif_running(dev))
1941         return 0;
1942 
1943     netdev_close(dev);
1944     netif_device_detach(dev);
1945 
1946     if (np->wol_enabled) {
1947         iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1948         iowrite16(RxEnable, ioaddr + MACCtrl1);
1949     }
1950 
1951     device_set_wakeup_enable(dev_d, np->wol_enabled);
1952 
1953     return 0;
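
/*
 * Suspend fully closes the interface and detaches it; if Wake-on-LAN is
 * armed, the receiver alone is re-enabled with a minimal RxMode so the
 * dormant chip can still recognize wake events.
 */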
1954 }
1955 
1956 static int __maybe_unused sundance_resume(struct device *dev_d)
1957 {
1958     struct net_device *dev = dev_get_drvdata(dev_d);
1959     int err = 0;
1960 
1961     if (!netif_running(dev))
1962         return 0;
1963 
1964     err = netdev_open(dev);
1965     if (err) {
1966         printk(KERN_ERR "%s: Can't resume interface!\n",
1967                 dev->name);
1968         goto out;
1969     }
1970 
1971     netif_device_attach(dev);
1972 
1973 out:
1974     return err;
1975 }
1976 
1977 static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
1978 
1979 static struct pci_driver sundance_driver = {
1980     .name       = DRV_NAME,
1981     .id_table   = sundance_pci_tbl,
1982     .probe      = sundance_probe1,
1983     .remove     = sundance_remove1,
1984     .driver.pm  = &sundance_pm_ops,
1985 };
1986 
1987 module_pci_driver(sundance_driver);
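
/*
 * module_pci_driver() expands to the boilerplate module_init()/
 * module_exit() pair that registers and unregisters sundance_driver
 * with the PCI core.
 */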